# Dataset dump columns: python_code | repo_name | file_path
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for deserializing `Function`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.core.framework import function_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as function_lib
from tensorflow.python.framework import func_graph as func_graph_lib
from tensorflow.python.framework import function_def_to_graph as function_def_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _is_tensor(t):
return isinstance(t, (ops.Tensor, resource_variable_ops.BaseResourceVariable))
def _call_concrete_function(function, inputs):
"""Calls a restored Function with structured inputs.
This differs from `function.__call__` in that inputs and outputs are
structured and that it casts inputs to tensors if needed.
Note: this does not check that non-tensor inputs match. That should be
done before via `_concrete_function_callable_with`.
Args:
function: ConcreteFunction to call.
inputs: Structured inputs compatible with
`function.graph.structured_input_signature`.
Returns:
The structured function output.
"""
expected_structure = function.graph.structured_input_signature
flatten_inputs = nest.flatten_up_to(expected_structure, inputs)
tensor_inputs = []
for arg, expected in zip(flatten_inputs, nest.flatten(expected_structure)):
if isinstance(expected, tensor_spec.TensorSpec):
tensor_inputs.append(
ops.convert_to_tensor(arg, dtype_hint=expected.dtype))
result = function._call_flat(tensor_inputs, function._captured_inputs) # pylint: disable=protected-access
if isinstance(result, ops.Operation):
return None
return result
def _try_convert_to_tensor_spec(arg, dtype_hint):
"""Returns None or TensorSpec obtained if `arg` is converted to tensor."""
try:
# Note: try conversion in a FuncGraph to avoid polluting the current context.
with func_graph_lib.FuncGraph(name="guess_conversion").as_default():
result = ops.convert_to_tensor(arg, dtype_hint=dtype_hint)
return tensor_spec.TensorSpec(shape=result.shape, dtype=result.dtype)
except (TypeError, ValueError):
return None
def _concrete_function_callable_with(function, inputs, allow_conversion):
"""Returns whether concrete `function` can be called with `inputs`."""
expected_structure = function.graph.structured_input_signature
try:
flatten_inputs = nest.flatten_up_to(expected_structure, inputs)
except (TypeError, ValueError):
return False
try:
# Verify that no input elements were dropped during flattening.
repacked = nest.pack_sequence_as(expected_structure, flatten_inputs)
# TODO(b/129422719): Namedtuple subclasses re-created through
# saved_model.load don't compare equal in type to the original in
# assert_same_structure. Fix that and we can take out check_types=False
# here.
nest.assert_same_structure(inputs, repacked, check_types=False)
except (TypeError, ValueError):
return False
for arg, expected in zip(flatten_inputs, nest.flatten(expected_structure)):
if isinstance(expected, tensor_spec.TensorSpec):
if allow_conversion:
arg = _try_convert_to_tensor_spec(arg, dtype_hint=expected.dtype)
if not _is_tensor(arg) and not isinstance(arg, tensor_spec.TensorSpec):
return False
if arg.dtype != expected.dtype:
return False
if not expected.shape.is_compatible_with(arg.shape):
return False
else:
if arg != expected:
return False
return True
def _deserialize_function_spec_as_nonmethod(function_spec_proto, coder):
"""Deserialize a FunctionSpec object from its proto representation."""
typeless_fullargspec = coder.decode_proto(function_spec_proto.fullargspec)
# Convert a method function into a non-method.
if function_spec_proto.is_method:
if not typeless_fullargspec.args:
raise NotImplementedError(
"Missing support to deserialize a method function without a named "
"'self' argument.")
args = typeless_fullargspec.args[1:]
else:
args = typeless_fullargspec.args
fullargspec = tf_inspect.FullArgSpec(
args=args,
varargs=typeless_fullargspec.varargs,
varkw=typeless_fullargspec.varkw,
defaults=typeless_fullargspec.defaults,
kwonlyargs=typeless_fullargspec.kwonlyargs,
kwonlydefaults=typeless_fullargspec.kwonlydefaults,
annotations=typeless_fullargspec.annotations)
input_signature = coder.decode_proto(function_spec_proto.input_signature)
return function_lib.FunctionSpec(fullargspec=fullargspec,
is_method=False,
args_to_prepend=[],
kwargs_to_include={},
input_signature=input_signature)
# TODO(allenl): The fact that we can't derive ConcreteFunction calling
# conventions from the serialized input spec right now is unfortunate. Merging
# these would be good, maybe by adding TensorSpec names to cache keys so renamed
# keyword arguments would yield different ConcreteFunctions.
def setup_bare_concrete_function(saved_bare_concrete_function,
concrete_functions):
"""Makes a restored bare concrete function callable."""
# Bare concrete functions accept only flat lists of Tensors with unique
# names.
concrete_function = concrete_functions[
saved_bare_concrete_function.concrete_function_name]
# pylint: disable=protected-access
concrete_function._arg_keywords = (
saved_bare_concrete_function.argument_keywords)
concrete_function._num_positional_args = (
saved_bare_concrete_function.allowed_positional_arguments)
# pylint: enable=protected-access
concrete_function.add_to_graph()
return concrete_function
class RestoredFunction(def_function.Function):
"""Wrapper class for a function that has been restored from saved state.
See `def_function.Function`.
"""
def __init__(self, python_function, name, function_spec, concrete_functions):
# TODO(mdan): We may enable autograph once exceptions are supported.
super(RestoredFunction, self).__init__(
python_function, name, autograph=False)
self.concrete_functions = concrete_functions
self._function_spec = function_spec
def _list_all_concrete_functions_for_serialization(self):
return self.concrete_functions
def _defun_with_scope(self, scope):
func = super(RestoredFunction, self)._defun_with_scope(scope)
func._function_spec = self._function_spec # pylint: disable=protected-access
return func
def recreate_function(saved_function, concrete_functions):
"""Creates a `Function` from a `SavedFunction`.
Args:
saved_function: `SavedFunction` proto.
concrete_functions: map from function name to `ConcreteFunction`.
Returns:
A `Function`.
"""
# TODO(andresp): Construct a `Function` with the cache populated
# instead of creating a new `Function` backed by a Python layer to
# glue things together. Current approach is nesting functions deeper for each
# serialization cycle.
coder = nested_structure_coder.StructureCoder()
# Note: handling method functions is tricky since make_decorator does not
# allow control of "ismethod". Additionally, since restored functions do
# not behave as methods (i.e. they always use the same captured tensors
# independent of the object they are bound to), there is little value in
# propagating that correctly.
#
# Ideally this conversion should happen at serialization time. But since
# there are SavedModels which have "ismethod" populated and have an extra
# argument that they expect to be ignored, we do it at deserialization.
function_spec = _deserialize_function_spec_as_nonmethod(
saved_function.function_spec,
coder)
def restored_function_body(*args, **kwargs):
"""Calls a restored function."""
# This is the format of function.graph.structured_input_signature. At this
# point, the args and kwargs have already been canonicalized.
inputs = (args, kwargs)
# First try to find a concrete function that can be called without input
# conversions. This allows one to pick a more specific trace in case there
# was also a more expensive one that supported tensors.
for allow_conversion in [False, True]:
for function_name in saved_function.concrete_functions:
function = concrete_functions[function_name]
if _concrete_function_callable_with(function, inputs, allow_conversion):
return _call_concrete_function(function, inputs)
signature_descriptions = []
def _pretty_format_positional(positional):
return "Positional arguments ({} total):\n * {}".format(
len(positional),
"\n * ".join([str(a) for a in positional]))
for index, function_name in enumerate(saved_function.concrete_functions):
concrete_function = concrete_functions[function_name]
positional, keyword = concrete_function.structured_input_signature
signature_descriptions.append(
"Option {}:\n {}\n Keyword arguments: {}"
.format(index + 1, _pretty_format_positional(positional), keyword))
raise ValueError(
"Could not find matching function to call loaded from the SavedModel. "
"Got:\n {}\n Keyword arguments: {}\n\nExpected "
"these arguments to match one of the following {} option(s):\n\n{}"
.format(_pretty_format_positional(args), kwargs,
len(saved_function.concrete_functions),
"\n\n".join(signature_descriptions)))
concrete_function_objects = []
for concrete_function_name in saved_function.concrete_functions:
concrete_function_objects.append(concrete_functions[concrete_function_name])
restored_function = RestoredFunction(
restored_function_body,
restored_function_body.__name__,
function_spec,
concrete_function_objects)
return tf_decorator.make_decorator(
restored_function_body,
restored_function,
decorator_argspec=function_spec.fullargspec)
def load_function_def_library(library):
"""Load a set of functions as concrete functions without captured inputs.
Function names are manipulated during load such that they do not overlap
with previously created ones.
Args:
library: FunctionDefLibrary proto message.
Returns:
Map of original function names in the library to instances of
`ConcreteFunction` without captured inputs.
Raises:
ValueError: if function dependencies have a cycle.
"""
functions = {}
load_shared_name_suffix = "_load_{}".format(ops.uid())
for fdef in _sort_function_defs(library):
copy = _fix_fdef(fdef, functions, load_shared_name_suffix)
# There is no need to copy functions into the function def graph.
# It leads to an O(n^2) increase in memory when importing functions,
# and the extra function definitions are a no-op since they were already
# imported as functions before (due to the topological sort import).
func_graph = function_def_lib.function_def_to_graph(
copy, copy_functions=False)
for dep in _list_function_deps(fdef):
functions[dep].add_to_graph(func_graph)
func = function_lib.ConcreteFunction(func_graph)
func.add_to_graph()
functions[fdef.signature.name] = func
# Also register the gradients in the current root context.
with ops.init_scope():
func._register_gradient() # pylint: disable=protected-access
return functions
def _sort_function_defs(library):
"""Return a topologic sort of FunctionDefs in a library."""
edges = collections.defaultdict(list)
in_count = collections.defaultdict(lambda: 0)
for fdef in library.function:
for dep in _list_function_deps(fdef):
edges[dep].append(fdef.signature.name)
in_count[fdef.signature.name] += 1
ready = [
fdef.signature.name
for fdef in library.function
if in_count[fdef.signature.name] == 0
]
output = []
while ready:
node = ready.pop()
output.append(node)
for dest in edges[node]:
in_count[dest] -= 1
if not in_count[dest]:
ready.append(dest)
if len(output) != len(library.function):
failed_to_resolve = sorted(set(in_count.keys()) - set(output))
raise ValueError("There is a cyclic-dependency between functions. ",
"Could not resolve %r." % (failed_to_resolve,))
reverse = {fdef.signature.name: fdef for fdef in library.function}
return [reverse[x] for x in output]
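# Illustrative note (added, not in the original file): if a library contains
# a function "outer" whose node_defs reference "inner" through a `func` attr,
# then _list_function_deps(outer_fdef) yields {"inner"}, "outer" starts with
# an in-count of 1, and the sort above emits "inner" before "outer", so
# callees are always registered before the functions that call them.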
def _fix_fdef(orig_fdef, functions, shared_name_suffix):
"""Fixes a FunctionDef proto to be loaded in current context.
In particular, when loading a function library into an eager context, one
must rename the functions to avoid conflicts with existing functions.
Args:
orig_fdef: FunctionDef proto to fix. It is not modified.
functions: map from function name to a ConcreteFunction instance.
shared_name_suffix: A unique string for this load which helps to avoid
`shared_name` collisions across loads. Two functions from the same load
using the same `shared_name` still need to share, but functions from
different loads with the same `shared_name` should not.
Returns:
A fixed copy of the original FunctionDef.
"""
fdef = function_pb2.FunctionDef()
fdef.CopyFrom(orig_fdef)
for node_def in fdef.node_def:
if "_gradient_op_type" in node_def.attr:
if node_def.op in ["StatefulPartitionedCall", "PartitionedCall"]:
# TODO(andresp): This code assumes that the gradient registered for this
# function call is the default gradient for the function and not a
# custom one.
fname = node_def.attr["f"].func.name
node_def.attr["_gradient_op_type"].s = compat.as_bytes(
functions[fname]._gradient_name) # pylint: disable=protected-access
else:
logging.warning("Importing a function (%s) with ops with custom "
"gradients. Will likely fail if a gradient is "
"requested.", fdef.signature.name)
for _, attr_value in node_def.attr.items():
if attr_value.func.name:
attr_value.func.name = functions[attr_value.func.name].name
# TODO(b/124205571): Avoid accidental sharing and destruction of restored
# resources. For now uniquify "shared_name" when loading functions to avoid
# sharing.
if "shared_name" in node_def.attr:
node_def.attr["shared_name"].s += compat.as_bytes(shared_name_suffix)
fdef.signature.name = _clean_function_name(fdef.signature.name)
return fdef
def _list_function_deps(fdef):
# TODO(andresp): Recurse into list attributes and into NameAttrList attrs both
# when listing deps and when fixing them. `function_def_to_graph` also
# requires fixes.
deps = set()
for node_def in fdef.node_def:
for _, attr_value in node_def.attr.items():
if attr_value.WhichOneof("value") == "func":
deps.add(attr_value.func.name)
return deps
def _clean_function_name(name):
"""Vanity function to keep the function names comprehensible."""
# Note: each time a function is wrapped into `function_lib.ConcreteFunction`
# its name becomes "__inference_<orig>_xyz".
match = re.search(r"^__inference_(.*)_\d+$", name)
if match:
return match.group(1)
else:
return name
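# Illustrative examples (added, not in the original file):
#   _clean_function_name("__inference_my_model_call_42")  -> "my_model_call"
#   _clean_function_name("my_model_call")                  -> "my_model_call"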
# repo_name: tensorflow-master
# file_path: tensorflow/python/saved_model/function_deserialization.py
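# A minimal usage sketch (added here for illustration, not part of the repo):
# the deserialization tools above are what make a restored `tf.function`
# dispatch between its saved concrete functions. The export path below is
# hypothetical.
import tensorflow as tf


class _DoubleModule(tf.Module):

  @tf.function
  def double(self, x):
    return 2 * x


module = _DoubleModule()
module.double(tf.constant(1))    # trace an int32 concrete function
module.double(tf.constant(1.0))  # trace a float32 concrete function
tf.saved_model.save(module, "/tmp/double_module")

restored = tf.saved_model.load("/tmp/double_module")
# The restored function picks the matching trace based on the structured
# input signature of the call (see `_concrete_function_callable_with`).
print(restored.double(tf.constant(3)))    # tf.Tensor(6, dtype=int32)
print(restored.double(tf.constant(3.0)))  # tf.Tensor(6.0, dtype=float32)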
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel simple save functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import simple_save
from tensorflow.python.saved_model import tag_constants
class SimpleSaveTest(test.TestCase):
def _init_and_validate_variable(self, sess, variable_name, variable_value):
v = variables.Variable(variable_value, name=variable_name)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(variable_value, self.evaluate(v))
return v
def _check_variable_info(self, actual_variable, expected_variable):
self.assertEqual(actual_variable.name, expected_variable.name)
self.assertEqual(actual_variable.dtype, expected_variable.dtype)
self.assertEqual(len(actual_variable.shape), len(expected_variable.shape))
for i in range(len(actual_variable.shape)):
self.assertEqual(actual_variable.shape[i], expected_variable.shape[i])
def _check_tensor_info(self, actual_tensor_info, expected_tensor):
self.assertEqual(actual_tensor_info.name, expected_tensor.name)
self.assertEqual(actual_tensor_info.dtype, expected_tensor.dtype)
self.assertEqual(
len(actual_tensor_info.tensor_shape.dim), len(expected_tensor.shape))
for i in range(len(actual_tensor_info.tensor_shape.dim)):
self.assertEqual(actual_tensor_info.tensor_shape.dim[i].size,
expected_tensor.shape[i])
@test_util.run_deprecated_v1
def testSimpleSave(self):
"""Test simple_save that uses the default parameters."""
export_dir = os.path.join(test.get_temp_dir(),
"test_simple_save")
# Initialize input and output variables and save a prediction graph using
# the default parameters.
with self.session(graph=ops.Graph()) as sess:
var_x = self._init_and_validate_variable(sess, "var_x", 1)
var_y = self._init_and_validate_variable(sess, "var_y", 2)
inputs = {"x": var_x}
outputs = {"y": var_y}
simple_save.simple_save(sess, export_dir, inputs, outputs)
# Restore the graph with a valid tag and check the global variables and
# signature def map.
with self.session(graph=ops.Graph()) as sess:
graph = loader.load(sess, [tag_constants.SERVING], export_dir)
collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Check value and metadata of the saved variables.
self.assertEqual(len(collection_vars), 2)
self.assertEqual(1, collection_vars[0].eval())
self.assertEqual(2, collection_vars[1].eval())
self._check_variable_info(collection_vars[0], var_x)
self._check_variable_info(collection_vars[1], var_y)
# Check that the appropriate signature_def_map is created with the
# default key and method name, and the specified inputs and outputs.
signature_def_map = graph.signature_def
self.assertEqual(1, len(signature_def_map))
self.assertEqual(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
list(signature_def_map.keys())[0])
signature_def = signature_def_map[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
self.assertEqual(signature_constants.PREDICT_METHOD_NAME,
signature_def.method_name)
self.assertEqual(1, len(signature_def.inputs))
self._check_tensor_info(signature_def.inputs["x"], var_x)
self.assertEqual(1, len(signature_def.outputs))
self._check_tensor_info(signature_def.outputs["y"], var_y)
if __name__ == "__main__":
test.main()
# repo_name: tensorflow-master
# file_path: tensorflow/python/saved_model/simple_save_test.py
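# A minimal sketch (added for illustration, not part of the repo) of the API
# exercised by the test above, written against the TF1 compat layer; the
# export directory is hypothetical.
import tensorflow.compat.v1 as tf1

tf1.disable_eager_execution()
with tf1.Session(graph=tf1.Graph()) as sess:
  x = tf1.placeholder(tf1.float32, shape=[None, 1], name="x")
  w = tf1.Variable(2.0, name="w")
  y = tf1.multiply(x, w, name="y")
  sess.run(tf1.global_variables_initializer())
  tf1.saved_model.simple_save(
      sess, "/tmp/simple_save_demo", inputs={"x": x}, outputs={"y": y})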
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import a TF v1-style SavedModel when executing eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import signature_serialization
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training.tracking import tracking
class _Initializer(tracking.CapturableResource):
"""Represents an initialization operation restored from a SavedModel.
Without this object, re-export of imported 1.x SavedModels would omit the
original SavedModel's initialization procedure.
Created when `tf.saved_model.load` loads a TF 1.x-style SavedModel with an
initialization op. This object holds a function which runs the
initialization. It does not require any manual user intervention;
`tf.saved_model.save` will see this object and automatically add it to the
exported SavedModel, and `tf.saved_model.load` runs the initialization
function automatically.
"""
def __init__(self, init_fn, asset_paths):
super(_Initializer, self).__init__()
self._asset_paths = asset_paths
self._init_fn = init_fn
def _create_resource(self):
return array_ops.placeholder(
dtype=dtypes.resource, shape=[], name="unused_resource")
def _initialize(self):
return self._init_fn(*[path.asset_path for path in self._asset_paths])
class _EagerSavedModelLoader(loader_impl.SavedModelLoader):
"""Loads a SavedModel without using Sessions."""
def get_meta_graph_def_from_tags(self, tags):
"""Override to support implicit one-MetaGraph loading with tags=None."""
if tags is None:
if len(self._saved_model.meta_graphs) != 1:
tag_sets = [mg.meta_info_def.tags
for mg in self._saved_model.meta_graphs]
raise ValueError(
("Importing a SavedModel with tf.saved_model.load requires a "
"'tags=' argument if there is more than one MetaGraph. Got "
"'tags=None', but there are {} MetaGraphs in the SavedModel with "
"tag sets {}. Pass a 'tags=' argument to load this SavedModel.")
.format(len(self._saved_model.meta_graphs), tag_sets))
return self._saved_model.meta_graphs[0]
return super(_EagerSavedModelLoader, self).get_meta_graph_def_from_tags(
tags)
def load_graph(self, returns, meta_graph_def):
"""Called from wrap_function to import `meta_graph_def`."""
# pylint: disable=protected-access
saver, _ = tf_saver._import_meta_graph_with_return_elements(
meta_graph_def)
# pylint: enable=protected-access
returns[0] = saver
def restore_variables(self, wrapped, saver):
"""Restores variables from the checkpoint."""
if saver is not None:
saver_def = saver.saver_def
filename_tensor = wrapped.graph.as_graph_element(
saver_def.filename_tensor_name)
# We both feed and fetch filename_tensor so we have an operation to use to
# feed into variable initializers (only relevant for v1 graph building).
restore_fn = wrapped.prune(
feeds=[filename_tensor],
fetches=[filename_tensor,
wrapped.graph.as_graph_element(saver_def.restore_op_name)])
initializer, _ = restore_fn(constant_op.constant(self._variables_path))
if not ops.executing_eagerly_outside_functions():
for variable in wrapped.graph.get_collection_ref(
ops.GraphKeys.GLOBAL_VARIABLES):
# pylint: disable=protected-access
variable._initializer_op = initializer
# pylint: enable=protected-access
def _extract_signatures(self, wrapped, meta_graph_def):
"""Creates ConcreteFunctions for signatures in `meta_graph_def`."""
signature_functions = {}
for signature_key, signature_def in meta_graph_def.signature_def.items():
if signature_def.inputs:
input_names, input_specs = zip(*signature_def.inputs.items())
else:
input_names = []
input_specs = []
# TODO(allenl): Support optional arguments
feeds = [wrapped.graph.as_graph_element(inp.name) for inp in input_specs]
fetches = {name: out for name, out in signature_def.outputs.items()}
try:
signature_fn = wrapped.prune(feeds=feeds, fetches=fetches)
except lift_to_graph.UnliftableError as ex:
# Mutate the exception to add a bit more detail.
args = ex.args
if not args:
message = ""
else:
message = args[0]
message = (
("A SavedModel signature needs an input for each placeholder the "
"signature's outputs use. An output for signature '{}' depends on "
"a placeholder which is not an input (i.e. the placeholder is not "
"fed a value).\n\n").format(signature_key)
+ message)
ex.args = (message,) + args[1:]
raise
# pylint: disable=protected-access
signature_fn._arg_keywords = input_names
if len(input_names) == 1:
# Allowing positional arguments does not create any ambiguity if there's
# only one.
signature_fn._num_positional_args = 1
else:
signature_fn._num_positional_args = 0
# pylint: enable=protected-access
signature_functions[signature_key] = signature_fn
return signature_functions
def load(self, tags):
"""Creates an object from the MetaGraph identified by `tags`."""
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
load_graph_returns = [None]
wrapped = wrap_function.wrap_function(
functools.partial(self.load_graph, load_graph_returns, meta_graph_def),
signature=[])
saver, = load_graph_returns
self.restore_variables(wrapped, saver)
with wrapped.graph.as_default():
init_op = loader_impl.get_init_op(
meta_graph_def) or monitored_session.Scaffold.default_local_init_op()
# Add a dummy Tensor we know we can fetch to add control dependencies to.
init_anchor = constant_op.constant(0., name="dummy_fetch")
root = tracking.AutoTrackable()
asset_feed_tensors = []
asset_paths = []
for tensor_name, value in loader_impl.get_asset_tensors(
self._export_dir, meta_graph_def).items():
asset_feed_tensors.append(wrapped.graph.as_graph_element(tensor_name))
asset_paths.append(tracking.TrackableAsset(value))
init_fn = wrapped.prune(
feeds=asset_feed_tensors,
fetches=[init_anchor, wrapped.graph.as_graph_element(init_op)])
initializer = _Initializer(init_fn, asset_paths)
# pylint: disable=protected-access
local_init_op, _ = initializer._initialize()
# pylint: enable=protected-access
with ops.init_scope():
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, local_init_op)
for variable in wrapped.graph.get_collection_ref(
ops.GraphKeys.LOCAL_VARIABLES):
# pylint: disable=protected-access
variable._initializer_op = local_init_op
# pylint: enable=protected-access
root.initializer = initializer
root.asset_paths = asset_paths
signature_functions = self._extract_signatures(wrapped, meta_graph_def)
root.signatures = signature_serialization.create_signature_map(
signature_functions)
root.variables = list(wrapped.graph.variables)
root.tensorflow_version = (
meta_graph_def.meta_info_def.tensorflow_version)
root.tensorflow_git_version = (
meta_graph_def.meta_info_def.tensorflow_git_version)
root.graph = wrapped.graph
root.prune = wrapped.prune
return root
def load(export_dir, tags):
"""Load a v1-style SavedModel as an object."""
loader = _EagerSavedModelLoader(export_dir)
return loader.load(tags=tags)
# repo_name: tensorflow-master
# file_path: tensorflow/python/saved_model/load_v1_in_v2.py
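# A hedged sketch (added for illustration, not part of the repo): the public
# entry point that reaches the loader above. `tf.saved_model.load` uses this
# v1 code path for SavedModels without an object graph; the export directory
# and the "x" input name are hypothetical.
import tensorflow as tf

imported = tf.saved_model.load("/tmp/tf1_style_export", tags=["serve"])
serving_fn = imported.signatures["serving_default"]
# Signature functions built by `_extract_signatures` take keyword arguments
# named after the SignatureDef inputs and return a dict of output tensors.
outputs = serving_fn(x=tf.constant([[1.0]]))
print(outputs)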
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for managing different mode strings used by Keras and Estimator models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
class KerasModeKeys(object):
"""Standard names for model modes.
The following standard keys are defined:
* `TRAIN`: training/fitting mode.
* `TEST`: testing/evaluation mode.
* `PREDICT`: prediction/inference mode.
"""
TRAIN = 'train'
TEST = 'test'
PREDICT = 'predict'
# TODO(kathywu): Remove copy in Estimator after nightlies
class EstimatorModeKeys(object):
"""Standard names for Estimator model modes.
The following standard keys are defined:
* `TRAIN`: training/fitting mode.
* `EVAL`: testing/evaluation mode.
* `PREDICT`: prediction/inference mode.
"""
TRAIN = 'train'
EVAL = 'eval'
PREDICT = 'infer'
def is_predict(mode):
return mode in [KerasModeKeys.PREDICT, EstimatorModeKeys.PREDICT]
def is_eval(mode):
return mode in [KerasModeKeys.TEST, EstimatorModeKeys.EVAL]
def is_train(mode):
return mode in [KerasModeKeys.TRAIN, EstimatorModeKeys.TRAIN]
class ModeKeyMap(collections.Mapping):
"""Map using ModeKeys as keys.
This class creates an immutable mapping from modes to values. For example,
SavedModel export of Keras and Estimator models uses this to map modes to their
corresponding MetaGraph tags/SignatureDef keys.
Since this class uses modes, rather than strings, as keys, both "predict"
(Keras's PREDICT ModeKey) and "infer" (Estimator's PREDICT ModeKey) map to the
same value.
"""
def __init__(self, **kwargs):
self._internal_dict = {}
self._keys = []
for key in kwargs:
self._keys.append(key)
dict_key = self._get_internal_key(key)
if dict_key in self._internal_dict:
raise ValueError(
'Error creating ModeKeyMap. Multiple keys/values found for {} mode.'
.format(dict_key))
self._internal_dict[dict_key] = kwargs[key]
def _get_internal_key(self, key):
"""Return keys used for the internal dictionary."""
if is_train(key):
return KerasModeKeys.TRAIN
if is_eval(key):
return KerasModeKeys.TEST
if is_predict(key):
return KerasModeKeys.PREDICT
raise ValueError('Invalid mode key: {}.'.format(key))
def __getitem__(self, key):
return self._internal_dict[self._get_internal_key(key)]
def __iter__(self):
return iter(self._keys)
def __len__(self):
return len(self._keys)
# repo_name: tensorflow-master
# file_path: tensorflow/python/saved_model/model_utils/mode_keys.py
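# Illustrative sketch (added, not part of the repo): Keras and Estimator
# PREDICT keys resolve to the same ModeKeyMap entry.
from tensorflow.python.saved_model.model_utils import mode_keys

tag_map = mode_keys.ModeKeyMap(
    **{mode_keys.KerasModeKeys.PREDICT: "serving_default"})
assert tag_map[mode_keys.KerasModeKeys.PREDICT] == "serving_default"
assert tag_map[mode_keys.EstimatorModeKeys.PREDICT] == "serving_default"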
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for creating SavedModels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import time
import six
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model.model_utils import export_output as export_output_lib
from tensorflow.python.saved_model.model_utils import mode_keys
from tensorflow.python.saved_model.model_utils.mode_keys import KerasModeKeys as ModeKeys
from tensorflow.python.util import compat
# Mapping of the modes to appropriate MetaGraph tags in the SavedModel.
EXPORT_TAG_MAP = mode_keys.ModeKeyMap(**{
ModeKeys.PREDICT: [tag_constants.SERVING],
ModeKeys.TRAIN: [tag_constants.TRAINING],
ModeKeys.TEST: [tag_constants.EVAL]})
# For every exported mode, a SignatureDef map should be created using the
# functions `export_outputs_for_mode` and `build_all_signature_defs`. By
# default, this map will contain a single Signature that defines the input
# tensors and output predictions, losses, and/or metrics (depending on the mode).
# The default keys used in the SignatureDef map are defined below.
SIGNATURE_KEY_MAP = mode_keys.ModeKeyMap(**{
ModeKeys.PREDICT: signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
ModeKeys.TRAIN: signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY,
ModeKeys.TEST: signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY})
# Default names used in the SignatureDef input map, which maps strings to
# TensorInfo protos.
SINGLE_FEATURE_DEFAULT_NAME = 'feature'
SINGLE_RECEIVER_DEFAULT_NAME = 'input'
SINGLE_LABEL_DEFAULT_NAME = 'label'
### Below utilities are specific to SavedModel exports.
def build_all_signature_defs(receiver_tensors,
export_outputs,
receiver_tensors_alternatives=None,
serving_only=True):
"""Build `SignatureDef`s for all export outputs.
Args:
receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
input nodes where this receiver expects to be fed by default. Typically,
this is a single placeholder expecting serialized `tf.Example` protos.
export_outputs: a dict of ExportOutput instances, each of which has
an as_signature_def instance method that will be called to retrieve
the signature_def for all export output tensors.
receiver_tensors_alternatives: a dict of string to additional
groups of receiver tensors, each of which may be a `Tensor` or a dict of
string to `Tensor`. These named receiver tensor alternatives generate
additional serving signatures, which may be used to feed inputs at
different points within the input receiver subgraph. A typical usage is
to allow feeding raw feature `Tensor`s *downstream* of the
tf.io.parse_example() op. Defaults to None.
serving_only: boolean; if true, resulting signature defs will only include
valid serving signatures. If false, all requested signatures will be
returned.
Returns:
signature_def representing all passed args.
Raises:
ValueError: if export_outputs is not a dict
"""
if not isinstance(receiver_tensors, dict):
receiver_tensors = {SINGLE_RECEIVER_DEFAULT_NAME: receiver_tensors}
if export_outputs is None or not isinstance(export_outputs, dict):
raise ValueError('export_outputs must be a dict and not '
'{}'.format(type(export_outputs)))
signature_def_map = {}
excluded_signatures = {}
for output_key, export_output in export_outputs.items():
signature_name = '{}'.format(output_key or 'None')
try:
signature = export_output.as_signature_def(receiver_tensors)
signature_def_map[signature_name] = signature
except ValueError as e:
excluded_signatures[signature_name] = str(e)
if receiver_tensors_alternatives:
for receiver_name, receiver_tensors_alt in (
six.iteritems(receiver_tensors_alternatives)):
if not isinstance(receiver_tensors_alt, dict):
receiver_tensors_alt = {
SINGLE_RECEIVER_DEFAULT_NAME: receiver_tensors_alt
}
for output_key, export_output in export_outputs.items():
signature_name = '{}:{}'.format(receiver_name or 'None', output_key or
'None')
try:
signature = export_output.as_signature_def(receiver_tensors_alt)
signature_def_map[signature_name] = signature
except ValueError as e:
excluded_signatures[signature_name] = str(e)
_log_signature_report(signature_def_map, excluded_signatures)
# The above calls to export_output_lib.as_signature_def should return only
# valid signatures; if there is a validity problem, they raise a ValueError,
# in which case we exclude that signature from signature_def_map above.
# The is_valid_signature check ensures that the signatures produced are
# valid for serving, and acts as an additional sanity check for export
# signatures produced for serving. We skip this check for training and eval
# signatures, which are not intended for serving.
if serving_only:
signature_def_map = {
k: v
for k, v in signature_def_map.items()
if signature_def_utils.is_valid_signature(v)
}
return signature_def_map
_FRIENDLY_METHOD_NAMES = {
signature_constants.CLASSIFY_METHOD_NAME: 'Classify',
signature_constants.REGRESS_METHOD_NAME: 'Regress',
signature_constants.PREDICT_METHOD_NAME: 'Predict',
signature_constants.SUPERVISED_TRAIN_METHOD_NAME: 'Train',
signature_constants.SUPERVISED_EVAL_METHOD_NAME: 'Eval',
}
def _log_signature_report(signature_def_map, excluded_signatures):
"""Log a report of which signatures were produced."""
sig_names_by_method_name = collections.defaultdict(list)
# We'll collect whatever method_names are present, but also we want to make
# sure to output a line for each of the three standard methods even if they
# have no signatures.
for method_name in _FRIENDLY_METHOD_NAMES:
sig_names_by_method_name[method_name] = []
for signature_name, sig in signature_def_map.items():
sig_names_by_method_name[sig.method_name].append(signature_name)
# TODO(b/67733540): consider printing the full signatures, not just names
for method_name, sig_names in sig_names_by_method_name.items():
if method_name in _FRIENDLY_METHOD_NAMES:
method_name = _FRIENDLY_METHOD_NAMES[method_name]
logging.info('Signatures INCLUDED in export for {}: {}'.format(
method_name, sig_names if sig_names else 'None'))
if excluded_signatures:
logging.info('Signatures EXCLUDED from export because they cannot '
'be served via TensorFlow Serving APIs:')
for signature_name, message in excluded_signatures.items():
logging.info('\'{}\' : {}'.format(signature_name, message))
if not signature_def_map:
logging.warn('Export includes no signatures!')
elif (signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in
signature_def_map):
logging.warn('Export includes no default signature!')
# When we create a timestamped directory, there is a small chance that the
# directory already exists because another process is also creating these
# directories. In this case we just wait one second to get a new timestamp and
# try again. If this fails several times in a row, then something is seriously
# wrong.
MAX_DIRECTORY_CREATION_ATTEMPTS = 10
def get_timestamped_export_dir(export_dir_base):
"""Builds a path to a new subdirectory within the base directory.
Each export is written into a new subdirectory named using the
current time. This guarantees monotonically increasing version
numbers even across multiple runs of the pipeline.
The timestamp used is the number of seconds since epoch UTC.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
Returns:
The full path of the new subdirectory (which is not actually created yet).
Raises:
RuntimeError: if repeated attempts fail to obtain a unique timestamped
directory name.
"""
attempts = 0
while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:
timestamp = int(time.time())
result_dir = os.path.join(
compat.as_bytes(export_dir_base), compat.as_bytes(str(timestamp)))
if not gfile.Exists(result_dir):
# Collisions are still possible (though extremely unlikely): this
# directory is not actually created yet, but it will be created almost
# instantly on return from this function.
return result_dir
time.sleep(1)
attempts += 1
logging.warn('Directory {} already exists; retrying (attempt {}/{})'.format(
result_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))
raise RuntimeError('Failed to obtain a unique export directory name after '
'{} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))
def get_temp_export_dir(timestamped_export_dir):
"""Builds a directory name based on the argument but starting with 'temp-'.
This relies on the fact that TensorFlow Serving ignores subdirectories of
the base directory that can't be parsed as integers.
Args:
timestamped_export_dir: the name of the eventual export directory, e.g.
/foo/bar/<timestamp>
Returns:
A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-<timestamp>.
"""
(dirname, basename) = os.path.split(timestamped_export_dir)
temp_export_dir = os.path.join(
compat.as_bytes(dirname), compat.as_bytes('temp-{}'.format(basename)))
return temp_export_dir
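# Illustrative examples (added, not in the original file); the timestamp is
# whatever int(time.time()) returns at call time:
#   get_timestamped_export_dir('/foo/bar')       -> b'/foo/bar/1555555555'
#   get_temp_export_dir('/foo/bar/1555555555')   -> b'/foo/bar/temp-1555555555'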
def export_outputs_for_mode(
mode, serving_export_outputs=None, predictions=None, loss=None,
metrics=None):
"""Util function for constructing a `ExportOutput` dict given a mode.
The returned dict can be directly passed to `build_all_signature_defs` helper
function as the `export_outputs` argument, used for generating a SignatureDef
map.
Args:
mode: A `ModeKeys` specifying the mode.
serving_export_outputs: Describes the output signatures to be exported to
`SavedModel` and used during serving. Should be a dict or None.
predictions: A dict of Tensors or single Tensor representing model
predictions. This argument is only used if serving_export_outputs is not
set.
loss: A dict of Tensors or single Tensor representing calculated loss.
metrics: A dict of (metric_value, update_op) tuples, or a single tuple.
metric_value must be a Tensor, and update_op must be a Tensor or Op.
Returns:
Dictionary mapping a key to a `tf.estimator.export.ExportOutput` object.
The key is the expected SignatureDef key for the mode.
Raises:
ValueError: if an appropriate ExportOutput cannot be found for the mode.
"""
if mode not in SIGNATURE_KEY_MAP:
raise ValueError(
'Export output type not found for mode: {}. Expected one of: {}.\n'
'One likely error is that V1 Estimator Modekeys were somehow passed to '
'this function. Please ensure that you are using the new ModeKeys.'
.format(mode, SIGNATURE_KEY_MAP.keys()))
signature_key = SIGNATURE_KEY_MAP[mode]
if mode_keys.is_predict(mode):
return get_export_outputs(serving_export_outputs, predictions)
elif mode_keys.is_train(mode):
return {signature_key: export_output_lib.TrainOutput(
loss=loss, predictions=predictions, metrics=metrics)}
else:
return {signature_key: export_output_lib.EvalOutput(
loss=loss, predictions=predictions, metrics=metrics)}
def get_export_outputs(export_outputs, predictions):
"""Validate export_outputs or create default export_outputs.
Args:
export_outputs: Describes the output signatures to be exported to
`SavedModel` and used during serving. Should be a dict or None.
predictions: Predictions `Tensor` or dict of `Tensor`.
Returns:
Valid export_outputs dict
Raises:
TypeError: if export_outputs is not a dict or its values are not
ExportOutput instances.
"""
if export_outputs is None:
default_output = export_output_lib.PredictOutput(predictions)
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: default_output}
if not isinstance(export_outputs, dict):
raise TypeError('export_outputs must be dict, given: {}'.format(
export_outputs))
for v in six.itervalues(export_outputs):
if not isinstance(v, export_output_lib.ExportOutput):
raise TypeError(
'Values in export_outputs must be ExportOutput objects. '
'Given: {}'.format(export_outputs))
_maybe_add_default_serving_output(export_outputs)
return export_outputs
def _maybe_add_default_serving_output(export_outputs):
"""Add a default serving output to the export_outputs if not present.
Args:
export_outputs: Describes the output signatures to be exported to
`SavedModel` and used during serving. Should be a dict.
Returns:
export_outputs dict with default serving signature added if necessary
Raises:
ValueError: if multiple export_outputs were provided without a default
serving key.
"""
if len(export_outputs) == 1:
(key, value), = export_outputs.items()
if key != signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_outputs[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = value
if len(export_outputs) > 1:
if (signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
not in export_outputs):
raise ValueError(
'Multiple export_outputs were provided, but none of them is '
'specified as the default. Do this by naming one of them with '
'signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.')
return export_outputs
# repo_name: tensorflow-master
# file_path: tensorflow/python/saved_model/model_utils/export_utils.py
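# A hedged sketch (added for illustration, not part of the repo) tying the
# utilities above together in graph mode; tensor names and the feature shape
# are hypothetical.
import tensorflow.compat.v1 as tf1
from tensorflow.python.saved_model.model_utils import export_utils
from tensorflow.python.saved_model.model_utils import mode_keys

with tf1.Graph().as_default():
  features = tf1.placeholder(tf1.float32, [None, 2], name="features")
  predictions = {"scores": tf1.identity(features, name="scores")}
  export_outputs = export_utils.export_outputs_for_mode(
      mode=mode_keys.KerasModeKeys.PREDICT, predictions=predictions)
  signature_def_map = export_utils.build_all_signature_defs(
      receiver_tensors={"features": features},
      export_outputs=export_outputs)
  # Expect the default serving signature key, "serving_default".
  print(sorted(signature_def_map.keys()))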
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model.model_utils import export_output as export_output_lib
class ExportOutputTest(test.TestCase):
def test_regress_value_must_be_float(self):
with context.graph_mode():
value = array_ops.placeholder(dtypes.string, 1, name='output-tensor-1')
with self.assertRaisesRegexp(
ValueError, 'Regression output value must be a float32 Tensor'):
export_output_lib.RegressionOutput(value)
def test_classify_classes_must_be_strings(self):
with context.graph_mode():
classes = array_ops.placeholder(dtypes.float32, 1, name='output-tensor-1')
with self.assertRaisesRegexp(
ValueError, 'Classification classes must be a string Tensor'):
export_output_lib.ClassificationOutput(classes=classes)
def test_classify_scores_must_be_float(self):
with context.graph_mode():
scores = array_ops.placeholder(dtypes.string, 1, name='output-tensor-1')
with self.assertRaisesRegexp(
ValueError, 'Classification scores must be a float32 Tensor'):
export_output_lib.ClassificationOutput(scores=scores)
def test_classify_requires_classes_or_scores(self):
with self.assertRaisesRegexp(
ValueError, 'At least one of scores and classes must be set.'):
export_output_lib.ClassificationOutput()
def test_build_standardized_signature_def_regression(self):
with context.graph_mode():
input_tensors = {
'input-1':
array_ops.placeholder(
dtypes.string, 1, name='input-tensor-1')
}
value = array_ops.placeholder(dtypes.float32, 1, name='output-tensor-1')
export_output = export_output_lib.RegressionOutput(value)
actual_signature_def = export_output.as_signature_def(input_tensors)
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value('DT_FLOAT')
dtype_string = types_pb2.DataType.Value('DT_STRING')
expected_signature_def.inputs[
signature_constants.REGRESS_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name='input-tensor-1:0',
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.REGRESS_OUTPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name='output-tensor-1:0',
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.REGRESS_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classify_classes_only(self):
"""Tests classification with one output tensor."""
with context.graph_mode():
input_tensors = {
'input-1':
array_ops.placeholder(
dtypes.string, 1, name='input-tensor-1')
}
classes = array_ops.placeholder(dtypes.string, 1, name='output-tensor-1')
export_output = export_output_lib.ClassificationOutput(classes=classes)
actual_signature_def = export_output.as_signature_def(input_tensors)
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_string = types_pb2.DataType.Value('DT_STRING')
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name='input-tensor-1:0',
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(name='output-tensor-1:0',
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classify_both(self):
"""Tests multiple output tensors that include classes and scores."""
with context.graph_mode():
input_tensors = {
'input-1':
array_ops.placeholder(
dtypes.string, 1, name='input-tensor-1')
}
classes = array_ops.placeholder(dtypes.string, 1,
name='output-tensor-classes')
scores = array_ops.placeholder(dtypes.float32, 1,
name='output-tensor-scores')
export_output = export_output_lib.ClassificationOutput(
scores=scores, classes=classes)
actual_signature_def = export_output.as_signature_def(input_tensors)
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value('DT_FLOAT')
dtype_string = types_pb2.DataType.Value('DT_STRING')
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name='input-tensor-1:0',
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(name='output-tensor-classes:0',
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(name='output-tensor-scores:0',
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classify_scores_only(self):
"""Tests classification without classes tensor."""
with context.graph_mode():
input_tensors = {
'input-1':
array_ops.placeholder(
dtypes.string, 1, name='input-tensor-1')
}
scores = array_ops.placeholder(dtypes.float32, 1,
name='output-tensor-scores')
export_output = export_output_lib.ClassificationOutput(
scores=scores)
actual_signature_def = export_output.as_signature_def(input_tensors)
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value('DT_FLOAT')
dtype_string = types_pb2.DataType.Value('DT_STRING')
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name='input-tensor-1:0',
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(name='output-tensor-scores:0',
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_predict_outputs_valid(self):
"""Tests that no errors are raised when provided outputs are valid."""
outputs = {
'output0': constant_op.constant([0]),
u'output1': constant_op.constant(['foo']),
}
export_output_lib.PredictOutput(outputs)
# Single Tensor is OK too
export_output_lib.PredictOutput(constant_op.constant([0]))
def test_predict_outputs_invalid(self):
with self.assertRaisesRegexp(
ValueError,
'Prediction output key must be a string'):
export_output_lib.PredictOutput({1: constant_op.constant([0])})
with self.assertRaisesRegexp(
ValueError,
'Prediction output value must be a Tensor'):
export_output_lib.PredictOutput({
'prediction1': sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
})
class MockSupervisedOutput(export_output_lib._SupervisedOutput):
"""So that we can test the abstract class methods directly."""
def _get_signature_def_fn(self):
pass
class SupervisedOutputTest(test.TestCase):
def test_supervised_outputs_valid(self):
"""Tests that no errors are raised when provided outputs are valid."""
with context.graph_mode():
loss = {'my_loss': constant_op.constant([0])}
predictions = {u'output1': constant_op.constant(['foo'])}
metric_obj = metrics_module.Mean()
metric_obj.update_state(constant_op.constant([0]))
metrics = {
'metrics': metric_obj,
'metrics2': (constant_op.constant([0]), constant_op.constant([10]))
}
outputter = MockSupervisedOutput(loss, predictions, metrics)
self.assertEqual(outputter.loss['loss/my_loss'], loss['my_loss'])
self.assertEqual(
outputter.predictions['predictions/output1'], predictions['output1'])
self.assertEqual(outputter.metrics['metrics/update_op'].name,
'metric_op_wrapper:0')
self.assertEqual(
outputter.metrics['metrics2/update_op'], metrics['metrics2'][1])
# Single Tensor is OK too
outputter = MockSupervisedOutput(
loss['my_loss'], predictions['output1'], metrics['metrics'])
self.assertEqual(outputter.loss, {'loss': loss['my_loss']})
self.assertEqual(
outputter.predictions, {'predictions': predictions['output1']})
self.assertEqual(outputter.metrics['metrics/update_op'].name,
'metric_op_wrapper_1:0')
def test_supervised_outputs_none(self):
outputter = MockSupervisedOutput(
constant_op.constant([0]), None, None)
self.assertEqual(len(outputter.loss), 1)
self.assertEqual(outputter.predictions, None)
self.assertEqual(outputter.metrics, None)
def test_supervised_outputs_invalid(self):
with self.assertRaisesRegexp(ValueError, 'predictions output value must'):
MockSupervisedOutput(constant_op.constant([0]), [3], None)
with self.assertRaisesRegexp(ValueError, 'loss output value must'):
MockSupervisedOutput('str', None, None)
with self.assertRaisesRegexp(ValueError, 'metrics output value must'):
MockSupervisedOutput(None, None, (15.3, 4))
with self.assertRaisesRegexp(ValueError, 'loss output key must'):
MockSupervisedOutput({25: 'Tensor'}, None, None)
def test_supervised_outputs_tuples(self):
"""Tests that no errors are raised when provided outputs are valid."""
with context.graph_mode():
loss = {('my', 'loss'): constant_op.constant([0])}
predictions = {(u'output1', '2'): constant_op.constant(['foo'])}
metric_obj = metrics_module.Mean()
metric_obj.update_state(constant_op.constant([0]))
metrics = {
('metrics', '1'):
metric_obj,
('metrics', '2'): (constant_op.constant([0]),
constant_op.constant([10]))
}
outputter = MockSupervisedOutput(loss, predictions, metrics)
self.assertEqual(set(outputter.loss.keys()), set(['loss/my/loss']))
self.assertEqual(set(outputter.predictions.keys()),
set(['predictions/output1/2']))
self.assertEqual(
set(outputter.metrics.keys()),
set([
'metrics/1/value', 'metrics/1/update_op', 'metrics/2/value',
'metrics/2/update_op'
]))
def test_supervised_outputs_no_prepend(self):
"""Tests that no errors are raised when provided outputs are valid."""
with context.graph_mode():
loss = {'loss': constant_op.constant([0])}
predictions = {u'predictions': constant_op.constant(['foo'])}
metric_obj = metrics_module.Mean()
metric_obj.update_state(constant_op.constant([0]))
metrics = {
'metrics_1': metric_obj,
'metrics_2': (constant_op.constant([0]), constant_op.constant([10]))
}
outputter = MockSupervisedOutput(loss, predictions, metrics)
self.assertEqual(set(outputter.loss.keys()), set(['loss']))
self.assertEqual(set(outputter.predictions.keys()), set(['predictions']))
self.assertEqual(
set(outputter.metrics.keys()),
set([
'metrics_1/value', 'metrics_1/update_op', 'metrics_2/update_op',
'metrics_2/value'
]))
def test_train_signature_def(self):
with context.graph_mode():
loss = {'my_loss': constant_op.constant([0])}
predictions = {u'output1': constant_op.constant(['foo'])}
metric_obj = metrics_module.Mean()
metric_obj.update_state(constant_op.constant([0]))
metrics = {
'metrics_1': metric_obj,
'metrics_2': (constant_op.constant([0]), constant_op.constant([10]))
}
outputter = export_output_lib.TrainOutput(loss, predictions, metrics)
receiver = {u'features': constant_op.constant(100, shape=(100, 2)),
'labels': constant_op.constant(100, shape=(100, 1))}
sig_def = outputter.as_signature_def(receiver)
self.assertTrue('loss/my_loss' in sig_def.outputs)
self.assertTrue('metrics_1/value' in sig_def.outputs)
self.assertTrue('metrics_2/value' in sig_def.outputs)
self.assertTrue('predictions/output1' in sig_def.outputs)
self.assertTrue('features' in sig_def.inputs)
def test_eval_signature_def(self):
with context.graph_mode():
loss = {'my_loss': constant_op.constant([0])}
predictions = {u'output1': constant_op.constant(['foo'])}
outputter = export_output_lib.EvalOutput(loss, predictions, None)
receiver = {u'features': constant_op.constant(100, shape=(100, 2)),
'labels': constant_op.constant(100, shape=(100, 1))}
sig_def = outputter.as_signature_def(receiver)
self.assertTrue('loss/my_loss' in sig_def.outputs)
self.assertFalse('metrics/value' in sig_def.outputs)
self.assertTrue('predictions/output1' in sig_def.outputs)
self.assertTrue('features' in sig_def.inputs)
def test_metric_op_is_tensor(self):
"""Tests that ops.Operation is wrapped by a tensor for metric_ops."""
with context.graph_mode():
loss = {'my_loss': constant_op.constant([0])}
predictions = {u'output1': constant_op.constant(['foo'])}
metric_obj = metrics_module.Mean()
metric_obj.update_state(constant_op.constant([0]))
metrics = {
'metrics_1': metric_obj,
'metrics_2': (constant_op.constant([0]), control_flow_ops.no_op())
}
outputter = MockSupervisedOutput(loss, predictions, metrics)
self.assertTrue(outputter.metrics['metrics_1/update_op'].name.startswith(
'metric_op_wrapper'))
self.assertTrue(
isinstance(outputter.metrics['metrics_1/update_op'], ops.Tensor))
self.assertTrue(
isinstance(outputter.metrics['metrics_1/value'], ops.Tensor))
self.assertEqual(outputter.metrics['metrics_2/value'],
metrics['metrics_2'][0])
self.assertTrue(outputter.metrics['metrics_2/update_op'].name.startswith(
'metric_op_wrapper'))
self.assertTrue(
isinstance(outputter.metrics['metrics_2/update_op'], ops.Tensor))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/saved_model/model_utils/export_output_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for saving a Keras Model or Estimator to the SavedModel format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.python.saved_model.model_utils.export_output import *
from tensorflow.python.saved_model.model_utils.export_utils import build_all_signature_defs
from tensorflow.python.saved_model.model_utils.export_utils import export_outputs_for_mode
from tensorflow.python.saved_model.model_utils.export_utils import EXPORT_TAG_MAP
from tensorflow.python.saved_model.model_utils.export_utils import get_export_outputs
from tensorflow.python.saved_model.model_utils.export_utils import get_temp_export_dir
from tensorflow.python.saved_model.model_utils.export_utils import get_timestamped_export_dir
from tensorflow.python.saved_model.model_utils.export_utils import SIGNATURE_KEY_MAP
# pylint: enable=wildcard-import
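# A minimal sketch of how these re-exports are typically combined (the
# tensor names below are placeholders, not values required by the API):
#
#   from tensorflow.python.saved_model import model_utils
#
#   export_outputs = {
#       'serving_default': model_utils.PredictOutput({'logits': logits}),
#   }
#   signature_defs = model_utils.build_all_signature_defs(
#       receiver_tensors, export_outputs)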
|
tensorflow-master
|
tensorflow/python/saved_model/model_utils/__init__.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ModeKey Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.saved_model.model_utils import mode_keys
class ModeKeyMapTest(test.TestCase):
def test_map(self):
mode_map = mode_keys.ModeKeyMap(**{
mode_keys.KerasModeKeys.PREDICT: 3,
mode_keys.KerasModeKeys.TEST: 1
})
# Test dictionary __getitem__
self.assertEqual(3, mode_map[mode_keys.KerasModeKeys.PREDICT])
self.assertEqual(3, mode_map[mode_keys.EstimatorModeKeys.PREDICT])
self.assertEqual(1, mode_map[mode_keys.KerasModeKeys.TEST])
self.assertEqual(1, mode_map[mode_keys.EstimatorModeKeys.EVAL])
with self.assertRaises(KeyError):
_ = mode_map[mode_keys.KerasModeKeys.TRAIN]
with self.assertRaises(KeyError):
_ = mode_map[mode_keys.EstimatorModeKeys.TRAIN]
with self.assertRaisesRegexp(ValueError, 'Invalid mode'):
_ = mode_map['serve']
# Test common dictionary methods
self.assertLen(mode_map, 2)
self.assertEqual({1, 3}, set(mode_map.values()))
self.assertEqual(
{mode_keys.KerasModeKeys.TEST, mode_keys.KerasModeKeys.PREDICT},
set(mode_map.keys()))
# Map is immutable
with self.assertRaises(TypeError):
mode_map[mode_keys.KerasModeKeys.TEST] = 1
def test_invalid_init(self):
with self.assertRaisesRegexp(ValueError, 'Multiple keys/values found'):
_ = mode_keys.ModeKeyMap(**{
mode_keys.KerasModeKeys.PREDICT: 3,
mode_keys.EstimatorModeKeys.PREDICT: 1
})
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/saved_model/model_utils/mode_keys_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for different types of export output."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_def_utils
class ExportOutput(object):
"""Represents an output of a model that can be served.
These typically correspond to model heads.
"""
__metaclass__ = abc.ABCMeta
_SEPARATOR_CHAR = '/'
@abc.abstractmethod
def as_signature_def(self, receiver_tensors):
"""Generate a SignatureDef proto for inclusion in a MetaGraphDef.
The SignatureDef will specify outputs as described in this ExportOutput,
and will use the provided receiver_tensors as inputs.
Args:
receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
input nodes that will be fed.
"""
pass
def _check_output_key(self, key, error_label):
# For multi-head models, the key can be a tuple.
if isinstance(key, tuple):
key = self._SEPARATOR_CHAR.join(key)
if not isinstance(key, six.string_types):
raise ValueError(
'{} output key must be a string; got {}.'.format(error_label, key))
return key
def _wrap_and_check_outputs(
self, outputs, single_output_default_name, error_label=None):
"""Wraps raw tensors as dicts and checks type.
Note that we create a new dict here so that we can overwrite the keys
if necessary.
Args:
outputs: A `Tensor` or a dict of string to `Tensor`.
single_output_default_name: A string key for use in the output dict
if the provided `outputs` is a raw tensor.
      error_label: descriptive string for use in error messages. If None,
        single_output_default_name will be used.
Returns:
      A dict of tensors.
Raises:
ValueError: if the outputs dict keys are not strings or tuples of strings
or the values are not Tensors.
"""
if not isinstance(outputs, dict):
outputs = {single_output_default_name: outputs}
output_dict = {}
for key, value in outputs.items():
error_name = error_label or single_output_default_name
key = self._check_output_key(key, error_name)
if not isinstance(value, ops.Tensor):
raise ValueError(
'{} output value must be a Tensor; got {}.'.format(
error_name, value))
output_dict[key] = value
return output_dict
class ClassificationOutput(ExportOutput):
"""Represents the output of a classification head.
Either classes or scores or both must be set.
The classes `Tensor` must provide string labels, not integer class IDs.
If only classes is set, it is interpreted as providing top-k results in
descending order.
If only scores is set, it is interpreted as providing a score for every class
in order of class ID.
If both classes and scores are set, they are interpreted as zipped, so each
score corresponds to the class at the same index. Clients should not depend
on the order of the entries.
"""
def __init__(self, scores=None, classes=None):
"""Constructor for `ClassificationOutput`.
Args:
scores: A float `Tensor` giving scores (sometimes but not always
interpretable as probabilities) for each class. May be `None`, but
only if `classes` is set. Interpretation varies-- see class doc.
classes: A string `Tensor` giving predicted class labels. May be `None`,
but only if `scores` is set. Interpretation varies-- see class doc.
Raises:
ValueError: if neither classes nor scores is set, or one of them is not a
`Tensor` with the correct dtype.
"""
if (scores is not None
and not (isinstance(scores, ops.Tensor)
and scores.dtype.is_floating)):
raise ValueError('Classification scores must be a float32 Tensor; '
'got {}'.format(scores))
if (classes is not None
and not (isinstance(classes, ops.Tensor)
and dtypes.as_dtype(classes.dtype) == dtypes.string)):
raise ValueError('Classification classes must be a string Tensor; '
'got {}'.format(classes))
if scores is None and classes is None:
raise ValueError('At least one of scores and classes must be set.')
self._scores = scores
self._classes = classes
@property
def scores(self):
return self._scores
@property
def classes(self):
return self._classes
def as_signature_def(self, receiver_tensors):
if len(receiver_tensors) != 1:
raise ValueError('Classification input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
(_, examples), = receiver_tensors.items()
if dtypes.as_dtype(examples.dtype) != dtypes.string:
raise ValueError('Classification input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
return signature_def_utils.classification_signature_def(
examples, self.classes, self.scores)
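# A minimal usage sketch (illustrative; not part of the original module).
# It shows how a ClassificationOutput built from in-graph tensors becomes a
# SignatureDef; the tensor values and the 'examples' key are assumptions.
def _classification_output_example():
  classes = constant_op.constant(['cat', 'dog'])  # string class labels
  scores = constant_op.constant([0.7, 0.3], dtype=dtypes.float32)
  serialized = constant_op.constant(['serialized_example_bytes'])
  output = ClassificationOutput(scores=scores, classes=classes)
  # The receiver dict must hold exactly one string tensor.
  return output.as_signature_def({'examples': serialized})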
class RegressionOutput(ExportOutput):
"""Represents the output of a regression head."""
def __init__(self, value):
"""Constructor for `RegressionOutput`.
Args:
value: a float `Tensor` giving the predicted values. Required.
Raises:
ValueError: if the value is not a `Tensor` with dtype tf.float32.
"""
if not (isinstance(value, ops.Tensor) and value.dtype.is_floating):
raise ValueError('Regression output value must be a float32 Tensor; '
'got {}'.format(value))
self._value = value
@property
def value(self):
return self._value
def as_signature_def(self, receiver_tensors):
if len(receiver_tensors) != 1:
raise ValueError('Regression input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
(_, examples), = receiver_tensors.items()
if dtypes.as_dtype(examples.dtype) != dtypes.string:
raise ValueError('Regression input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
return signature_def_utils.regression_signature_def(examples, self.value)
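# A minimal usage sketch (illustrative; values are assumptions). Like
# ClassificationOutput, RegressionOutput requires a single string receiver
# tensor when converted to a SignatureDef.
def _regression_output_example():
  predicted = constant_op.constant([0.25, 1.5], dtype=dtypes.float32)
  serialized = constant_op.constant(['serialized_example_bytes'])
  output = RegressionOutput(value=predicted)
  return output.as_signature_def({'examples': serialized})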
class PredictOutput(ExportOutput):
"""Represents the output of a generic prediction head.
A generic prediction need not be either a classification or a regression.
Named outputs must be provided as a dict from string to `Tensor`,
"""
_SINGLE_OUTPUT_DEFAULT_NAME = 'output'
def __init__(self, outputs):
"""Constructor for PredictOutput.
Args:
outputs: A `Tensor` or a dict of string to `Tensor` representing the
predictions.
Raises:
ValueError: if the outputs is not dict, or any of its keys are not
strings, or any of its values are not `Tensor`s.
"""
self._outputs = self._wrap_and_check_outputs(
outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction')
@property
def outputs(self):
return self._outputs
def as_signature_def(self, receiver_tensors):
return signature_def_utils.predict_signature_def(receiver_tensors,
self.outputs)
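# A minimal usage sketch (illustrative; tensor names are assumptions).
# PredictOutput accepts either a single tensor (stored under the default
# key 'output') or a dict of named tensors, and places no dtype
# restriction on the receiver tensors.
def _predict_output_example():
  features = {'images': constant_op.constant([[0.0, 1.0]])}
  outputs = {'probabilities': constant_op.constant([[0.9, 0.1]]),
             'class_ids': constant_op.constant([[0]])}
  return PredictOutput(outputs).as_signature_def(features)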
class _SupervisedOutput(ExportOutput):
"""Represents the output of a supervised training or eval process."""
__metaclass__ = abc.ABCMeta
LOSS_NAME = 'loss'
PREDICTIONS_NAME = 'predictions'
METRICS_NAME = 'metrics'
METRIC_VALUE_SUFFIX = 'value'
METRIC_UPDATE_SUFFIX = 'update_op'
_loss = None
_predictions = None
_metrics = None
def __init__(self, loss=None, predictions=None, metrics=None):
"""Constructor for SupervisedOutput (ie, Train or Eval output).
Args:
loss: dict of Tensors or single Tensor representing calculated loss.
predictions: dict of Tensors or single Tensor representing model
predictions.
metrics: Dict of metric results keyed by name.
The values of the dict can be one of the following:
(1) instance of `Metric` class.
(2) (metric_value, update_op) tuples, or a single tuple.
metric_value must be a Tensor, and update_op must be a Tensor or Op.
Raises:
ValueError: if any of the outputs' dict keys are not strings or tuples of
strings or the values are not Tensors (or Operations in the case of
update_op).
"""
if loss is not None:
loss_dict = self._wrap_and_check_outputs(loss, self.LOSS_NAME)
self._loss = self._prefix_output_keys(loss_dict, self.LOSS_NAME)
if predictions is not None:
pred_dict = self._wrap_and_check_outputs(
predictions, self.PREDICTIONS_NAME)
self._predictions = self._prefix_output_keys(
pred_dict, self.PREDICTIONS_NAME)
if metrics is not None:
self._metrics = self._wrap_and_check_metrics(metrics)
def _prefix_output_keys(self, output_dict, output_name):
"""Prepend output_name to the output_dict keys if it doesn't exist.
This produces predictable prefixes for the pre-determined outputs
of SupervisedOutput.
Args:
output_dict: dict of string to Tensor, assumed valid.
output_name: prefix string to prepend to existing keys.
Returns:
dict with updated keys and existing values.
"""
new_outputs = {}
for key, val in output_dict.items():
key = self._prefix_key(key, output_name)
new_outputs[key] = val
return new_outputs
def _prefix_key(self, key, output_name):
if key.find(output_name) != 0:
key = output_name + self._SEPARATOR_CHAR + key
return key
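  # Key naming recap, combining _check_output_key and the helpers above
  # (hypothetical inputs): a key is only prefixed when it does not already
  # start with the section name, and tuple keys are joined with '/' first:
  #   {'loss': t}          -> 'loss'
  #   {'my_loss': t}       -> 'loss/my_loss'
  #   {('my', 'loss'): t}  -> 'loss/my/loss'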
def _wrap_and_check_metrics(self, metrics):
"""Handle the saving of metrics.
Metrics is either a tuple of (value, update_op), or a dict of such tuples.
Here, we separate out the tuples and create a dict with names to tensors.
Args:
metrics: Dict of metric results keyed by name.
The values of the dict can be one of the following:
(1) instance of `Metric` class.
(2) (metric_value, update_op) tuples, or a single tuple.
metric_value must be a Tensor, and update_op must be a Tensor or Op.
Returns:
      dict of output_names to tensors.
Raises:
ValueError: if the dict key is not a string, or the metric values or ops
are not tensors.
"""
if not isinstance(metrics, dict):
metrics = {self.METRICS_NAME: metrics}
outputs = {}
for key, value in metrics.items():
if isinstance(value, tuple):
metric_val, metric_op = value
else: # value is a keras.Metrics object
metric_val = value.result()
assert len(value.updates) == 1 # We expect only one update op.
metric_op = value.updates[0]
key = self._check_output_key(key, self.METRICS_NAME)
key = self._prefix_key(key, self.METRICS_NAME)
val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX
op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX
if not isinstance(metric_val, ops.Tensor):
raise ValueError(
'{} output value must be a Tensor; got {}.'.format(
key, metric_val))
if (not isinstance(metric_op, ops.Tensor) and
not isinstance(metric_op, ops.Operation)):
raise ValueError(
'{} update_op must be a Tensor or Operation; got {}.'.format(
key, metric_op))
# We must wrap any ops in a Tensor before export, as the SignatureDef
# proto expects tensors only. See b/109740581
metric_op_tensor = metric_op
if isinstance(metric_op, ops.Operation):
with ops.control_dependencies([metric_op]):
metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')
outputs[val_name] = metric_val
outputs[op_name] = metric_op_tensor
return outputs
@property
def loss(self):
return self._loss
@property
def predictions(self):
return self._predictions
@property
def metrics(self):
return self._metrics
@abc.abstractmethod
def _get_signature_def_fn(self):
"""Returns a function that produces a SignatureDef given desired outputs."""
pass
def as_signature_def(self, receiver_tensors):
signature_def_fn = self._get_signature_def_fn()
return signature_def_fn(
receiver_tensors, self.loss, self.predictions, self.metrics)
class TrainOutput(_SupervisedOutput):
"""Represents the output of a supervised training process.
This class generates the appropriate signature def for exporting
training output by type-checking and wrapping loss, predictions, and metrics
values.
"""
def _get_signature_def_fn(self):
return signature_def_utils.supervised_train_signature_def
class EvalOutput(_SupervisedOutput):
"""Represents the output of a supervised eval process.
This class generates the appropriate signature def for exporting
eval output by type-checking and wrapping loss, predictions, and metrics
values.
"""
def _get_signature_def_fn(self):
return signature_def_utils.supervised_eval_signature_def
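# A minimal end-to-end sketch (illustrative values; not part of the module).
# It shows how TrainOutput prefixes loss/prediction keys and expands each
# metric into '<name>/value' and '<name>/update_op' entries before the
# signature def is built; the receiver tensor name is an assumption.
def _supervised_output_example():
  loss = {'total': constant_op.constant([0.5])}
  predictions = {'logits': constant_op.constant([[1.0, 2.0]])}
  metrics = {'accuracy': (constant_op.constant([0.8]),   # (value, update_op)
                          constant_op.constant([0.81]))}
  train_output = TrainOutput(loss, predictions, metrics)
  # Exported keys: 'loss/total', 'predictions/logits',
  # 'metrics/accuracy/value', 'metrics/accuracy/update_op'.
  receiver = {'features': constant_op.constant([[0.0, 1.0]])}
  return train_output.as_signature_def(receiver)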
|
tensorflow-master
|
tensorflow/python/saved_model/model_utils/export_output.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model.model_utils import export_output
from tensorflow.python.saved_model.model_utils import export_utils
from tensorflow.python.saved_model.model_utils.mode_keys import KerasModeKeys
class ExportTest(test_util.TensorFlowTestCase):
@test_util.deprecated_graph_mode_only
def test_build_all_signature_defs_without_receiver_alternatives(self):
receiver_tensor = array_ops.placeholder(dtypes.string)
output_1 = constant_op.constant([1.])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.RegressionOutput(value=output_1),
"head-2": export_output.ClassificationOutput(classes=output_2),
"head-3": export_output.PredictOutput(outputs={
"some_output_3": output_3
}),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs)
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(receiver_tensor,
output_1),
"head-2":
signature_def_utils.classification_signature_def(receiver_tensor,
output_2, None),
"head-3":
signature_def_utils.predict_signature_def({
"input": receiver_tensor
}, {"some_output_3": output_3})
}
self.assertDictEqual(expected_signature_defs, signature_defs)
@test_util.deprecated_graph_mode_only
def test_build_all_signature_defs_with_dict_alternatives(self):
receiver_tensor = array_ops.placeholder(dtypes.string)
receiver_tensors_alternative_1 = {
"foo": array_ops.placeholder(dtypes.int64),
"bar": array_ops.sparse_placeholder(dtypes.float32)}
receiver_tensors_alternatives = {"other": receiver_tensors_alternative_1}
output_1 = constant_op.constant([1.])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.RegressionOutput(value=output_1),
"head-2": export_output.ClassificationOutput(classes=output_2),
"head-3": export_output.PredictOutput(outputs={
"some_output_3": output_3
}),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs, receiver_tensors_alternatives)
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(
receiver_tensor,
output_1),
"head-2":
signature_def_utils.classification_signature_def(
receiver_tensor,
output_2, None),
"head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensor},
{"some_output_3": output_3}),
"other:head-3":
signature_def_utils.predict_signature_def(
receiver_tensors_alternative_1,
{"some_output_3": output_3})
        # Note that the alternatives 'other:serving_default' and
        # 'other:head-2' are invalid, because regression and classification
        # signatures must take a single string input. Here we verify that
        # these invalid signatures are not generated by export_utils.
}
self.assertDictEqual(expected_signature_defs, signature_defs)
@test_util.deprecated_graph_mode_only
def test_build_all_signature_defs_with_single_alternatives(self):
receiver_tensor = array_ops.placeholder(dtypes.string)
receiver_tensors_alternative_1 = array_ops.placeholder(dtypes.int64)
receiver_tensors_alternative_2 = array_ops.sparse_placeholder(
dtypes.float32)
# Note we are passing single Tensors as values of
# receiver_tensors_alternatives, where normally that is a dict.
# In this case a dict will be created using the default receiver tensor
# name "input".
receiver_tensors_alternatives = {"other1": receiver_tensors_alternative_1,
"other2": receiver_tensors_alternative_2}
output_1 = constant_op.constant([1.])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.RegressionOutput(value=output_1),
"head-2": export_output.ClassificationOutput(classes=output_2),
"head-3": export_output.PredictOutput(outputs={
"some_output_3": output_3
}),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs, receiver_tensors_alternatives)
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(
receiver_tensor,
output_1),
"head-2":
signature_def_utils.classification_signature_def(
receiver_tensor,
output_2, None),
"head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensor},
{"some_output_3": output_3}),
"other1:head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensors_alternative_1},
{"some_output_3": output_3}),
"other2:head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensors_alternative_2},
{"some_output_3": output_3})
        # Note that the alternatives 'other:serving_default' and 'other:head-2'
        # are invalid, because regression and classification signatures must
        # take a single string input. Here we verify that these invalid
        # signatures are not generated by export_utils.
}
self.assertDictEqual(expected_signature_defs, signature_defs)
def test_build_all_signature_defs_export_outputs_required(self):
receiver_tensor = constant_op.constant(["11"])
with self.assertRaises(ValueError) as e:
export_utils.build_all_signature_defs(receiver_tensor, None)
self.assertTrue(str(e.exception).startswith(
"export_outputs must be a dict"))
def test_get_timestamped_export_dir(self):
export_dir_base = tempfile.mkdtemp() + "export/"
export_dir_1 = export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_2 = export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_3 = export_utils.get_timestamped_export_dir(
export_dir_base)
# Export directories should be named using a timestamp that is seconds
# since epoch. Such a timestamp is 10 digits long.
time_1 = os.path.basename(export_dir_1)
self.assertEqual(10, len(time_1))
time_2 = os.path.basename(export_dir_2)
self.assertEqual(10, len(time_2))
time_3 = os.path.basename(export_dir_3)
self.assertEqual(10, len(time_3))
self.assertTrue(int(time_1) < int(time_2))
self.assertTrue(int(time_2) < int(time_3))
@test_util.deprecated_graph_mode_only
def test_build_all_signature_defs_serving_only(self):
receiver_tensor = {"input": array_ops.placeholder(dtypes.string)}
output_1 = constant_op.constant([1.])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.PredictOutput(outputs=output_1),
"train": export_output.TrainOutput(loss=output_1),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs)
expected_signature_defs = {
"serving_default": signature_def_utils.predict_signature_def(
receiver_tensor, {"output": output_1})
}
self.assertDictEqual(expected_signature_defs, signature_defs)
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs, serving_only=False)
expected_signature_defs.update({
"train": signature_def_utils.supervised_train_signature_def(
receiver_tensor, loss={"loss": output_1})
})
self.assertDictEqual(expected_signature_defs, signature_defs)
@test_util.deprecated_graph_mode_only
def test_export_outputs_for_mode(self):
predictions = {"predictions": constant_op.constant([1.])}
loss = {"loss": constant_op.constant([2.])}
metrics = {
"metrics": (constant_op.constant([3.]), constant_op.constant([4.]))}
expected_metrics = {
"metrics/value": metrics["metrics"][0],
"metrics/update_op": metrics["metrics"][1]
}
def _build_export_output(mode):
return export_utils.export_outputs_for_mode(
mode, None, predictions, loss, metrics)
ret = _build_export_output(KerasModeKeys.TRAIN)
self.assertIn(signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY, ret)
export_out = ret[signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY]
self.assertIsInstance(export_out, export_output.TrainOutput)
self.assertEqual(export_out.predictions, predictions)
self.assertEqual(export_out.loss, loss)
self.assertEqual(export_out.metrics, expected_metrics)
ret = _build_export_output(KerasModeKeys.TEST)
self.assertIn(signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY, ret)
export_out = ret[signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY]
self.assertIsInstance(export_out, export_output.EvalOutput)
self.assertEqual(export_out.predictions, predictions)
self.assertEqual(export_out.loss, loss)
self.assertEqual(export_out.metrics, expected_metrics)
ret = _build_export_output(KerasModeKeys.PREDICT)
self.assertIn(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, ret)
export_out = ret[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
self.assertIsInstance(export_out, export_output.PredictOutput)
self.assertEqual(export_out.outputs, predictions)
classes = constant_op.constant(["class5"])
ret = export_utils.export_outputs_for_mode(
KerasModeKeys.PREDICT,
{"classify": export_output.ClassificationOutput(
classes=classes)})
self.assertIn("classify", ret)
export_out = ret["classify"]
self.assertIsInstance(export_out, export_output.ClassificationOutput)
self.assertEqual(export_out.classes, classes)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/saved_model/model_utils/export_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import base_layer as keras_base_layer
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.layers import base as base_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class BaseLayerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testLayerProperties(self):
layer = base_layers.Layer(name='my_layer')
self.assertEqual(layer.variables, [])
self.assertEqual(layer.trainable_variables, [])
self.assertEqual(layer.non_trainable_variables, [])
if not context.executing_eagerly():
# updates, losses only supported in GRAPH mode
self.assertEqual(layer.updates, [])
self.assertEqual(layer.losses, [])
self.assertEqual(layer.built, False)
layer = base_layers.Layer(name='my_layer', trainable=False)
self.assertEqual(layer.trainable, False)
@test_util.run_in_graph_and_eager_modes
def testInt64Layer(self):
layer = base_layers.Layer(name='my_layer', dtype='int64')
layer.add_variable('my_var', [2, 2])
self.assertEqual(layer.name, 'my_layer')
@test_util.run_in_graph_and_eager_modes
def testKerasStyleAddWeight(self):
keras_layer = keras_base_layer.Layer(name='keras_layer')
with ops.name_scope('foo'):
keras_variable = keras_layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(keras_variable.name, 'foo/my_var:0')
with ops.name_scope('baz'):
old_style_layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
variable = old_style_layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'my_layer/my_var:0')
with base_layers.keras_style_scope():
layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
with ops.name_scope('bar'):
variable = layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'bar/my_var:0')
@test_util.run_in_graph_and_eager_modes
def testAddWeight(self):
layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
variable = layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'my_layer/my_var:0')
self.assertEqual(layer.variables, [variable])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
layer.variables,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Test non-trainable variable creation.
# layer.add_variable should work even outside `build` and `call`.
variable_2 = layer.add_variable(
'non_trainable_var', [2, 2],
initializer=init_ops.zeros_initializer(),
trainable=False)
self.assertEqual(layer.variables, [variable, variable_2])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [variable_2])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
_ = layer.add_variable(
'reg_var', [2, 2],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(len(layer.losses), 1)
added_variable = [False]
# Test that sync `ON_READ` variables are defaulted to be non-trainable.
variable_3 = layer.add_variable(
'sync_on_read_var', [2, 2],
initializer=init_ops.zeros_initializer(),
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertEqual(layer.non_trainable_variables, [variable_2, variable_3])
@def_function.function
def function_adds_weight():
if not added_variable[0]:
layer.add_variable(
'reg_var_from_function', [2, 2],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
added_variable[0] = True
function_adds_weight()
self.assertEqual(len(layer.losses), 2)
def testInvalidTrainableSynchronizationCombination(self):
layer = base_layers.Layer(name='my_layer')
with self.assertRaisesRegexp(
ValueError, 'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.'):
_ = layer.add_variable(
'v', [2, 2],
initializer=init_ops.zeros_initializer(),
synchronization=variable_scope.VariableSynchronization.ON_READ,
trainable=True)
@test_util.run_deprecated_v1
  def testReusePartitionedVariablesAndRegularizers(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
partitioner = partitioned_variables.fixed_size_partitioner(3)
for reuse in [False, True]:
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
partitioner=partitioner,
reuse=reuse):
layer = base_layers.Layer(name='my_layer')
_ = layer.add_variable(
'reg_part_var', [4, 4],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 3)
@test_util.run_in_graph_and_eager_modes
def testCall(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if not context.executing_eagerly():
# op is only supported in GRAPH mode
self.assertEqual(outputs.op.name, 'my_layer/Square')
@test_util.run_in_graph_and_eager_modes
def testDeepCopy(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
layer._private_tensor = random_ops.random_uniform(())
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if not context.executing_eagerly():
# op only supported in GRAPH mode.
self.assertEqual(outputs.op.name, 'my_layer/Square')
layer_copy = copy.deepcopy(layer)
self.assertEqual(layer_copy.name, layer.name)
self.assertEqual(layer_copy._scope.name, layer._scope.name)
self.assertEqual(layer_copy._private_tensor, layer._private_tensor)
@test_util.run_in_graph_and_eager_modes
def testScopeNaming(self):
class PrivateLayer(base_layers.Layer):
def call(self, inputs):
return inputs
inputs = random_ops.random_uniform((5,))
default_layer = PrivateLayer()
_ = default_layer.apply(inputs)
self.assertEqual(default_layer._scope.name, 'private_layer')
default_layer1 = PrivateLayer()
default_layer1.apply(inputs)
self.assertEqual(default_layer1._scope.name, 'private_layer_1')
my_layer = PrivateLayer(name='my_layer')
my_layer.apply(inputs)
self.assertEqual(my_layer._scope.name, 'my_layer')
my_layer1 = PrivateLayer(name='my_layer')
my_layer1.apply(inputs)
self.assertEqual(my_layer1._scope.name, 'my_layer_1')
my_layer2 = PrivateLayer(name='my_layer')
my_layer2.apply(inputs)
self.assertEqual(my_layer2._scope.name, 'my_layer_2')
# Name scope shouldn't affect names.
with ops.name_scope('some_name_scope'):
default_layer2 = PrivateLayer()
default_layer2.apply(inputs)
self.assertEqual(default_layer2._scope.name, 'private_layer_2')
my_layer3 = PrivateLayer(name='my_layer')
my_layer3.apply(inputs)
self.assertEqual(my_layer3._scope.name, 'my_layer_3')
other_layer = PrivateLayer(name='other_layer')
other_layer.apply(inputs)
self.assertEqual(other_layer._scope.name, 'other_layer')
# Variable scope gets added to scope names.
with variable_scope.variable_scope('var_scope'):
default_layer_scoped = PrivateLayer()
default_layer_scoped.apply(inputs)
self.assertEqual(default_layer_scoped._scope.name,
'var_scope/private_layer')
my_layer_scoped = PrivateLayer(name='my_layer')
my_layer_scoped.apply(inputs)
self.assertEqual(my_layer_scoped._scope.name, 'var_scope/my_layer')
my_layer_scoped1 = PrivateLayer(name='my_layer')
my_layer_scoped1.apply(inputs)
self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1')
@test_util.run_in_graph_and_eager_modes
def testInputSpecNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected ndim=2'):
layer.apply(constant_op.constant([1]))
# Note that we re-create the layer since in Eager mode, input spec checks
# only happen on first call.
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecMinNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(min_ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected min_ndim=2'):
layer.apply(constant_op.constant([1]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[[1], [2]]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecMaxNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(max_ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected max_ndim=2'):
layer.apply(constant_op.constant([[[1], [2]]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecDtypeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(dtype='float32')
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected dtype=float32'):
layer.apply(constant_op.constant(1, dtype=dtypes.int32))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant(1.0, dtype=dtypes.float32))
@test_util.run_in_graph_and_eager_modes
def testInputSpecAxesCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(axes={-1: 2})
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected axis'):
layer.apply(constant_op.constant([1, 2, 3]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1, 2]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecShapeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(shape=(None, 3))
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected shape'):
layer.apply(constant_op.constant([[1, 2]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2, 3], [4, 5, 6]]))
@test_util.run_in_graph_and_eager_modes
def testNoInputSpec(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = None
def call(self, inputs):
return inputs
layer = CustomerLayer()
layer.apply(constant_op.constant(1))
# Works
if not context.executing_eagerly():
layer.apply(array_ops.placeholder('int32'))
layer.apply(array_ops.placeholder('int32', shape=(2, 3)))
@test_util.run_in_graph_and_eager_modes
def test_count_params(self):
dense = core_layers.Dense(16)
dense.build((None, 4))
self.assertEqual(dense.count_params(), 16 * 4 + 16)
dense = core_layers.Dense(16)
with self.assertRaises(ValueError):
dense.count_params()
@test_util.run_in_graph_and_eager_modes
def testDictInputOutput(self):
class DictLayer(base_layers.Layer):
def call(self, inputs):
return {'l' + key: inputs[key] for key in inputs}
layer = DictLayer()
if context.executing_eagerly():
i1 = constant_op.constant(3)
i2 = constant_op.constant(4.0)
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
self.assertEqual(3, result['label'].numpy())
self.assertEqual(4.0, result['logits'].numpy())
else:
i1 = array_ops.placeholder('int32')
i2 = array_ops.placeholder('float32')
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
@test_util.run_deprecated_v1
def testActivityRegularizer(self):
regularizer = math_ops.reduce_sum
layer = base_layers.Layer(activity_regularizer=regularizer)
x = array_ops.placeholder('int32')
layer.apply(x)
self.assertEqual(len(layer.get_losses_for(x)), 1)
def testNameScopeIsConsistentWithVariableScope(self):
# Github issue 13429.
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.my_var = self.add_variable('my_var', (), dtypes.float32)
self.built = True
def call(self, inputs):
return math_ops.multiply(inputs, self.my_var, name='my_op')
def _gen_layer(x, name=None):
layer = MyLayer(name=name)
out = layer.apply(x)
return layer, out
# unnamed layer
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x)
layer1, op1 = _gen_layer(op)
layer2, op2 = _gen_layer(op1)
self.assertEqual(layer.my_var.name, 'my_layer/my_var:0')
self.assertEqual(op.name, 'my_layer/my_op:0')
self.assertEqual(layer1.my_var.name, 'my_layer_1/my_var:0')
self.assertEqual(op1.name, 'my_layer_1/my_op:0')
self.assertEqual(layer2.my_var.name, 'my_layer_2/my_var:0')
self.assertEqual(op2.name, 'my_layer_2/my_op:0')
# name starts from zero
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x, name='name')
layer1, op1 = _gen_layer(op, name='name_1')
layer2, op2 = _gen_layer(op1, name='name_2')
self.assertEqual(layer.my_var.name, 'name/my_var:0')
self.assertEqual(op.name, 'name/my_op:0')
self.assertEqual(layer1.my_var.name, 'name_1/my_var:0')
self.assertEqual(op1.name, 'name_1/my_op:0')
self.assertEqual(layer2.my_var.name, 'name_2/my_var:0')
self.assertEqual(op2.name, 'name_2/my_op:0')
# name starts from one
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x, name='name_1')
layer1, op1 = _gen_layer(op, name='name_2')
layer2, op2 = _gen_layer(op1, name='name_3')
self.assertEqual(layer.my_var.name, 'name_1/my_var:0')
self.assertEqual(op.name, 'name_1/my_op:0')
self.assertEqual(layer1.my_var.name, 'name_2/my_var:0')
self.assertEqual(op1.name, 'name_2/my_op:0')
self.assertEqual(layer2.my_var.name, 'name_3/my_var:0')
self.assertEqual(op2.name, 'name_3/my_op:0')
def testVariablesAreLiftedFromFunctionBuildingGraphs(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.my_var = self.add_variable('my_var', (), dtypes.float32)
self.built = True
def call(self, inputs):
return inputs
outer_graph = ops.get_default_graph()
function_building_graph = ops.Graph()
function_building_graph._building_function = True
with outer_graph.as_default():
with function_building_graph.as_default():
layer = MyLayer()
# Create a variable by invoking build through __call__ and assert that
# it is both tracked and lifted into the outer graph.
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
layer.apply(inputs)
self.assertEqual(len(layer.variables), 1)
self.assertEqual(len(layer.trainable_variables), 1)
self.assertEqual(layer.variables[0].graph, outer_graph)
@test_util.run_deprecated_v1
def testGetUpdateFor(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(),
dtypes.float32,
trainable=False)
self.b = self.add_variable('b',
(),
dtypes.float32,
trainable=False)
self.add_update(state_ops.assign_add(self.a, 1., name='b_update'))
self.built = True
def call(self, inputs):
self.add_update(state_ops.assign_add(self.a, inputs, name='a_update'),
inputs=True)
return inputs + 1
layer = MyLayer()
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.get_updates_for(None)), 1)
self.assertEqual(len(layer.get_updates_for([inputs])), 1)
self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_updates_for([outputs])), 0)
# Call same layer on new input, creating one more conditional update
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.updates), 3)
self.assertEqual(len(layer.get_updates_for(None)), 1)
# Check that we are successfully filtering out irrelevant updates
self.assertEqual(len(layer.get_updates_for([inputs])), 1)
self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_updates_for([outputs])), 0)
@test_util.run_deprecated_v1
def testGetLossesFor(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(),
dtypes.float32,
trainable=False)
self.b = self.add_variable('b',
(),
dtypes.float32,
trainable=False)
self.add_loss(self.a)
self.built = True
def call(self, inputs):
self.add_loss(inputs, inputs=True)
return inputs + 1
layer = MyLayer()
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.losses), 2)
self.assertEqual(len(layer.get_losses_for(None)), 1)
self.assertEqual(len(layer.get_losses_for([inputs])), 1)
self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_losses_for([outputs])), 0)
# Call same layer on new input, creating one more conditional loss
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.losses), 3)
self.assertEqual(len(layer.get_losses_for(None)), 1)
# Check that we are successfully filtering out irrelevant losses
self.assertEqual(len(layer.get_losses_for([inputs])), 1)
self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_losses_for([outputs])), 0)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/layers/base_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the convolutional layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.layers import base
from tensorflow.python.ops import init_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['layers.Conv1D'])
class Conv1D(keras_layers.Conv1D, base.Layer):
"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying the
length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: An integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self, filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv1D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name, **kwargs)
@deprecation.deprecated(
date=None,
instructions='Use `tf.keras.layers.Conv1D` instead.')
@tf_export(v1=['layers.conv1d'])
def conv1d(inputs,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for 1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying the
length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: An integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = Conv1D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
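# --- Illustrative usage sketch (not part of the original file) ---
# A minimal graph-mode example of the functional `conv1d` interface above.
# The helper name `_example_conv1d_usage` and the placeholder shape are
# hypothetical; this is a sketch, assuming graph (non-eager) execution.
def _example_conv1d_usage():
  from tensorflow.python.framework import dtypes as _dtypes
  from tensorflow.python.framework import ops as _ops
  from tensorflow.python.ops import array_ops as _array_ops
  with _ops.Graph().as_default():
    # (batch, length, channels) input for `data_format='channels_last'`.
    signal = _array_ops.placeholder(_dtypes.float32, shape=(None, 128, 8))
    # 16 filters of width 5; 'same' padding keeps the length: (None, 128, 16).
    return conv1d(signal, filters=16, kernel_size=5, padding='same')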
@tf_export(v1=['layers.Conv2D'])
class Conv2D(keras_layers.Conv2D, base.Layer):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv2D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name, **kwargs)
@deprecation.deprecated(
date=None,
instructions='Use `tf.keras.layers.Conv2D` instead.')
@tf_export(v1=['layers.conv2d'])
def conv2d(inputs,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the 2D convolution layer.
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
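# --- Illustrative usage sketch (not part of the original file) ---
# A minimal graph-mode example of the functional `conv2d` interface above,
# showing a strided convolution that halves the spatial dimensions. The
# helper name and shapes are hypothetical; assumes graph (non-eager) mode.
def _example_conv2d_usage():
  from tensorflow.python.framework import dtypes as _dtypes
  from tensorflow.python.framework import ops as _ops
  from tensorflow.python.ops import array_ops as _array_ops
  with _ops.Graph().as_default():
    # (batch, height, width, channels) input for `channels_last`.
    images = _array_ops.placeholder(_dtypes.float32, shape=(None, 32, 32, 3))
    # 3x3 kernel, stride 2, 'same' padding -> output shape (None, 16, 16, 64).
    return conv2d(images, filters=64, kernel_size=3, strides=2, padding='same')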
@tf_export(v1=['layers.Conv3D'])
class Conv3D(keras_layers.Conv3D, base.Layer):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth,
height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
dilation_rate: An integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self, filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv3D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name, **kwargs)
@deprecation.deprecated(
date=None,
instructions='Use `tf.keras.layers.Conv3D` instead.')
@tf_export(v1=['layers.conv3d'])
def conv3d(inputs,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the 3D convolution layer.
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth,
height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
dilation_rate: An integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = Conv3D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
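# --- Illustrative usage sketch (not part of the original file) ---
# A minimal graph-mode example of the functional `conv3d` interface above,
# operating on a 5D volume. Helper name and shapes are hypothetical.
def _example_conv3d_usage():
  from tensorflow.python.framework import dtypes as _dtypes
  from tensorflow.python.framework import ops as _ops
  from tensorflow.python.ops import array_ops as _array_ops
  with _ops.Graph().as_default():
    # (batch, depth, height, width, channels) input for `channels_last`.
    volumes = _array_ops.placeholder(
        _dtypes.float32, shape=(None, 16, 32, 32, 1))
    # 3x3x3 kernel, 'same' padding keeps spatial dims: (None, 16, 32, 32, 8).
    return conv3d(volumes, filters=8, kernel_size=3, padding='same')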
@tf_export(v1=['layers.SeparableConv1D'])
class SeparableConv1D(keras_layers.SeparableConv1D, base.Layer):
"""Depthwise separable 1D convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial
dimensions of the filters.
strides: A single integer specifying the strides
of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: A single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self, filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer=None,
pointwise_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(SeparableConv1D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
@tf_export(v1=['layers.SeparableConv2D'])
class SeparableConv2D(keras_layers.SeparableConv2D, base.Layer):
"""Depthwise separable 2D convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer=None,
pointwise_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(SeparableConv2D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
@deprecation.deprecated(
date=None,
instructions='Use `tf.keras.layers.SeparableConv1D` instead.')
@tf_export(v1=['layers.separable_conv1d'])
def separable_conv1d(inputs,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer=None,
pointwise_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the depthwise separable 1D convolution layer.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Arguments:
inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial
dimensions of the filters.
strides: A single integer specifying the strides
of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: A single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = SeparableConv1D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
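# --- Illustrative usage sketch (not part of the original file) ---
# A minimal graph-mode example of `separable_conv1d` above, showing
# `depth_multiplier`: the depthwise stage produces in_channels *
# depth_multiplier intermediate channels before the pointwise mix.
# Helper name and shapes are hypothetical.
def _example_separable_conv1d_usage():
  from tensorflow.python.framework import dtypes as _dtypes
  from tensorflow.python.framework import ops as _ops
  from tensorflow.python.ops import array_ops as _array_ops
  with _ops.Graph().as_default():
    # (batch, length, channels); 8 * 2 = 16 depthwise channels internally.
    signal = _array_ops.placeholder(_dtypes.float32, shape=(None, 64, 8))
    return separable_conv1d(
        signal, filters=32, kernel_size=3, depth_multiplier=2, padding='same')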
@deprecation.deprecated(
date=None,
instructions='Use `tf.keras.layers.SeparableConv2D` instead.')
@tf_export(v1=['layers.separable_conv2d'])
def separable_conv2d(inputs,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer=None,
pointwise_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the depthwise separable 2D convolution layer.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Arguments:
inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = SeparableConv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
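# --- Illustrative usage sketch (not part of the original file) ---
# A minimal graph-mode example of `separable_conv2d` above; the depthwise
# and pointwise kernels are created internally from the arguments shown.
# Helper name and shapes are hypothetical.
def _example_separable_conv2d_usage():
  from tensorflow.python.framework import dtypes as _dtypes
  from tensorflow.python.framework import ops as _ops
  from tensorflow.python.ops import array_ops as _array_ops
  with _ops.Graph().as_default():
    images = _array_ops.placeholder(_dtypes.float32, shape=(None, 32, 32, 3))
    # 'same' padding keeps spatial dims; output shape is (None, 32, 32, 64).
    return separable_conv2d(images, filters=64, kernel_size=3, padding='same')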
@tf_export(v1=['layers.Conv2DTranspose'])
class Conv2DTranspose(keras_layers.Conv2DTranspose, base.Layer):
"""Transposed 2D convolution layer (sometimes called 2D Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
all spatial dimensions.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
@deprecation.deprecated(
date=None,
instructions='Use `tf.keras.layers.Conv2DTranspose` instead.')
@tf_export(v1=['layers.conv2d_transpose'])
def conv2d_transpose(inputs,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for transposed 2D convolution layer.
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
Arguments:
inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
all spatial dimensions.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
activation: Activation function. Set it to `None` to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If `None`, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = Conv2DTranspose(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
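# --- Illustrative usage sketch (not part of the original file) ---
# A minimal graph-mode example of `conv2d_transpose` above, used for
# upsampling: with 'same' padding and stride 2 the spatial dimensions double.
# Helper name and shapes are hypothetical.
def _example_conv2d_transpose_usage():
  from tensorflow.python.framework import dtypes as _dtypes
  from tensorflow.python.framework import ops as _ops
  from tensorflow.python.ops import array_ops as _array_ops
  with _ops.Graph().as_default():
    features = _array_ops.placeholder(_dtypes.float32, shape=(None, 8, 8, 64))
    # Output shape is (None, 16, 16, 32).
    return conv2d_transpose(
        features, filters=32, kernel_size=3, strides=2, padding='same')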
@tf_export(v1=['layers.Conv3DTranspose'])
class Conv3DTranspose(keras_layers.Conv3DTranspose, base.Layer):
"""Transposed 3D convolution layer (sometimes called 3D Deconvolution).
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for all spatial
dimensions.
strides: An integer or tuple/list of 3 integers, specifying the strides
of the convolution along the depth, height and width.
Can be a single integer to specify the same value for all spatial
dimensions.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
activation: Activation function. Set it to `None` to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If `None`, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format='channels_last',
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv3DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
@deprecation.deprecated(
date=None,
instructions='Use `tf.keras.layers.Conv3DTranspose` instead.')
@tf_export(v1=['layers.conv3d_transpose'])
def conv3d_transpose(inputs,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format='channels_last',
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for transposed 3D convolution layer.
Arguments:
inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 3 positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 3 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
all spatial dimensions.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = Conv3DTranspose(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
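# --- Illustrative usage sketch (not part of the original file) ---
# A minimal graph-mode example of `conv3d_transpose` above; with 'same'
# padding and stride 2 every spatial dimension doubles. Helper name and
# shapes are hypothetical.
def _example_conv3d_transpose_usage():
  from tensorflow.python.framework import dtypes as _dtypes
  from tensorflow.python.framework import ops as _ops
  from tensorflow.python.ops import array_ops as _array_ops
  with _ops.Graph().as_default():
    volumes = _array_ops.placeholder(
        _dtypes.float32, shape=(None, 4, 8, 8, 32))
    # Output shape is (None, 8, 16, 16, 16).
    return conv3d_transpose(
        volumes, filters=16, kernel_size=3, strides=2, padding='same')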
# Aliases
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Deconvolution2D = Deconv2D = Conv2DTranspose
Convolution3DTranspose = Deconvolution3D = Deconv3D = Conv3DTranspose
convolution1d = conv1d
convolution2d = conv2d
convolution3d = conv3d
separable_convolution2d = separable_conv2d
convolution2d_transpose = deconvolution2d = deconv2d = conv2d_transpose
convolution3d_transpose = deconvolution3d = deconv3d = conv3d_transpose
|
tensorflow-master
|
tensorflow/python/layers/convolutional.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ConvUtilsTest(test.TestCase):
def testConvertDataFormat(self):
self.assertEqual('NCDHW', utils.convert_data_format('channels_first', 5))
self.assertEqual('NCHW', utils.convert_data_format('channels_first', 4))
self.assertEqual('NCW', utils.convert_data_format('channels_first', 3))
self.assertEqual('NHWC', utils.convert_data_format('channels_last', 4))
self.assertEqual('NWC', utils.convert_data_format('channels_last', 3))
self.assertEqual('NDHWC', utils.convert_data_format('channels_last', 5))
with self.assertRaises(ValueError):
utils.convert_data_format('invalid', 2)
def testNormalizeTuple(self):
self.assertEqual((2, 2, 2), utils.normalize_tuple(2, n=3, name='strides'))
self.assertEqual(
(2, 1, 2), utils.normalize_tuple((2, 1, 2), n=3, name='strides'))
with self.assertRaises(ValueError):
utils.normalize_tuple((2, 1), n=3, name='strides')
with self.assertRaises(ValueError):
utils.normalize_tuple(None, n=3, name='strides')
def testNormalizeDataFormat(self):
self.assertEqual(
'channels_last', utils.normalize_data_format('Channels_Last'))
self.assertEqual(
'channels_first', utils.normalize_data_format('CHANNELS_FIRST'))
with self.assertRaises(ValueError):
utils.normalize_data_format('invalid')
def testNormalizePadding(self):
self.assertEqual('same', utils.normalize_padding('SAME'))
self.assertEqual('valid', utils.normalize_padding('VALID'))
with self.assertRaises(ValueError):
utils.normalize_padding('invalid')
def testConvOutputLength(self):
self.assertEqual(4, utils.conv_output_length(4, 2, 'same', 1, 1))
self.assertEqual(2, utils.conv_output_length(4, 2, 'same', 2, 1))
self.assertEqual(3, utils.conv_output_length(4, 2, 'valid', 1, 1))
self.assertEqual(2, utils.conv_output_length(4, 2, 'valid', 2, 1))
self.assertEqual(5, utils.conv_output_length(4, 2, 'full', 1, 1))
self.assertEqual(3, utils.conv_output_length(4, 2, 'full', 2, 1))
self.assertEqual(2, utils.conv_output_length(5, 2, 'valid', 2, 2))
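  # Note (added for clarity): `utils.conv_output_length` follows the usual
  # output-length formulas, with dilated_kernel = kernel + (kernel - 1) *
  # (dilation - 1):
  #   'same'/'causal': ceil(input / stride)
  #   'valid':         ceil((input - dilated_kernel + 1) / stride)
  #   'full':          ceil((input + dilated_kernel - 1) / stride)
  # e.g. input=5, kernel=2, dilation=2 -> dilated_kernel=3, so 'valid' with
  # stride 2 gives ceil((5 - 3 + 1) / 2) = 2, matching the last assertion.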
def testConvInputLength(self):
self.assertEqual(3, utils.conv_input_length(4, 2, 'same', 1))
self.assertEqual(2, utils.conv_input_length(2, 2, 'same', 2))
self.assertEqual(4, utils.conv_input_length(3, 2, 'valid', 1))
self.assertEqual(4, utils.conv_input_length(2, 2, 'valid', 2))
self.assertEqual(3, utils.conv_input_length(4, 2, 'full', 1))
self.assertEqual(4, utils.conv_input_length(3, 2, 'full', 2))
def testDeconvOutputLength(self):
self.assertEqual(4, utils.deconv_output_length(4, 2, 'same', 1))
self.assertEqual(8, utils.deconv_output_length(4, 2, 'same', 2))
self.assertEqual(5, utils.deconv_output_length(4, 2, 'valid', 1))
self.assertEqual(8, utils.deconv_output_length(4, 2, 'valid', 2))
self.assertEqual(3, utils.deconv_output_length(4, 2, 'full', 1))
self.assertEqual(6, utils.deconv_output_length(4, 2, 'full', 2))
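  # Note (added for clarity): `utils.deconv_output_length` inverts the forward
  # formulas: output = input * stride, plus max(kernel - stride, 0) for
  # 'valid', minus (stride + kernel - 2) for 'full', unchanged for 'same'.
  # e.g. input=4, kernel=2, 'valid', stride=1 -> 4 * 1 + max(2 - 1, 0) = 5.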
class ConstantValueTest(test.TestCase):
@test_util.run_deprecated_v1
def testConstantValue(self):
f1 = lambda: constant_op.constant(5)
f2 = lambda: constant_op.constant(32)
# Boolean pred
self.assertEqual(5, utils.constant_value(utils.smart_cond(True, f1, f2)))
self.assertEqual(32, utils.constant_value(utils.smart_cond(False, f1, f2)))
# Integer pred
self.assertEqual(5, utils.constant_value(utils.smart_cond(1, f1, f2)))
self.assertEqual(32, utils.constant_value(utils.smart_cond(0, f1, f2)))
# Unknown pred
pred = array_ops.placeholder_with_default(True, shape=())
self.assertIsNone(utils.constant_value(utils.smart_cond(pred, f1, f2)))
    # Error case
with self.assertRaises(TypeError):
utils.constant_value(5)
class GetReachableFromInputsTest(test.TestCase):
@test_util.run_deprecated_v1
def testGetReachableFromInputs(self):
pl_1 = array_ops.placeholder(shape=None, dtype='float32')
pl_2 = array_ops.placeholder(shape=None, dtype='float32')
pl_3 = array_ops.placeholder(shape=None, dtype='float32')
x_1 = pl_1 + pl_2
x_2 = pl_2 * 2
x_3 = pl_3 + 1
x_4 = x_1 + x_2
x_5 = x_3 * pl_1
self.assertEqual({pl_1, x_1, x_4, x_5},
utils.get_reachable_from_inputs([pl_1]))
self.assertEqual({pl_1, pl_2, x_1, x_2, x_4, x_5},
utils.get_reachable_from_inputs([pl_1, pl_2]))
self.assertEqual({pl_3, x_3, x_5}, utils.get_reachable_from_inputs([pl_3]))
self.assertEqual({x_3, x_5}, utils.get_reachable_from_inputs([x_3]))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/layers/utils_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.layers import normalization as normalization_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
@test_util.run_v1_only('b/120545219')
class BNTest(test.TestCase):
def _simple_model(self, image, fused, freeze_mode):
output_channels, kernel_size = 2, 3
conv = conv_layers.conv2d(
image,
output_channels,
kernel_size,
use_bias=False,
kernel_initializer=init_ops.ones_initializer())
bn_layer = normalization_layers.BatchNormalization(fused=fused)
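    # Test-only switch (comment added for clarity): disable Bessel's
    # correction on the variance so fused and non-fused batch norm produce
    # comparable moving statistics in these checkpoint tests.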
bn_layer._bessels_correction_test_only = False
training = not freeze_mode
bn = bn_layer.apply(conv, training=training)
loss = math_ops.reduce_sum(math_ops.abs(bn))
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
if not freeze_mode:
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
with ops.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
else:
train_op = optimizer.minimize(loss)
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
return loss, train_op, saver
def _train(self,
checkpoint_path,
shape,
use_gpu,
is_fused,
restore=False,
freeze_mode=False,
dtype=dtypes.float32):
ops.reset_default_graph()
graph = ops.get_default_graph()
with self.session(graph=graph, use_gpu=use_gpu) as sess:
image = array_ops.placeholder(dtype=dtype, shape=shape)
loss, train_op, saver = self._simple_model(image, is_fused, freeze_mode)
if restore:
saver.restore(sess, checkpoint_path)
else:
self.evaluate(variables.global_variables_initializer())
np.random.seed(0)
for _ in range(2):
image_val = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
sess.run([loss, train_op], feed_dict={image: image_val})
if restore:
all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
all_vars_values = [var.eval() for var in all_vars]
return all_vars_values
else:
saver.save(sess, checkpoint_path)
def _infer(self, checkpoint_path, image_val, shape, use_gpu, is_fused):
dtype = image_val.dtype
ops.reset_default_graph()
graph = ops.get_default_graph()
with self.session(graph=graph, use_gpu=use_gpu) as sess:
image = array_ops.placeholder(dtype=dtype, shape=shape)
loss, _, saver = self._simple_model(image, is_fused, True)
saver.restore(sess, checkpoint_path)
loss_val = sess.run(loss, feed_dict={image: image_val})
return loss_val
def _trainEvalSequence(self, dtype, train1_use_gpu, train2_use_gpu,
infer_use_gpu):
batch, height, width, input_channels = 2, 4, 5, 3
shape = [batch, height, width, input_channels]
# Not every character in a dtype's string representation is valid in a
# filename on every operating system; this map supplies filename-safe names.
dtype_to_valid_fn = {
dtypes.float16: 'float16',
dtypes.float32: 'float32',
}
checkpoint = os.path.join(
self.get_temp_dir(), 'cp_%s_%s_%s_%s' % (
dtype_to_valid_fn[dtype], train1_use_gpu, train2_use_gpu,
infer_use_gpu))
self._train(
checkpoint,
shape,
use_gpu=train1_use_gpu,
is_fused=True,
restore=False,
freeze_mode=False,
dtype=dtype)
train_vars = self._train(
checkpoint,
shape,
use_gpu=train2_use_gpu,
is_fused=True,
restore=True,
freeze_mode=False,
dtype=dtype)
np.random.seed(0)
image_val = np.random.rand(batch, height, width, input_channels).astype(
dtype.as_numpy_dtype)
loss_val = self._infer(
checkpoint, image_val, shape, use_gpu=infer_use_gpu, is_fused=True)
return train_vars, loss_val
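# `_trainEvalSequence` trains from scratch, restores the checkpoint and trains
# again, then runs inference from the same checkpoint; it returns the final
# variable values and the inference loss so tests can compare runs that differ
# only in dtype or device placement.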
def testHalfPrecision(self):
ref_vars, ref_loss = self._trainEvalSequence(
dtype=dtypes.float32,
train1_use_gpu=True,
train2_use_gpu=True,
infer_use_gpu=True)
self.assertEqual(len(ref_vars), 5)
for train1_use_gpu in [True, False]:
for train2_use_gpu in [True, False]:
for infer_use_gpu in [True, False]:
test_vars, test_loss = self._trainEvalSequence(
dtypes.float16, train1_use_gpu, train2_use_gpu, infer_use_gpu)
self.assertEqual(len(test_vars), 5)
for test_var, ref_var in zip(test_vars, ref_vars):
self.assertAllClose(test_var, ref_var, rtol=1.e-3, atol=1.e-3)
self.assertAllClose(test_loss, ref_loss, rtol=1.e-3, atol=1.e-3)
def _testCheckpoint(self, is_fused_checkpoint_a, is_fused_checkpoint_b,
use_gpu_checkpoint_a, use_gpu_checkpoint_b,
use_gpu_test_a, use_gpu_test_b, freeze_mode):
batch, height, width, input_channels = 2, 4, 5, 3
shape = [batch, height, width, input_channels]
base_path = '%s_%s_%s_%s_%s_%s' % (is_fused_checkpoint_a,
is_fused_checkpoint_b,
use_gpu_checkpoint_a,
use_gpu_checkpoint_b, use_gpu_test_a,
use_gpu_test_b)
checkpoint_path_a = os.path.join(self.get_temp_dir(),
'checkpoint_a_%s' % base_path)
self._train(
checkpoint_path_a,
shape,
use_gpu_checkpoint_a,
is_fused_checkpoint_a,
restore=False,
freeze_mode=freeze_mode)
checkpoint_path_b = os.path.join(self.get_temp_dir(),
'checkpoint_b_%s' % base_path)
self._train(
checkpoint_path_b,
shape,
use_gpu_checkpoint_b,
is_fused_checkpoint_b,
restore=False,
freeze_mode=freeze_mode)
vars_fused = self._train(
checkpoint_path_a,
shape,
use_gpu_test_a,
True,
restore=True,
freeze_mode=freeze_mode)
vars_nonfused = self._train(
checkpoint_path_b,
shape,
use_gpu_test_b,
False,
restore=True,
freeze_mode=freeze_mode)
self.assertEqual(len(vars_fused), 5)
self.assertEqual(len(vars_nonfused), 5)
for var_fused, var_nonfused in zip(vars_fused, vars_nonfused):
self.assertAllClose(var_fused, var_nonfused, atol=1e-5)
image_val = np.random.rand(batch, height, width,
input_channels).astype(np.float32)
loss_fused_val = self._infer(checkpoint_path_a, image_val, shape,
use_gpu_test_a, True)
loss_nonfused_val = self._infer(checkpoint_path_b, image_val, shape,
use_gpu_test_b, False)
self.assertAllClose(loss_fused_val, loss_nonfused_val, atol=1e-6, rtol=3e-4)
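# Fused and non-fused batch norm keep the same set of variables (gamma, beta,
# moving_mean, moving_variance), so a checkpoint written by one implementation
# should be restorable by the other. The testCheckpoint* cases below exercise
# various fused/non-fused and CPU/GPU combinations of that round trip.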
def _testCheckpointCrossDevice(self, ckpt_a_fused, ckpt_a_use_gpu,
ckpt_b_fused, ckpt_b_use_gpu):
for use_gpu_test_a in [True, False]:
for use_gpu_test_b in [True, False]:
for freeze_mode in [True, False]:
self._testCheckpoint(ckpt_a_fused, ckpt_a_use_gpu, ckpt_b_fused,
ckpt_b_use_gpu, use_gpu_test_a, use_gpu_test_b,
freeze_mode)
def testCheckpointFusedCPUAndFusedGPU(self):
self._testCheckpointCrossDevice(True, False, True, True)
def testCheckpointFusedCPUAndFusedCPU(self):
self._testCheckpointCrossDevice(True, False, True, False)
def testCheckpointFusedGPUAndFusedGPU(self):
self._testCheckpointCrossDevice(True, True, True, True)
def testCheckpointNonFusedCPUAndNonFusedGPU(self):
self._testCheckpointCrossDevice(False, False, False, True)
def testCheckpointNonFusedCPUAndNonFusedCPU(self):
self._testCheckpointCrossDevice(False, False, False, False)
def testCheckpointNonFusedGPUAndNonFusedGPU(self):
self._testCheckpointCrossDevice(False, True, False, True)
def testCheckpointNonFusedGPUAndFusedGPU(self):
self._testCheckpointCrossDevice(False, True, True, True)
def testCheckpointNonFusedGPUAndFusedCPU(self):
self._testCheckpointCrossDevice(False, True, True, False)
def testCheckpointNonFusedCPUAndFusedCPU(self):
self._testCheckpointCrossDevice(False, False, True, False)
def testCreateBN(self):
# Call layer.
bn = normalization_layers.BatchNormalization(axis=1)
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
# Verify shape.
self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3])
# Verify layer attributes.
self.assertEqual(len(bn.updates), 2)
self.assertEqual(len(bn.variables), 4)
self.assertEqual(len(bn.trainable_variables), 2)
self.assertEqual(len(bn.non_trainable_variables), 2)
# Test that updates were created and added to UPDATE_OPS.
self.assertEqual(len(bn.updates), 2)
self.assertListEqual(
ops.get_collection(ops.GraphKeys.UPDATE_OPS), bn.updates)
# Test that weights were created and added to TRAINABLE_VARIABLES.
self.assertListEqual(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
bn.trainable_variables)
def testCreateFusedBNFloat16(self):
# Call layer.
bn = normalization_layers.BatchNormalization(axis=1, fused=True)
inputs = random_ops.random_uniform(
(5, 4, 3, 3), seed=1, dtype=dtypes.float16)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
# Verify shape.
self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3, 3])
# Verify layer attributes.
self.assertEqual(len(bn.updates), 2)
self.assertEqual(len(bn.variables), 4)
self.assertEqual(len(bn.trainable_variables), 2)
self.assertEqual(len(bn.non_trainable_variables), 2)
for var in bn.variables:
self.assertEqual(var.dtype, dtypes.float32_ref)
# Test that updates were created and added to UPDATE_OPS.
self.assertEqual(len(bn.updates), 2)
self.assertListEqual(
ops.get_collection(ops.GraphKeys.UPDATE_OPS), bn.updates)
# Test that weights were created and added to TRAINABLE_VARIABLES.
self.assertListEqual(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
bn.trainable_variables)
def test3DInputAxis1(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=1, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.cached_session() as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 4, 1))
np_beta = np.reshape(np_beta, (1, 4, 1))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
np_inputs = self.evaluate(inputs)
mean = np.mean(np_inputs, axis=(0, 2))
std = np.std(np_inputs, axis=(0, 2))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def test3DInputAxis2(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=2, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.cached_session() as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 1, 3))
np_beta = np.reshape(np_beta, (1, 1, 3))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
np_inputs = self.evaluate(inputs)
mean = np.mean(np_inputs, axis=(0, 1))
std = np.std(np_inputs, axis=(0, 1))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def test4DInputAxis1(self):
if test.is_gpu_available(cuda_only=True):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=1, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.session(use_gpu=True) as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 4, 1, 1))
np_beta = np.reshape(np_beta, (1, 4, 1, 1))
for _ in range(100):
np_output, _, _ = sess.run(
[outputs] + bn.updates, feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
np_inputs = self.evaluate(inputs)
mean = np.mean(np_inputs, axis=(0, 2, 3))
std = np.std(np_inputs, axis=(0, 2, 3))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def test4DInputAxis2(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=2, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.cached_session() as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 1, 3, 1))
np_beta = np.reshape(np_beta, (1, 1, 3, 1))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
np_inputs = self.evaluate(inputs)
mean = np.mean(np_inputs, axis=(0, 1, 3))
std = np.std(np_inputs, axis=(0, 1, 3))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def test4DInputAxis3(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=3, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.cached_session() as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
np_beta = np.reshape(np_beta, (1, 1, 1, 6))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
np_inputs = self.evaluate(inputs)
mean = np.mean(np_inputs, axis=(0, 1, 2))
std = np.std(np_inputs, axis=(0, 1, 2))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def test4DInputAxis3Fused(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=3, epsilon=epsilon, momentum=0.9, fused=True)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.cached_session() as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
np_beta = np.reshape(np_beta, (1, 1, 1, 6))
for _ in range(100):
np_output, _, _ = sess.run(
[outputs] + bn.updates, feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
np_inputs = self.evaluate(inputs)
mean = np.mean(np_inputs, axis=(0, 1, 2))
std = np.std(np_inputs, axis=(0, 1, 2))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def test4DInputAxis1Fused(self):
if test.is_gpu_available(cuda_only=True):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=1, epsilon=epsilon, momentum=0.9, fused=True)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.cached_session() as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 4, 1, 1))
np_beta = np.reshape(np_beta, (1, 4, 1, 1))
for _ in range(100):
np_output, _, _ = sess.run(
[outputs] + bn.updates, feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
np_inputs = self.evaluate(inputs)
mean = np.mean(np_inputs, axis=(0, 2, 3))
std = np.std(np_inputs, axis=(0, 2, 3))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def testNegativeAxis(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=-1, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.cached_session() as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
np_beta = np.reshape(np_beta, (1, 1, 1, 6))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
np_inputs = self.evaluate(inputs)
mean = np.mean(np_inputs, axis=(0, 1, 2))
std = np.std(np_inputs, axis=(0, 1, 2))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def testBooleanLearningPhase(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=-1, epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
outputs_training = bn.apply(inputs, training=True)
outputs_infer = bn.apply(inputs, training=False)
with self.cached_session() as sess:
# Test training with a static (Python bool) learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
np_beta = np.reshape(np_beta, (1, 1, 1, 6))
for _ in range(100):
np_output, _, _ = sess.run([outputs_training] + bn.updates)
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
np_inputs = self.evaluate(inputs)
mean = np.mean(np_inputs, axis=(0, 1, 2))
std = np.std(np_inputs, axis=(0, 1, 2))
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with a static (Python bool) learning phase.
np_output = self.evaluate(outputs_infer)
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def testFunctionalNoReuse(self):
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
epsilon = 1e-3
training = array_ops.placeholder(dtype='bool')
outputs = normalization_layers.batch_norm(
inputs,
axis=-1,
momentum=0.9,
epsilon=epsilon,
training=training,
name='bn')
updates = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
all_vars = dict([(v.name, v) for v in variables.global_variables()])
moving_mean = all_vars['bn/moving_mean:0']
moving_variance = all_vars['bn/moving_variance:0']
beta = all_vars['bn/beta:0']
gamma = all_vars['bn/gamma:0']
with self.cached_session() as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([gamma, beta])
np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
np_beta = np.reshape(np_beta, (1, 1, 1, 6))
for _ in range(100):
np_output, _, _ = sess.run([outputs] + updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
np_moving_mean, np_moving_var = self.evaluate(
[moving_mean, moving_variance])
np_inputs = self.evaluate(inputs)
np_mean = np.mean(np_inputs, axis=(0, 1, 2))
np_std = np.std(np_inputs, axis=(0, 1, 2))
np_variance = np.square(np_std)
self.assertAllClose(np_mean, np_moving_mean, atol=1e-2)
self.assertAllClose(np_variance, np_moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def testFunctionalReuse(self):
inputs1 = variables.Variable(
np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
inputs2 = variables.Variable(
np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
epsilon = 1e-3
training = array_ops.placeholder(dtype='bool')
_ = normalization_layers.batch_norm(
inputs1,
axis=-1,
momentum=0.9,
epsilon=epsilon,
training=training,
name='bn')
outputs2 = normalization_layers.batch_norm(
inputs2,
axis=-1,
momentum=0.9,
epsilon=epsilon,
training=training,
name='bn',
reuse=True)
# Keep only the last two update ops; they belong to the second, reused
# application of the layer (the one that produced `outputs2`).
updates = ops.get_collection(ops.GraphKeys.UPDATE_OPS)[-2:]
all_vars = dict([(v.name, v) for v in variables.global_variables()])
moving_mean = all_vars['bn/moving_mean:0']
moving_variance = all_vars['bn/moving_variance:0']
beta = all_vars['bn/beta:0']
gamma = all_vars['bn/gamma:0']
with self.cached_session() as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
for _ in range(100):
np_output, _, _ = sess.run([outputs2] + updates,
feed_dict={training: True})
# Verify that the statistics are updated during training.
np_moving_mean, np_moving_var = self.evaluate(
[moving_mean, moving_variance])
np_inputs = self.evaluate(inputs2)
np_mean = np.mean(np_inputs, axis=(0, 1, 2))
np_std = np.std(np_inputs, axis=(0, 1, 2))
np_variance = np.square(np_std)
self.assertAllClose(np_mean, np_moving_mean, atol=1e-2)
self.assertAllClose(np_variance, np_moving_var, atol=1e-2)
# Verify that the axis is normalized during training.
np_gamma, np_beta = self.evaluate([gamma, beta])
np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
np_beta = np.reshape(np_beta, (1, 1, 1, 6))
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs2, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def testFunctionalReuseFromScope(self):
inputs = variables.Variable(
np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
epsilon = 1e-3
training = array_ops.placeholder(dtype='bool')
with variable_scope.variable_scope('scope'):
_ = normalization_layers.batch_norm(
inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training)
self.assertEqual(len(variables.global_variables()), 5)
with variable_scope.variable_scope('scope', reuse=True):
_ = normalization_layers.batch_norm(
inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training)
self.assertEqual(len(variables.global_variables()), 5)
def testNoCenter(self):
bn = normalization_layers.BatchNormalization(axis=1, center=False)
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
# Verify shape.
self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3])
# Verify layer attributes.
self.assertEqual(len(bn.updates), 2)
self.assertEqual(len(bn.variables), 3)
self.assertEqual(len(bn.trainable_variables), 1)
self.assertEqual(len(bn.non_trainable_variables), 2)
def testNoScale(self):
bn = normalization_layers.BatchNormalization(axis=1, scale=False)
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
# Verify shape.
self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3])
# Verify layer attributes.
self.assertEqual(len(bn.updates), 2)
self.assertEqual(len(bn.variables), 3)
self.assertEqual(len(bn.trainable_variables), 1)
self.assertEqual(len(bn.non_trainable_variables), 2)
def testRegularizers(self):
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
bn = normalization_layers.BatchNormalization(axis=1, beta_regularizer=reg)
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
training = array_ops.placeholder(dtype='bool')
_ = bn.apply(inputs, training=training)
self.assertEqual(len(bn.losses), 1)
bn = normalization_layers.BatchNormalization(axis=1, gamma_regularizer=reg)
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
training = array_ops.placeholder(dtype='bool')
_ = bn.apply(inputs, training=training)
self.assertEqual(len(bn.losses), 1)
def testConstraints(self):
g_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
bn = normalization_layers.BatchNormalization(axis=1,
gamma_constraint=g_constraint,
beta_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
bn(inputs)
self.assertEqual(bn.gamma_constraint, g_constraint)
self.assertEqual(bn.beta_constraint, b_constraint)
def testRenorm(self):
shape = (4, 3)
xt = array_ops.placeholder(dtypes.float32, shape)
momentum = 0.99
renorm_momentum = 0.8
rmax = 1.1
rmin = 0.9
dmax = 0.1
gamma = 2.
beta = 3.
epsilon = 0.001
bn = normalization_layers.BatchNormalization(
axis=1,
gamma_initializer=init_ops.constant_initializer(gamma),
beta_initializer=init_ops.constant_initializer(beta),
epsilon=epsilon,
momentum=momentum,
renorm=True,
renorm_clipping={'rmax': rmax, 'rmin': rmin, 'dmax': dmax},
renorm_momentum=renorm_momentum)
training = array_ops.placeholder(dtypes.bool)
yt = bn.apply(xt, training=training)
moving_mean = 0.
moving_variance = 1.
renorm_mean = 0.
renorm_stddev = 1.
renorm_weight = 0.
with self.session(use_gpu=True) as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(5):
x = np.random.random(shape)
mean = x.mean(0)
variance = x.var(0)
stddev = np.sqrt(variance + epsilon)
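# Batch renormalization correction terms, mirroring what the layer computes
# internally: r rescales by the ratio of the batch stddev to the renorm moving
# stddev, d shifts by the normalized difference of the means, and both are
# clipped to keep training stable.
#   r = clip(stddev / renorm_stddev, [rmin, rmax])
#   d = clip((mean - renorm_mean) / renorm_stddev, [-dmax, dmax])
#   y = ((x - mean) / stddev * r + d) * gamma + beta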
r = (stddev / renorm_stddev).clip(rmin, rmax)
d = ((mean - renorm_mean) / renorm_stddev).clip(-dmax, dmax)
y_train = ((x - mean) / stddev * r + d) * gamma + beta
renorm_mean += (mean - renorm_mean) * (1. - renorm_momentum)
renorm_stddev += (stddev - renorm_stddev) * (1. - renorm_momentum)
moving_mean += (mean - moving_mean) * (1. - momentum)
moving_variance += (variance - moving_variance) * (1. - momentum)
y_test = ((x - moving_mean) / (moving_variance + epsilon) ** 0.5 *
gamma) + beta
yt_val_train, _, _ = sess.run([yt] + bn.updates,
feed_dict={xt: x, training: True})
yt_val_test, _, _ = sess.run([yt] + bn.updates,
feed_dict={xt: x, training: False})
self.assertAllClose(y_train, yt_val_train, atol=1e-5)
self.assertAllClose(y_test, yt_val_test, atol=1e-5)
def testAdjustment(self):
shape = (4, 3)
xt = array_ops.placeholder(dtypes.float32, shape)
momentum = 0.99
gamma = 2.
beta = 3.
epsilon = 0.001
adjust_scale = random_ops.random_uniform(shape[-1:], 0.5, 1.5)
adjust_bias = random_ops.random_uniform(shape[-1:], -.2, .2)
bn = normalization_layers.BatchNormalization(
axis=1,
gamma_initializer=init_ops.constant_initializer(gamma),
beta_initializer=init_ops.constant_initializer(beta),
epsilon=epsilon,
momentum=momentum,
adjustment=lambda _: (adjust_scale, adjust_bias))
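# The `adjustment` hook returns a (scale, bias) pair that the layer applies to
# the normalized value before gamma and beta; the expected-value computation
# below reproduces that ordering.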
training = array_ops.placeholder(dtypes.bool)
yt = bn.apply(xt, training=training)
moving_mean = 0.
moving_variance = 1.
with self.session(use_gpu=True) as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(5):
x = np.random.random(shape)
yt_val_train, adj_scale_val, adj_bias_val = sess.run(
[yt, adjust_scale, adjust_bias] + bn.updates,
feed_dict={xt: x, training: True})[:3]
yt_val_test = sess.run([yt] + bn.updates,
feed_dict={xt: x, training: False})[0]
mean = x.mean(0)
variance = x.var(0)
y_train = (((x - mean) / (variance + epsilon) ** 0.5) * adj_scale_val +
adj_bias_val) * gamma + beta
moving_mean += (mean - moving_mean) * (1. - momentum)
moving_variance += (variance - moving_variance) * (1. - momentum)
y_test = ((x - moving_mean) / (moving_variance + epsilon) ** 0.5 *
gamma) + beta
self.assertAllClose(y_train, yt_val_train, atol=1e-5)
self.assertAllClose(y_test, yt_val_test, atol=1e-5)
def testRenormWithAdjustment(self):
shape = (4, 3)
xt = array_ops.placeholder(dtypes.float32, shape)
momentum = 0.99
renorm_momentum = 0.8
rmax = 1.1
rmin = 0.9
dmax = 0.1
gamma = 2.
beta = 3.
epsilon = 0.001
adjust_scale = random_ops.random_uniform(shape[-1:], 0.5, 1.5)
adjust_bias = random_ops.random_uniform(shape[-1:], -.2, .2)
bn = normalization_layers.BatchNormalization(
axis=1,
gamma_initializer=init_ops.constant_initializer(gamma),
beta_initializer=init_ops.constant_initializer(beta),
epsilon=epsilon,
momentum=momentum,
renorm=True,
renorm_clipping={'rmax': rmax, 'rmin': rmin, 'dmax': dmax},
renorm_momentum=renorm_momentum,
adjustment=lambda _: (adjust_scale, adjust_bias))
training = array_ops.placeholder(dtypes.bool)
yt = bn.apply(xt, training=training)
moving_mean = 0.
moving_variance = 1.
renorm_mean = 0.
renorm_stddev = 1.
with self.session(use_gpu=True) as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(5):
x = np.random.random(shape)
yt_val_train, adj_scale_val, adj_bias_val = sess.run(
[yt, adjust_scale, adjust_bias] + bn.updates,
feed_dict={xt: x, training: True})[:3]
yt_val_test = sess.run([yt] + bn.updates,
feed_dict={xt: x, training: False})[0]
mean = x.mean(0)
variance = x.var(0)
stddev = np.sqrt(variance + epsilon)
r = (stddev / renorm_stddev).clip(rmin, rmax)
d = ((mean - renorm_mean) / renorm_stddev).clip(-dmax, dmax)
y_train = (((x - mean) / stddev * r + d) * adj_scale_val +
adj_bias_val) * gamma + beta
renorm_mean += (mean - renorm_mean) * (1. - renorm_momentum)
renorm_stddev += (stddev - renorm_stddev) * (1. - renorm_momentum)
moving_mean += (mean - moving_mean) * (1. - momentum)
moving_variance += (variance - moving_variance) * (1. - momentum)
y_test = ((x - moving_mean) / (moving_variance + epsilon) ** 0.5 *
gamma) + beta
self.assertAllClose(y_train, yt_val_train, atol=1e-5)
self.assertAllClose(y_test, yt_val_test, atol=1e-5)
def testGhostBNNegativeVirtualBatch(self):
shape = [6, 5, 4, 3]
inp = random_ops.random_uniform(shape, seed=1)
with self.assertRaises(ValueError):
normalization_layers.batch_normalization(
inp, virtual_batch_size=-1)
def testGhostBNVirtualBatchFull(self):
shape = [6, 5, 4, 3]
inp = random_ops.random_uniform(shape, seed=1)
out1 = normalization_layers.batch_normalization(inp)
out2 = normalization_layers.batch_normalization(
inp, virtual_batch_size=6)
self.assertListEqual(
out1.shape.as_list(), out2.shape.as_list())
with self.session(use_gpu=True) as sess:
self.evaluate(variables.global_variables_initializer())
x = np.random.random(shape)
y1, y2 = sess.run([out1, out2], feed_dict={inp: x})
self.assertAllClose(y1, y2, atol=1e-5)
def testGhostBNInputOutputShapesMatch(self):
shape = [6, 4, 3]
inp = random_ops.random_uniform(shape, seed=1)
out = normalization_layers.batch_normalization(
inp, virtual_batch_size=3)
self.assertListEqual(out.shape.as_list(), shape)
def testGhostBNUnknownBatchSize(self):
np_shape = [10, 5, 4]
tf_shape = [None, 5, 4]
inp = array_ops.placeholder(dtypes.float32, tf_shape)
out = normalization_layers.batch_normalization(
inp, virtual_batch_size=2)
with self.session(use_gpu=True) as sess:
self.evaluate(variables.global_variables_initializer())
x = np.random.random(np_shape)
y = sess.run(out, feed_dict={inp: x})
self.assertListEqual(list(y.shape), np_shape)
def testGhostBN2Dims(self):
shape = [6, 2]
virtual_batch_size = 3
beta = 2.
gamma = 3.
momentum = 0.8
epsilon = 1e-3
moving_means = np.zeros([2, 2], dtype=np.float32)
moving_vars = np.ones([2, 2], dtype=np.float32)
inp = array_ops.placeholder(dtypes.float32, shape)
is_training = array_ops.placeholder(dtypes.bool)
bn = normalization_layers.BatchNormalization(
momentum=momentum,
epsilon=epsilon,
beta_initializer=init_ops.constant_initializer(beta),
gamma_initializer=init_ops.constant_initializer(gamma),
virtual_batch_size=virtual_batch_size)
out = bn.apply(inp, training=is_training)
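# With `virtual_batch_size` set, the layer performs "ghost" batch
# normalization: the batch is split into virtual sub-batches that are
# normalized with their own statistics (gamma, beta and the moving averages
# stay shared). `ghost_shape` below reproduces the layer's internal reshape so
# the expected outputs can be computed with NumPy.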
ghost_shape = ([virtual_batch_size,
shape[0] // virtual_batch_size,
shape[1]])
with self.session(use_gpu=True) as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(5):
x = np.random.random(shape)
sub_batched = np.reshape(x, ghost_shape)
means = np.mean(sub_batched, axis=0, keepdims=True)
variances = np.var(sub_batched, axis=0, keepdims=True)
avg_means = np.mean(means, axis=1, keepdims=True)
avg_variances = np.mean(variances, axis=1, keepdims=True)
moving_means = moving_means * momentum + avg_means * (1. - momentum)
moving_vars = moving_vars * momentum + avg_variances * (1. - momentum)
y_train = ((sub_batched - means) /
(variances + epsilon) ** 0.5 * gamma) + beta
y_test = ((sub_batched - moving_means) /
(moving_vars + epsilon) ** 0.5 * gamma) + beta
y_train = np.reshape(y_train, shape)
y_test = np.reshape(y_test, shape)
y_val_train, _, _ = sess.run([out] + bn.updates,
feed_dict={inp: x, is_training: True})
y_val_test = sess.run(out, feed_dict={inp: x, is_training: False})
self.assertAllClose(y_train, y_val_train, atol=1e-5)
self.assertAllClose(y_test, y_val_test, atol=1e-5)
def testGhostBN4DimsAxis3(self):
shape = [6, 10, 10, 3]
virtual_batch_size = 2
beta = 2.
gamma = 3.
momentum = 0.8
epsilon = 1e-3
moving_means = np.zeros([1, 1, 1, 1, 3], dtype=np.float32)
moving_vars = np.ones([1, 1, 1, 1, 3], dtype=np.float32)
inp = array_ops.placeholder(dtypes.float32, shape)
is_training = array_ops.placeholder(dtypes.bool)
bn = normalization_layers.BatchNormalization(
axis=3,
momentum=momentum,
epsilon=epsilon,
beta_initializer=init_ops.constant_initializer(beta),
gamma_initializer=init_ops.constant_initializer(gamma),
virtual_batch_size=virtual_batch_size)
out = bn.apply(inp, training=is_training)
ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
shape[1:])
with self.session(use_gpu=True) as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(5):
x = np.random.random(shape)
sub_batched = np.reshape(x, ghost_shape)
means = np.mean(sub_batched, axis=(0, 2, 3), keepdims=True)
variances = np.var(sub_batched, axis=(0, 2, 3), keepdims=True)
avg_means = np.mean(means, axis=1, keepdims=True)
avg_variances = np.mean(variances, axis=1, keepdims=True)
moving_means = moving_means * momentum + avg_means * (1. - momentum)
moving_vars = moving_vars * momentum + avg_variances * (1. - momentum)
y_train = ((sub_batched - means) /
(variances + epsilon) ** 0.5 * gamma) + beta
y_test = ((sub_batched - moving_means) /
(moving_vars + epsilon) ** 0.5 * gamma) + beta
y_train = np.reshape(y_train, shape)
y_test = np.reshape(y_test, shape)
y_val_train, _, _ = sess.run([out] + bn.updates,
feed_dict={inp: x, is_training: True})
y_val_test = sess.run(out, feed_dict={inp: x, is_training: False})
self.assertAllClose(y_train, y_val_train, atol=1e-2)
self.assertAllClose(y_test, y_val_test, atol=1e-2)
def testGhostBN4DimsAxis1(self):
shape = [6, 3, 10, 10]
virtual_batch_size = 2
beta = 2.
gamma = 3.
momentum = 0.8
epsilon = 1e-3
moving_means = np.zeros([1, 1, 3, 1, 1], dtype=np.float32)
moving_vars = np.ones([1, 1, 3, 1, 1], dtype=np.float32)
inp = array_ops.placeholder(dtypes.float32, shape)
is_training = array_ops.placeholder(dtypes.bool)
bn = normalization_layers.BatchNormalization(
axis=1,
momentum=momentum,
epsilon=epsilon,
beta_initializer=init_ops.constant_initializer(beta),
gamma_initializer=init_ops.constant_initializer(gamma),
virtual_batch_size=virtual_batch_size,
fused=False) # NCHW is unsupported by CPU fused batch norm
out = bn.apply(inp, training=is_training)
ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
shape[1:])
with self.session(use_gpu=True) as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(5):
x = np.random.random(shape)
sub_batched = np.reshape(x, ghost_shape)
means = np.mean(sub_batched, axis=(0, 3, 4), keepdims=True)
variances = np.var(sub_batched, axis=(0, 3, 4), keepdims=True)
avg_means = np.mean(means, axis=1, keepdims=True)
avg_variances = np.mean(variances, axis=1, keepdims=True)
moving_means = moving_means * momentum + avg_means * (1. - momentum)
moving_vars = moving_vars * momentum + avg_variances * (1. - momentum)
y_train = ((sub_batched - means) /
(variances + epsilon) ** 0.5 * gamma) + beta
y_test = ((sub_batched - moving_means) /
(moving_vars + epsilon) ** 0.5 * gamma) + beta
y_train = np.reshape(y_train, shape)
y_test = np.reshape(y_test, shape)
y_val_train, _, _ = sess.run([out] + bn.updates,
feed_dict={inp: x, is_training: True})
y_val_test = sess.run(out, feed_dict={inp: x, is_training: False})
self.assertAllClose(y_train, y_val_train, atol=1e-2)
self.assertAllClose(y_test, y_val_test, atol=1e-2)
def testMultiAxisInvalid(self):
shape = [6, 5, 4, 3]
inp = random_ops.random_uniform(shape, seed=1)
with self.assertRaises(ValueError):
normalization_layers.batch_normalization(
inp, axis=[1, 4]) # out of bounds
with self.assertRaises(ValueError):
normalization_layers.batch_normalization(
inp, axis=[-5, 1]) # out of bounds
with self.assertRaises(ValueError):
normalization_layers.batch_normalization(
inp, axis=[1, 2, 1]) # duplicate
def test3DInputMultiAxis12(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=[1, 2], epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 4, 3)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.cached_session() as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
np_inputs = self.evaluate(inputs)
mean = np.mean(np_inputs, axis=0, keepdims=True)
std = np.std(np_inputs, axis=0, keepdims=True)
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def test5DInputMultiAxis123(self):
epsilon = 1e-3
bn = normalization_layers.BatchNormalization(
axis=[1, 2, 3], epsilon=epsilon, momentum=0.9)
inputs = variables.Variable(
np.random.random((5, 3, 4, 4, 3)) + 100, dtype=dtypes.float32)
training = array_ops.placeholder(dtype='bool')
outputs = bn.apply(inputs, training=training)
with self.cached_session() as sess:
# Test training with placeholder learning phase.
self.evaluate(variables.global_variables_initializer())
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
for _ in range(100):
np_output, _, _ = sess.run([outputs] + bn.updates,
feed_dict={training: True})
# Verify that the axis is normalized during training.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
# Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
np_inputs = self.evaluate(inputs)
mean = np.mean(np_inputs, axis=(0, 4), keepdims=True)
std = np.std(np_inputs, axis=(0, 4), keepdims=True)
variance = np.square(std)
self.assertAllClose(mean, moving_mean, atol=1e-2)
self.assertAllClose(variance, moving_var, atol=1e-2)
# Test inference with placeholder learning phase.
np_output = sess.run(outputs, feed_dict={training: False})
# Verify that the axis is normalized during inference.
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
def testGhostBN5DimsMultiAxis14(self):
shape = [6, 3, 10, 10, 4]
virtual_batch_size = 3
beta = 2.
gamma = 3.
momentum = 0.8
epsilon = 1e-3
moving_means = np.zeros([1, 1, 3, 1, 1, 4], dtype=np.float32)
moving_vars = np.ones([1, 1, 3, 1, 1, 4], dtype=np.float32)
inp = array_ops.placeholder(dtypes.float32, shape)
is_training = array_ops.placeholder(dtypes.bool)
bn = normalization_layers.BatchNormalization(
axis=[1, 4],
momentum=momentum,
epsilon=epsilon,
beta_initializer=init_ops.constant_initializer(beta),
gamma_initializer=init_ops.constant_initializer(gamma),
virtual_batch_size=virtual_batch_size,
fused=False)
out = bn.apply(inp, training=is_training)
ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
shape[1:])
with self.session(use_gpu=True) as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(5):
x = np.random.random(shape)
sub_batched = np.reshape(x, ghost_shape)
means = np.mean(sub_batched, axis=(0, 3, 4), keepdims=True)
variances = np.var(sub_batched, axis=(0, 3, 4), keepdims=True)
avg_means = np.mean(means, axis=1, keepdims=True)
avg_variances = np.mean(variances, axis=1, keepdims=True)
moving_means = moving_means * momentum + avg_means * (1. - momentum)
moving_vars = moving_vars * momentum + avg_variances * (1. - momentum)
y_train = ((sub_batched - means) /
(variances + epsilon) ** 0.5 * gamma) + beta
y_test = ((sub_batched - moving_means) /
(moving_vars + epsilon) ** 0.5 * gamma) + beta
y_train = np.reshape(y_train, shape)
y_test = np.reshape(y_test, shape)
y_val_train, _, _ = sess.run([out] + bn.updates,
feed_dict={inp: x, is_training: True})
y_val_test = sess.run(out, feed_dict={inp: x, is_training: False})
self.assertAllClose(y_train, y_val_train, atol=1e-2)
self.assertAllClose(y_test, y_val_test, atol=1e-2)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/layers/normalization_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the pooling layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.layers import base
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['layers.AveragePooling1D'])
class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer):
"""Average Pooling layer for 1D inputs.
Arguments:
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(AveragePooling1D, self).__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
**kwargs)
@deprecation.deprecated(
date=None, instructions='Use keras.layers.AveragePooling1D instead.')
@tf_export(v1=['layers.average_pooling1d'])
def average_pooling1d(inputs, pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
"""Average Pooling layer for 1D inputs.
Arguments:
inputs: The tensor over which to pool. Must have rank 3.
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
name: A string, the name of the layer.
Returns:
The output tensor, of rank 3.
Raises:
ValueError: if eager execution is enabled.
"""
layer = AveragePooling1D(pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return layer.apply(inputs)
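# A minimal v1 graph-mode usage sketch (the tensor names are illustrative and
# not part of this module):
#   x = tf.compat.v1.placeholder(tf.float32, [None, 8, 3])  # (batch, length, channels)
#   y = tf.compat.v1.layers.average_pooling1d(x, pool_size=2, strides=2)
#   # With the default 'valid' padding, y has static shape (None, 4, 3).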
@tf_export(v1=['layers.MaxPooling1D'])
class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer):
"""Max Pooling layer for 1D inputs.
Arguments:
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(MaxPooling1D, self).__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
**kwargs)
@deprecation.deprecated(
date=None, instructions='Use keras.layers.MaxPooling1D instead.')
@tf_export(v1=['layers.max_pooling1d'])
def max_pooling1d(inputs, pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
"""Max Pooling layer for 1D inputs.
Arguments:
inputs: The tensor over which to pool. Must have rank 3.
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
name: A string, the name of the layer.
Returns:
The output tensor, of rank 3.
Raises:
ValueError: if eager execution is enabled.
"""
layer = MaxPooling1D(pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return layer.apply(inputs)
@tf_export(v1=['layers.AveragePooling2D'])
class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer):
"""Average pooling layer for 2D inputs (e.g. images).
Arguments:
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(AveragePooling2D, self).__init__(
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, name=name, **kwargs)
@deprecation.deprecated(
date=None, instructions='Use keras.layers.AveragePooling2D instead.')
@tf_export(v1=['layers.average_pooling2d'])
def average_pooling2d(inputs,
pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
"""Average pooling layer for 2D inputs (e.g. images).
Arguments:
inputs: The tensor over which to pool. Must have rank 4.
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
name: A string, the name of the layer.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = AveragePooling2D(pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format,
name=name)
return layer.apply(inputs)
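# Output sizes follow the usual convention per spatial dimension:
# 'valid' gives ceil((input - pool_size + 1) / stride) and 'same' gives
# ceil(input / stride). A hedged, illustrative sketch:
#   images = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1])
#   pooled = tf.compat.v1.layers.average_pooling2d(images, pool_size=2, strides=2)
#   # pooled has static shape (None, 14, 14, 1).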
@tf_export(v1=['layers.MaxPooling2D'])
class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer):
"""Max pooling layer for 2D inputs (e.g. images).
Arguments:
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(MaxPooling2D, self).__init__(
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, name=name, **kwargs)
@deprecation.deprecated(
date=None, instructions='Use keras.layers.MaxPooling2D instead.')
@tf_export(v1=['layers.max_pooling2d'])
def max_pooling2d(inputs,
pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
"""Max pooling layer for 2D inputs (e.g. images).
Arguments:
inputs: The tensor over which to pool. Must have rank 4.
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
name: A string, the name of the layer.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = MaxPooling2D(pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format,
name=name)
return layer.apply(inputs)
@tf_export(v1=['layers.AveragePooling3D'])
class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer):
"""Average pooling layer for 3D inputs (e.g. volumes).
Arguments:
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(AveragePooling3D, self).__init__(
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, name=name, **kwargs)
@deprecation.deprecated(
date=None, instructions='Use keras.layers.AveragePooling3D instead.')
@tf_export(v1=['layers.average_pooling3d'])
def average_pooling3d(inputs,
pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
"""Average pooling layer for 3D inputs (e.g. volumes).
Arguments:
inputs: The tensor over which to pool. Must have rank 5.
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = AveragePooling3D(pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format,
name=name)
return layer.apply(inputs)
@tf_export(v1=['layers.MaxPooling3D'])
class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer):
"""Max pooling layer for 3D inputs (e.g. volumes).
Arguments:
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(MaxPooling3D, self).__init__(
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, name=name, **kwargs)
@deprecation.deprecated(
date=None, instructions='Use keras.layers.MaxPooling3D instead.')
@tf_export(v1=['layers.max_pooling3d'])
def max_pooling3d(inputs,
pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
"""Max pooling layer for 3D inputs (e.g.
volumes).
Arguments:
inputs: The tensor over which to pool. Must have rank 5.
pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height,
pool_width) specifying the size of the pooling window. Can be a single
integer to specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 3 integers, specifying the strides of
the pooling operation. Can be a single integer to specify the same value
for all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape `(batch, depth, height,
width, channels)` while `channels_first` corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = MaxPooling3D(pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format,
name=name)
return layer.apply(inputs)
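# Illustrative usage sketch for the functional 3D max-pooling wrapper above on
# a rank-5 volume tensor; not part of the original module. The helper name and
# shapes are hypothetical, and graph mode is assumed.
def _example_max_pooling3d():
  from tensorflow.python.ops import array_ops  # local import for the sketch
  # A batch of 16x16x16 single-channel volumes.
  volumes = array_ops.placeholder(dtype='float32',
                                  shape=(None, 16, 16, 16, 1))
  # 2x2x2 windows with stride 2 halve depth, height and width under 'valid'.
  pooled = max_pooling3d(volumes, pool_size=2, strides=2, padding='valid')
  # `pooled` has static shape (None, 8, 8, 8, 1).
  return pooled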
# Aliases
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
max_pool2d = max_pooling2d
avg_pool2d = average_pooling2d
|
tensorflow-master
|
tensorflow/python/layers/pooling.py
|
tensorflow-master
|
tensorflow/python/layers/__init__.py
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.layers import base
from tensorflow.python.ops import init_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['layers.Dense'])
class Dense(keras_layers.Dense, base.Layer):
"""Densely-connected layer class.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Arguments:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.compat.v1.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such cases.
_reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer instance (or name) for the kernel matrix.
bias_initializer: Initializer instance (or name) for the bias.
kernel_regularizer: Regularizer instance for the kernel matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
kernel_constraint: Constraint function for the kernel matrix.
bias_constraint: Constraint function for the bias.
kernel: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
"""
def __init__(self, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Dense, self).__init__(units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
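# Object-style usage sketch for the `Dense` layer above; illustrative only and
# not part of the original module. The helper name is hypothetical.
def _example_dense_layer():
  from tensorflow.python.ops import random_ops  # local import for the sketch
  inputs = random_ops.random_uniform((4, 3))
  layer = Dense(2, name='example_dense')
  outputs = layer.apply(inputs)
  # `outputs` has shape (4, 2); after the layer is built, `layer.kernel` has
  # shape (3, 2) and `layer.bias` has shape (2,).
  return outputs, layer.kernel, layer.bias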
@deprecation.deprecated(
date=None, instructions='Use keras.layers.Dense instead.')
@tf_export(v1=['layers.dense'])
def dense(
inputs, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the densely-connected layer.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Arguments:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.compat.v1.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
    Output tensor with the same shape as `inputs` except the last dimension is
    of size `units`.
Raises:
ValueError: if eager execution is enabled.
"""
layer = Dense(units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
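# Functional-interface sketch for `dense` above, showing how `reuse=True`
# shares variables across two calls with the same name; illustrative only,
# helper name hypothetical, graph mode assumed.
def _example_dense_reuse():
  from tensorflow.python.ops import random_ops  # local import for the sketch
  inputs = random_ops.random_uniform((4, 3))
  # The first call creates 'shared/kernel' and 'shared/bias'.
  first = dense(inputs, 2, name='shared')
  # The second call with reuse=True reuses those same variables.
  second = dense(inputs, 2, name='shared', reuse=True)
  return first, second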
@tf_export(v1=['layers.Dropout'])
class Dropout(keras_layers.Dropout, base.Layer):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units to 0
at each update during training time, which helps prevent overfitting.
The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time.
Arguments:
rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
10% of input units.
noise_shape: 1D tensor of type `int32` representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)`, and you want the dropout mask
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
      `tf.compat.v1.set_random_seed` for behavior.
name: The name of the layer (string).
"""
def __init__(self, rate=0.5,
noise_shape=None,
seed=None,
name=None,
**kwargs):
super(Dropout, self).__init__(rate=rate,
noise_shape=noise_shape,
seed=seed,
name=name,
**kwargs)
def call(self, inputs, training=False):
return super(Dropout, self).call(inputs, training=training)
@deprecation.deprecated(
date=None,
instructions='Use keras.layers.dropout instead.')
@tf_export(v1=['layers.dropout'])
def dropout(inputs,
rate=0.5,
noise_shape=None,
seed=None,
training=False,
name=None):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units to 0
at each update during training time, which helps prevent overfitting.
The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time.
Arguments:
inputs: Tensor input.
    rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
10% of input units.
noise_shape: 1D tensor of type `int32` representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)`, and you want the dropout mask
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed`
for behavior.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
(apply dropout) or in inference mode (return the input untouched).
name: The name of the layer (string).
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)
return layer.apply(inputs, training=training)
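# Sketch of the functional dropout wrapper above with an explicit training
# switch; illustrative only and not part of the original module. The helper
# name is hypothetical.
def _example_dropout(is_training):
  from tensorflow.python.ops import array_ops  # local import for the sketch
  inputs = array_ops.ones((5, 3))
  # With is_training=True roughly `rate` of the units are zeroed and the rest
  # are scaled by 1 / (1 - rate); with is_training=False the input is returned
  # untouched.
  return dropout(inputs, rate=0.5, training=is_training)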
@tf_export(v1=['layers.Flatten'])
class Flatten(keras_layers.Flatten, base.Layer):
"""Flattens an input tensor while preserving the batch axis (axis 0).
Arguments:
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
Examples:
```
x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
y = Flatten()(x)
# now `y` has shape `(None, 16)`
x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')
y = Flatten()(x)
# now `y` has shape `(None, None)`
```
"""
pass
@deprecation.deprecated(
date=None,
instructions='Use keras.layers.flatten instead.')
@tf_export(v1=['layers.flatten'])
def flatten(inputs, name=None, data_format='channels_last'):
"""Flattens an input tensor while preserving the batch axis (axis 0).
Arguments:
inputs: Tensor input.
name: The name of the layer (string).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
Returns:
Reshaped tensor.
Examples:
```
x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
y = flatten(x)
# now `y` has shape `(None, 16)`
x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')
y = flatten(x)
# now `y` has shape `(None, None)`
```
"""
layer = Flatten(name=name, data_format=data_format)
return layer.apply(inputs)
# Aliases
FullyConnected = Dense
fully_connected = dense
|
tensorflow-master
|
tensorflow/python/layers/core.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DenseTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testDenseProperties(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(dense.units, 2)
self.assertEqual(dense.activation, nn_ops.relu)
self.assertEqual(dense.kernel_regularizer, None)
self.assertEqual(dense.bias_regularizer, None)
self.assertEqual(dense.activity_regularizer, None)
self.assertEqual(dense.use_bias, True)
# Test auto-naming
dense = core_layers.Dense(2, activation=nn_ops.relu)
dense.apply(random_ops.random_uniform((5, 2)))
self.assertEqual(dense.name, 'dense_1')
dense = core_layers.Dense(2, activation=nn_ops.relu)
dense.apply(random_ops.random_uniform((5, 2)))
self.assertEqual(dense.name, 'dense_2')
@test_util.run_deprecated_v1
def testVariableInput(self):
with self.cached_session():
v = variable_scope.get_variable(
'X', initializer=init_ops.zeros_initializer(), shape=(1, 1))
x = core_layers.Dense(1)(v)
variables.global_variables_initializer().run()
self.assertAllEqual(x.eval(), [[0.0]])
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testCall(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 4), seed=1)
outputs = dense(inputs)
self.assertListEqual([5, 2], outputs.get_shape().as_list())
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables,
[dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias.name, 'my_dense/bias:0')
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNoEagerLeak(self):
# Tests that repeatedly constructing and building a Layer does not leak
# Python objects.
inputs = random_ops.random_uniform((5, 4), seed=1)
core_layers.Dense(5)(inputs)
core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')(inputs)
@test_util.run_in_graph_and_eager_modes
def testCallTensorDot(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
outputs = dense(inputs)
self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())
@test_util.run_in_graph_and_eager_modes
def testNoBias(self):
dense = core_layers.Dense(2, use_bias=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel])
self.assertListEqual(dense.trainable_variables, [dense.kernel])
self.assertListEqual(dense.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias, None)
@test_util.run_in_graph_and_eager_modes
def testNonTrainable(self):
dense = core_layers.Dense(2, trainable=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables,
[dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)
@test_util.run_in_graph_and_eager_modes
def testOutputShape(self):
dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 7])
inputs = random_ops.random_uniform((5, 2, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])
inputs = random_ops.random_uniform((1, 2, 4, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])
@test_util.run_deprecated_v1
def testCallOnPlaceHolder(self):
inputs = array_ops.placeholder(dtype=dtypes.float32)
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
@test_util.run_in_graph_and_eager_modes
def testActivation(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
if not context.executing_eagerly():
self.assertEqual(outputs.op.name, 'dense1/Relu')
dense = core_layers.Dense(2, name='dense2')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
if not context.executing_eagerly():
self.assertEqual(outputs.op.name, 'dense2/BiasAdd')
@test_util.run_deprecated_v1
def testActivityRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', activity_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
@test_util.run_deprecated_v1
def testKernelRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', kernel_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in dense.variables])
self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testKernelRegularizerWithReuse(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer, reuse=True)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
@test_util.run_deprecated_v1
def testBiasRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(2, name='my_dense', bias_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in dense.variables])
self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testFunctionalDense(self):
with self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = core_layers.dense(
inputs, 2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(outputs.op.name, 'my_dense/Relu')
@test_util.run_deprecated_v1
def testFunctionalDenseTwice(self):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
vars1 = _get_variable_dict_from_varstore().values()
core_layers.dense(inputs, 2)
vars2 = _get_variable_dict_from_varstore().values()
self.assertEqual(len(vars1), 2)
self.assertEqual(len(vars2), 4)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuse(self):
with self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
vars1 = variables.trainable_variables()
core_layers.dense(inputs, 2, name='my_dense', reuse=True)
vars2 = variables.trainable_variables()
self.assertEqual(vars1, vars2)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuseFromScope(self):
with self.cached_session():
with variable_scope.variable_scope('scope'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
vars1 = variables.trainable_variables()
with variable_scope.variable_scope('scope', reuse=True):
core_layers.dense(inputs, 2, name='my_dense')
vars2 = variables.trainable_variables()
self.assertEqual(vars1, vars2)
@test_util.run_deprecated_v1
def testFunctionalDenseInitializerFromScope(self):
with variable_scope.variable_scope(
'scope',
initializer=init_ops.ones_initializer()), self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
variables.global_variables_initializer().run()
weights = _get_variable_dict_from_varstore()
self.assertEqual(len(weights), 2)
# Check that the matrix weights got initialized to ones (from scope).
self.assertAllClose(weights['scope/dense/kernel'].read_value().eval(),
np.ones((3, 2)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights['scope/dense/bias'].read_value().eval(),
np.zeros((2)))
def testEagerExecution(self):
with context.eager_mode():
container = variable_scope.EagerVariableStore()
x = constant_op.constant([[2.0]])
with container.as_default():
y = core_layers.dense(
x, 1, name='my_dense',
kernel_initializer=init_ops.ones_initializer())
self.assertAllEqual(y, [[2.0]])
self.assertEqual(len(container.variables()), 2)
# Recreate the layer to test reuse.
with container.as_default():
core_layers.dense(
x, 1, name='my_dense',
kernel_initializer=init_ops.ones_initializer())
self.assertEqual(len(container.variables()), 2)
def testFunctionalDenseWithCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope('test', custom_getter=custom_getter):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
self.assertEqual(called[0], 2)
@test_util.run_deprecated_v1
def testFunctionalDenseInScope(self):
with self.cached_session():
with variable_scope.variable_scope('test'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
var_dict = _get_variable_dict_from_varstore()
var_key = 'test/my_dense/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
with variable_scope.variable_scope('test1') as scope:
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name=scope)
var_dict = _get_variable_dict_from_varstore()
var_key = 'test1/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
with variable_scope.variable_scope('test2'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
var_dict = _get_variable_dict_from_varstore()
var_key = 'test2/dense/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
@test_util.run_in_graph_and_eager_modes
def testComputeOutputShape(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
ts = tensor_shape.TensorShape
# pylint: disable=protected-access
with self.assertRaises(ValueError):
dense.compute_output_shape(ts(None))
with self.assertRaises(ValueError):
dense.compute_output_shape(ts([]))
with self.assertRaises(ValueError):
dense.compute_output_shape(ts([1]))
self.assertEqual(
[None, 2],
dense.compute_output_shape((None, 3)).as_list())
self.assertEqual(
[None, 2],
dense.compute_output_shape(ts([None, 3])).as_list())
self.assertEqual(
[None, 4, 2],
dense.compute_output_shape(ts([None, 4, 3])).as_list())
# pylint: enable=protected-access
@test_util.run_in_graph_and_eager_modes
def testConstraints(self):
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
dense = core_layers.Dense(2,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3), seed=1)
dense(inputs)
self.assertEqual(dense.kernel_constraint, k_constraint)
self.assertEqual(dense.bias_constraint, b_constraint)
def _get_variable_dict_from_varstore():
var_dict = variable_scope._get_default_variable_store()._vars # pylint: disable=protected-access
sorted_var_dict = collections.OrderedDict(
sorted(var_dict.items(), key=lambda t: t[0]))
return sorted_var_dict
class DropoutTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testDropoutProperties(self):
dp = core_layers.Dropout(0.5, name='dropout')
self.assertEqual(dp.rate, 0.5)
self.assertEqual(dp.noise_shape, None)
dp.apply(array_ops.ones(()))
self.assertEqual(dp.name, 'dropout')
@test_util.run_in_graph_and_eager_modes
def testBooleanLearningPhase(self):
dp = core_layers.Dropout(0.5)
inputs = array_ops.ones((5, 3))
dropped = dp.apply(inputs, training=True)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
dropped = dp.apply(inputs, training=False)
np_output = self.evaluate(dropped)
self.assertAllClose(np.ones((5, 3)), np_output)
@test_util.run_deprecated_v1
def testDynamicLearningPhase(self):
with self.cached_session() as sess:
dp = core_layers.Dropout(0.5, seed=1)
inputs = array_ops.ones((5, 5))
training = array_ops.placeholder(dtype='bool')
dropped = dp.apply(inputs, training=training)
self.evaluate(variables.global_variables_initializer())
np_output = sess.run(dropped, feed_dict={training: True})
self.assertAlmostEqual(0., np_output.min())
np_output = sess.run(dropped, feed_dict={training: False})
self.assertAllClose(np.ones((5, 5)), np_output)
@test_util.run_in_graph_and_eager_modes
def testDynamicNoiseShape(self):
inputs = array_ops.ones((5, 3, 2))
noise_shape = [None, 1, None]
dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
dropped = dp.apply(inputs, training=True)
self.evaluate(variables.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])
def testCustomNoiseShape(self):
inputs = array_ops.ones((5, 3, 2))
noise_shape = [5, 1, 2]
dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
dropped = dp.apply(inputs, training=True)
self.evaluate(variables.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])
@test_util.run_deprecated_v1
def testFunctionalDropout(self):
with self.cached_session():
inputs = array_ops.ones((5, 5))
dropped = core_layers.dropout(inputs, 0.5, training=True, seed=1)
variables.global_variables_initializer().run()
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
dropped = core_layers.dropout(inputs, 0.5, training=False, seed=1)
np_output = self.evaluate(dropped)
self.assertAllClose(np.ones((5, 5)), np_output)
@test_util.run_deprecated_v1
def testDynamicRate(self):
with self.cached_session() as sess:
rate = array_ops.placeholder(dtype='float32', name='rate')
dp = core_layers.Dropout(rate, name='dropout')
inputs = array_ops.ones((5, 5))
dropped = dp.apply(inputs, training=True)
self.evaluate(variables.global_variables_initializer())
np_output = sess.run(dropped, feed_dict={rate: 0.5})
self.assertAlmostEqual(0., np_output.min())
np_output = sess.run(dropped, feed_dict={rate: 0.0})
self.assertAllClose(np.ones((5, 5)), np_output)
class FlattenTest(test.TestCase):
@test_util.run_deprecated_v1
def testCreateFlatten(self):
with self.cached_session() as sess:
x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((3, 2, 3))})
self.assertEqual(list(np_output.shape), [3, 6])
self.assertEqual(y.get_shape().as_list(), [None, 6])
x = array_ops.placeholder(shape=(1, 2, 3, 2), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((1, 2, 3, 2))})
self.assertEqual(list(np_output.shape), [1, 12])
self.assertEqual(y.get_shape().as_list(), [1, 12])
def testComputeShape(self):
shape = core_layers.Flatten().compute_output_shape((1, 2, 3, 2))
self.assertEqual(shape.as_list(), [1, 12])
shape = core_layers.Flatten().compute_output_shape((None, 3, 2))
self.assertEqual(shape.as_list(), [None, 6])
shape = core_layers.Flatten().compute_output_shape((None, 3, None))
self.assertEqual(shape.as_list(), [None, None])
@test_util.run_deprecated_v1
def testDataFormat5d(self):
np_input_channels_last = np.arange(
120, dtype='float32').reshape([1, 5, 4, 3, 2])
with self.test_session() as sess:
x = array_ops.placeholder(shape=(1, 5, 4, 3, 2), dtype='float32')
y = core_layers.Flatten(data_format='channels_last')(x)
np_output_cl = sess.run(y, feed_dict={x: np_input_channels_last})
x = array_ops.placeholder(shape=(1, 2, 5, 4, 3), dtype='float32')
y = core_layers.Flatten(data_format='channels_first')(x)
np_input_channels_first = np.transpose(np_input_channels_last,
[0, 4, 1, 2, 3])
np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first})
self.assertAllEqual(np_output_cl, np_output_cf)
@test_util.run_deprecated_v1
def testDataFormat4d(self):
np_input_channels_last = np.arange(
24, dtype='float32').reshape([1, 4, 3, 2])
with self.test_session() as sess:
x = array_ops.placeholder(shape=(1, 4, 3, 2), dtype='float32')
y = core_layers.Flatten(data_format='channels_last')(x)
np_output_cl = sess.run(y, feed_dict={x: np_input_channels_last})
x = array_ops.placeholder(shape=(1, 2, 4, 3), dtype='float32')
y = core_layers.Flatten(data_format='channels_first')(x)
np_input_channels_first = np.transpose(np_input_channels_last,
[0, 3, 1, 2])
np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first})
self.assertAllEqual(np_output_cl, np_output_cf)
@test_util.run_deprecated_v1
def testFunctionalFlatten(self):
x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
y = core_layers.flatten(x, name='flatten')
self.assertEqual(y.get_shape().as_list(), [None, 6])
@test_util.run_deprecated_v1
def testFlatten0D(self):
x = array_ops.placeholder(shape=(None,), dtype='float32')
y = core_layers.Flatten()(x)
with self.cached_session() as sess:
np_output = sess.run(y, feed_dict={x: np.zeros((5,))})
self.assertEqual(list(np_output.shape), [5, 1])
self.assertEqual(y.shape.as_list(), [None, 1])
@test_util.run_deprecated_v1
def testFlattenUnknownAxes(self):
with self.cached_session() as sess:
x = array_ops.placeholder(shape=(5, None, None), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((5, 2, 3))})
self.assertEqual(list(np_output.shape), [5, 6])
self.assertEqual(y.get_shape().as_list(), [5, None])
x = array_ops.placeholder(shape=(5, None, 2), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((5, 3, 2))})
self.assertEqual(list(np_output.shape), [5, 6])
self.assertEqual(y.get_shape().as_list(), [5, None])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/layers/core_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.pooling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.layers import pooling as pooling_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class PoolingTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
pooling_layers.max_pooling2d(images, 3, strides=2, data_format='invalid')
def testInvalidStrides(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'strides'):
pooling_layers.max_pooling2d(images, 3, strides=(1, 2, 3))
with self.assertRaisesRegexp(ValueError, 'strides'):
pooling_layers.max_pooling2d(images, 3, strides=None)
def testInvalidPoolSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'pool_size'):
pooling_layers.max_pooling2d(images, (1, 2, 3), strides=2)
with self.assertRaisesRegexp(ValueError, 'pool_size'):
pooling_layers.max_pooling2d(images, None, strides=2)
def testCreateMaxPooling2D(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = pooling_layers.MaxPooling2D([2, 2], strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 3, 4, 4])
def testCreateAveragePooling2D(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = pooling_layers.AveragePooling2D([2, 2], strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 3, 4, 4])
@test_util.run_deprecated_v1
def testCreateMaxPooling2DChannelsFirst(self):
height, width = 7, 9
images = random_ops.random_uniform((5, 2, height, width))
layer = pooling_layers.MaxPooling2D([2, 2],
strides=1,
data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 2, 6, 8])
@test_util.run_deprecated_v1
def testCreateAveragePooling2DChannelsFirst(self):
height, width = 5, 6
images = random_ops.random_uniform((3, 4, height, width))
layer = pooling_layers.AveragePooling2D((2, 2),
strides=(1, 1),
padding='valid',
data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [3, 4, 4, 5])
@test_util.run_deprecated_v1
def testCreateAveragePooling2DChannelsFirstWithNoneBatch(self):
height, width = 5, 6
images = array_ops.placeholder(dtype='float32',
shape=(None, 4, height, width))
layer = pooling_layers.AveragePooling2D((2, 2),
strides=(1, 1),
padding='valid',
data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [None, 4, 4, 5])
def testCreateMaxPooling1D(self):
width = 7
channels = 3
images = random_ops.random_uniform((5, width, channels))
layer = pooling_layers.MaxPooling1D(2, strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, width // 2, channels])
def testCreateAveragePooling1D(self):
width = 7
channels = 3
images = random_ops.random_uniform((5, width, channels))
layer = pooling_layers.AveragePooling1D(2, strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, width // 2, channels])
def testCreateMaxPooling1DChannelsFirst(self):
width = 7
channels = 3
images = random_ops.random_uniform((5, channels, width))
layer = pooling_layers.MaxPooling1D(
2, strides=2, data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, channels, width // 2])
def testCreateAveragePooling1DChannelsFirst(self):
width = 7
channels = 3
images = random_ops.random_uniform((5, channels, width))
layer = pooling_layers.AveragePooling1D(
2, strides=2, data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, channels, width // 2])
def testCreateMaxPooling3D(self):
depth, height, width = 6, 7, 9
images = random_ops.random_uniform((5, depth, height, width, 4))
layer = pooling_layers.MaxPooling3D([2, 2, 2], strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 4, 4])
def testCreateAveragePooling3D(self):
depth, height, width = 6, 7, 9
images = random_ops.random_uniform((5, depth, height, width, 4))
layer = pooling_layers.AveragePooling3D([2, 2, 2], strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 4, 4])
def testMaxPooling3DChannelsFirst(self):
depth, height, width = 6, 7, 9
images = random_ops.random_uniform((5, 2, depth, height, width))
layer = pooling_layers.MaxPooling3D(
[2, 2, 2], strides=2, data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3, 4])
def testAveragePooling3DChannelsFirst(self):
depth, height, width = 6, 7, 9
images = random_ops.random_uniform((5, 2, depth, height, width))
layer = pooling_layers.AveragePooling3D(
[2, 2, 2], strides=2, data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3, 4])
def testCreateMaxPooling2DIntegerPoolSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = pooling_layers.MaxPooling2D(2, strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 3, 4, 4])
def testMaxPooling2DPaddingSame(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4), seed=1)
layer = pooling_layers.MaxPooling2D(
images.get_shape()[1:3], strides=2, padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 4, 5, 4])
def testCreatePooling2DWithStrides(self):
height, width = 6, 8
# Test strides tuple
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layer = pooling_layers.MaxPooling2D([2, 2], strides=(2, 2), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 3])
# Test strides integer
layer = pooling_layers.MaxPooling2D([2, 2], strides=2, padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 3])
# Test unequal strides
layer = pooling_layers.MaxPooling2D([2, 2], strides=(2, 1), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width, 3])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/layers/pooling_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains layer utilies for input validation and format conversion.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import variables
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.framework import smart_cond as smart_module
from tensorflow.python.util import nest
def convert_data_format(data_format, ndim):
if data_format == 'channels_last':
if ndim == 3:
return 'NWC'
elif ndim == 4:
return 'NHWC'
elif ndim == 5:
return 'NDHWC'
else:
raise ValueError('Input rank not supported:', ndim)
elif data_format == 'channels_first':
if ndim == 3:
return 'NCW'
elif ndim == 4:
return 'NCHW'
elif ndim == 5:
return 'NCDHW'
else:
raise ValueError('Input rank not supported:', ndim)
else:
raise ValueError('Invalid data_format:', data_format)
def normalize_tuple(value, n, name):
"""Transforms a single integer or iterable of integers into an integer tuple.
Arguments:
    value: The value to validate and convert. Could be an int, or any iterable
of ints.
n: The size of the tuple to be returned.
name: The name of the argument being validated, e.g. "strides" or
"kernel_size". This is only used to format error messages.
Returns:
A tuple of n integers.
Raises:
ValueError: If something else than an int/long or iterable thereof was
passed.
"""
if isinstance(value, int):
return (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value))
if len(value_tuple) != n:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value))
for single_value in value_tuple:
try:
int(single_value)
except (ValueError, TypeError):
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value) + ' '
'including element ' + str(single_value) + ' of type' +
' ' + str(type(single_value)))
return value_tuple
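# A few illustrative calls for `normalize_tuple` above; not part of the
# original module, helper name hypothetical.
def _example_normalize_tuple():
  assert normalize_tuple(2, 2, 'strides') == (2, 2)        # int is broadcast
  assert normalize_tuple([1, 2], 2, 'strides') == (1, 2)   # iterable to tuple
  try:
    normalize_tuple((1, 2, 3), 2, 'strides')                # wrong length
  except ValueError:
    pass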
def normalize_data_format(value):
data_format = value.lower()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('The `data_format` argument must be one of '
'"channels_first", "channels_last". Received: ' +
str(value))
return data_format
def normalize_padding(value):
padding = value.lower()
if padding not in {'valid', 'same'}:
raise ValueError('The `padding` argument must be one of "valid", "same". '
'Received: ' + str(padding))
return padding
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
"""Determines output length of a convolution given input length.
Arguments:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
dilation: dilation rate, integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
assert padding in {'same', 'valid', 'full'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding == 'same':
output_length = input_length
elif padding == 'valid':
output_length = input_length - dilated_filter_size + 1
elif padding == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
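# A worked example of the convolution output-length arithmetic above; not part
# of the original module, helper name hypothetical.
def _example_conv_output_length():
  # 'valid' with dilation 2: dilated filter = 3 + (3 - 1) * (2 - 1) = 5,
  # so 10 - 5 + 1 = 6, and ceil(6 / 1) = 6.
  assert conv_output_length(10, 3, 'valid', 1, dilation=2) == 6
  # 'same' keeps the length before striding: ceil(10 / 2) = 5.
  assert conv_output_length(10, 3, 'same', 2) == 5
  # Unknown input lengths propagate as None.
  assert conv_output_length(None, 3, 'valid', 1) is None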
def conv_input_length(output_length, filter_size, padding, stride):
"""Determines input length of a convolution given output length.
Arguments:
output_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
Returns:
The input length (integer).
"""
if output_length is None:
return None
assert padding in {'same', 'valid', 'full'}
if padding == 'same':
pad = filter_size // 2
elif padding == 'valid':
pad = 0
elif padding == 'full':
pad = filter_size - 1
return (output_length - 1) * stride - 2 * pad + filter_size
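# A worked example of the inverse calculation above, checked against
# `conv_output_length`; not part of the original module, helper name
# hypothetical.
def _example_conv_input_length():
  # 'valid': (4 - 1) * 2 - 0 + 3 = 9, and indeed a length-9 input convolved
  # with filter 3, stride 2, 'valid' yields length 4.
  assert conv_input_length(4, 3, 'valid', 2) == 9
  assert conv_output_length(9, 3, 'valid', 2) == 4
  # 'same': pad = 3 // 2 = 1, so (4 - 1) * 2 - 2 + 3 = 7.
  assert conv_input_length(4, 3, 'same', 2) == 7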
def deconv_output_length(input_length, filter_size, padding, stride):
"""Determines output length of a transposed convolution given input length.
Arguments:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
input_length *= stride
if padding == 'valid':
input_length += max(filter_size - stride, 0)
elif padding == 'full':
input_length -= (stride + filter_size - 2)
return input_length
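# A worked example of the transposed-convolution length arithmetic above; not
# part of the original module, helper name hypothetical.
def _example_deconv_output_length():
  # 'same': the length is simply multiplied by the stride: 5 * 2 = 10.
  assert deconv_output_length(5, 3, 'same', 2) == 10
  # 'valid': 5 * 2 + max(3 - 2, 0) = 11.
  assert deconv_output_length(5, 3, 'valid', 2) == 11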
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if isinstance(pred, variables.Variable):
return control_flow_ops.cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name)
return smart_module.smart_cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name)
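# Illustrative sketch of `smart_cond` above: a static Python bool resolves to
# a single branch, while a tensor predicate is routed through a cond op at run
# time (graph mode assumed); not part of the original module, helper name
# hypothetical.
def _example_smart_cond():
  from tensorflow.python.ops import array_ops  # local imports for the sketch
  from tensorflow.python.ops import math_ops
  # Static predicate: only the chosen branch is called.
  static_result = smart_cond(True,
                             true_fn=lambda: array_ops.ones(()),
                             false_fn=lambda: array_ops.zeros(()))
  # Tensor predicate whose value is not known statically.
  pred = math_ops.greater(array_ops.ones(()), array_ops.zeros(()))
  dynamic_result = smart_cond(pred,
                              true_fn=lambda: array_ops.ones(()),
                              false_fn=lambda: array_ops.zeros(()))
  return static_result, dynamic_result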
def constant_value(pred):
"""Return the bool value for `pred`, or None if `pred` had a dynamic value.
Arguments:
pred: A scalar, either a Python bool or a TensorFlow boolean variable
or tensor, or the Python integer 1 or 0.
Returns:
True or False if `pred` has a constant boolean value, None otherwise.
Raises:
TypeError: If `pred` is not a Variable, Tensor or bool, or Python
      integer 1 or 0.
"""
# Allow integer booleans.
if isinstance(pred, int):
if pred == 1:
pred = True
elif pred == 0:
pred = False
if isinstance(pred, variables.Variable):
return None
return smart_module.smart_constant_value(pred)
def object_list_uid(object_list):
"""Creates a single string from object ids."""
object_list = nest.flatten(object_list)
return ', '.join([str(abs(id(x))) for x in object_list])
def static_shape(x):
"""Get the static shape of a Tensor, or None if it is unavailable."""
if x is None:
return None
try:
return tuple(x.get_shape().as_list())
except ValueError:
return None
def get_reachable_from_inputs(inputs, targets=None):
"""Returns the set of tensors reachable from `inputs`.
Stops if all targets have been found (target is optional).
Only valid in Symbolic mode, not Eager mode.
Args:
inputs: List of tensors.
targets: List of tensors.
Returns:
A set of tensors reachable from the inputs (includes the inputs themselves).
"""
reachable = set(inputs)
if targets:
targets = set(targets)
queue = inputs[:]
while queue:
x = queue.pop()
outputs = []
try:
consumers = x.consumers()
except AttributeError:
# Case where x is a variable type
consumers = [x.op]
for z in consumers:
consumer_outputs = z.outputs
if consumer_outputs: # May be None
outputs += consumer_outputs
for y in outputs:
if y not in reachable:
reachable.add(y)
queue.insert(0, y)
if targets and targets.issubset(reachable):
return reachable
return reachable
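# Illustrative sketch of reachability tracing with the helper above; graph
# mode assumed, not part of the original module, helper name hypothetical.
def _example_get_reachable_from_inputs():
  from tensorflow.python.ops import array_ops  # local imports for the sketch
  from tensorflow.python.ops import math_ops
  x = array_ops.placeholder(dtype='float32', shape=(None, 3))
  y = math_ops.square(x)
  z = math_ops.reduce_sum(y)
  reachable = get_reachable_from_inputs([x])
  # `reachable` contains `x` plus every tensor downstream of it, here `y` and
  # `z`.
  return reachable, y, z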
|
tensorflow-master
|
tensorflow/python/layers/utils.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""This library provides a set of high-level neural networks layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-import-order,unused-import
# Base objects.
from tensorflow.python.layers.base import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
# Core layers.
from tensorflow.python.layers.core import Dense
from tensorflow.python.layers.core import Dropout
from tensorflow.python.layers.core import Flatten
from tensorflow.python.layers.core import dense
from tensorflow.python.layers.core import dropout
from tensorflow.python.layers.core import flatten
# Convolutional layers.
from tensorflow.python.layers.convolutional import SeparableConv1D
from tensorflow.python.layers.convolutional import SeparableConv2D
from tensorflow.python.layers.convolutional import SeparableConvolution2D
from tensorflow.python.layers.convolutional import Conv2DTranspose
from tensorflow.python.layers.convolutional import Convolution2DTranspose
from tensorflow.python.layers.convolutional import Conv3DTranspose
from tensorflow.python.layers.convolutional import Convolution3DTranspose
from tensorflow.python.layers.convolutional import Conv1D
from tensorflow.python.layers.convolutional import Convolution1D
from tensorflow.python.layers.convolutional import Conv2D
from tensorflow.python.layers.convolutional import Convolution2D
from tensorflow.python.layers.convolutional import Conv3D
from tensorflow.python.layers.convolutional import Convolution3D
from tensorflow.python.layers.convolutional import separable_conv1d
from tensorflow.python.layers.convolutional import separable_conv2d
from tensorflow.python.layers.convolutional import conv2d_transpose
from tensorflow.python.layers.convolutional import conv3d_transpose
from tensorflow.python.layers.convolutional import conv1d
from tensorflow.python.layers.convolutional import conv2d
from tensorflow.python.layers.convolutional import conv3d
# Pooling layers.
from tensorflow.python.layers.pooling import AveragePooling1D
from tensorflow.python.layers.pooling import MaxPooling1D
from tensorflow.python.layers.pooling import AveragePooling2D
from tensorflow.python.layers.pooling import MaxPooling2D
from tensorflow.python.layers.pooling import AveragePooling3D
from tensorflow.python.layers.pooling import MaxPooling3D
from tensorflow.python.layers.pooling import average_pooling1d
from tensorflow.python.layers.pooling import max_pooling1d
from tensorflow.python.layers.pooling import average_pooling2d
from tensorflow.python.layers.pooling import max_pooling2d
from tensorflow.python.layers.pooling import average_pooling3d
from tensorflow.python.layers.pooling import max_pooling3d
# Normalization layers.
from tensorflow.python.layers.normalization import BatchNormalization
from tensorflow.python.layers.normalization import batch_normalization
# pylint: enable=g-bad-import-order,unused-import
|
tensorflow-master
|
tensorflow/python/layers/layers.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the normalization layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.layers import base
from tensorflow.python.ops import init_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['layers.BatchNormalization'])
class BatchNormalization(keras_layers.BatchNormalization, base.Layer):
"""Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Keras APIs handle BatchNormalization updates to the moving_mean and
moving_variance as part of their `fit()` and `evaluate()` loops. However, if a
custom training loop is used with an instance of `Model`, these updates need
to be explicitly included. Here's a simple example of how it can be done:
```python
# model is an instance of Model that contains BatchNormalization layer.
update_ops = model.get_updates_for(None) + model.get_updates_for(features)
train_op = optimizer.minimize(loss)
train_op = tf.group([train_op, update_ops])
```
Arguments:
axis: An `int` or list of `int`, the axis or axes that should be
normalized, typically the features axis/axes. For instance, after a
`Conv2D` layer with `data_format="channels_first"`, set `axis=1`. If a
list of axes is provided, each axis in `axis` will be normalized
simultaneously. Default is `-1`, which uses the last axis. Note: when
using multi-axis batch norm, the `beta`, `gamma`, `moving_mean`, and
`moving_variance` variables are the same rank as the input Tensor, with
dimension size 1 in all reduced (non-axis) dimensions.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is not used. When
the next layer is linear (this also holds for e.g. `nn.relu`), this can be
disabled, since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: If `None` or `True`, use a faster, fused implementation if possible.
If `False`, use the system recommended implementation.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random.uniform(shape[-1:], 0.93, 1.07),
tf.random.uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
name: A string, the name of the layer.
"""
def __init__(self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None,
trainable=True,
virtual_batch_size=None,
adjustment=None,
name=None,
**kwargs):
super(BatchNormalization, self).__init__(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
beta_constraint=beta_constraint,
gamma_constraint=gamma_constraint,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_momentum,
fused=fused,
trainable=trainable,
virtual_batch_size=virtual_batch_size,
adjustment=adjustment,
name=name,
**kwargs)
def call(self, inputs, training=False):
return super(BatchNormalization, self).call(inputs, training=training)
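# Illustrative sketch (not part of the original file): a minimal graph-mode
# version of the update handling described in the class docstring, using the
# layer's `updates` property directly rather than a Keras `Model`. Names
# below (x, bn, loss, ...) are made up for the example.
#
#   import tensorflow.compat.v1 as tf
#   tf.disable_eager_execution()
#
#   x = tf.placeholder(tf.float32, [None, 16])
#   bn = BatchNormalization()
#   y = bn(x, training=True)
#   loss = tf.reduce_mean(tf.square(y))
#   train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
#   # The moving_mean/moving_variance updates only run if they are grouped
#   # into (or made a control dependency of) the train op:
#   train_op = tf.group([train_op] + bn.updates)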
@deprecation.deprecated(
date=None, instructions='Use keras.layers.BatchNormalization instead. In '
'particular, `tf.control_dependencies(tf.GraphKeys.UPDATE_OPS)` should not '
'be used (consult the `tf.keras.layers.batch_normalization` '
'documentation).')
@tf_export(v1=['layers.batch_normalization'])
def batch_normalization(inputs,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
training=False,
trainable=True,
name=None,
reuse=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None,
virtual_batch_size=None,
adjustment=None):
"""Functional interface for the batch normalization layer.
Reference: http://arxiv.org/abs/1502.03167
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Note: when training, the moving_mean and moving_variance need to be updated.
By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
need to be executed alongside the `train_op`. Also, be sure to add any
batch_normalization ops before getting the update_ops collection. Otherwise,
update_ops will be empty, and training/inference will not work properly. For
example:
```python
x_norm = tf.compat.v1.layers.batch_normalization(x, training=training)
# ...
update_ops = tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = optimizer.minimize(loss)
train_op = tf.group([train_op, update_ops])
```
Arguments:
inputs: Tensor input.
axis: An `int`, the axis that should be normalized (typically the features
axis). For instance, after a `Convolution2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is not used. When
the next layer is linear (this also holds for e.g. `nn.relu`), this can be
disabled, since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
(normalized with statistics of the current batch) or in inference mode
(normalized with moving statistics). **NOTE**: make sure to set this
parameter correctly, or else your training/inference will not work
properly.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: If `None` or `True`, use a faster, fused implementation if possible.
If `False`, use the system recommended implementation.
virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random.uniform(shape[-1:], 0.93, 1.07),
tf.random.uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = BatchNormalization(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
beta_constraint=beta_constraint,
gamma_constraint=gamma_constraint,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_momentum,
fused=fused,
trainable=trainable,
virtual_batch_size=virtual_batch_size,
adjustment=adjustment,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs, training=training)
# Aliases
BatchNorm = BatchNormalization
batch_norm = batch_normalization
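# Illustrative sketch (not part of the original file): the UPDATE_OPS pattern
# that the `batch_normalization` docstring describes, written out with
# `tf.control_dependencies` instead of `tf.group`. All tensor names are made
# up for the example.
#
#   import tensorflow.compat.v1 as tf
#   tf.disable_eager_execution()
#
#   x = tf.placeholder(tf.float32, [None, 16])
#   labels = tf.placeholder(tf.float32, [None, 16])
#   x_norm = tf.layers.batch_normalization(x, training=True)
#   loss = tf.losses.mean_squared_error(labels, x_norm)
#   # Collect the update ops *after* the batch_normalization call, then make
#   # the train op depend on them so the moving statistics are refreshed.
#   update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
#   with tf.control_dependencies(update_ops):
#     train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)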
|
tensorflow-master
|
tensorflow/python/layers/normalization.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.convolutional."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ConvTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
conv_layers.conv2d(images, 32, 3, data_format='invalid')
def testInvalidStrides(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.conv2d(images, 32, 3, strides=(1, 2, 3))
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.conv2d(images, 32, 3, strides=None)
def testInvalidKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.conv2d(images, 32, (1, 2, 3))
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.conv2d(images, 32, None)
@test_util.run_deprecated_v1
def testCreateConv2D(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
output = layer.apply(images)
self.assertEqual(output.op.name, 'conv2d/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv2DFloat16(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4), dtype='float16')
output = conv_layers.conv2d(images, 32, [3, 3], activation=nn_ops.relu)
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
def testCreateConv2DIntegerKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2D(32, 3)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
@test_util.run_deprecated_v1
def testCreateConv2DChannelsFirst(self):
height, width = 7, 9
images = random_ops.random_uniform((5, 4, height, width))
layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, 32, height - 2, width - 2])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
@test_util.run_deprecated_v1
def testUnknownInputChannels(self):
images = array_ops.placeholder(dtypes.float32, (5, 7, 9, None))
layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
with self.assertRaisesRegexp(ValueError,
'The channel dimension of the inputs '
'should be defined. Found `None`.'):
_ = layer.apply(images)
images = array_ops.placeholder(dtypes.float32, (5, None, 7, 9))
layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
with self.assertRaisesRegexp(ValueError,
'The channel dimension of the inputs '
'should be defined. Found `None`.'):
_ = layer.apply(images)
def testConv2DPaddingSame(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 32), seed=1)
layer = conv_layers.Conv2D(64, images.get_shape()[1:3], padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])
def testCreateConvWithStrides(self):
height, width = 6, 8
# Test strides tuple
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 2), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 32])
# Test strides integer
layer = conv_layers.Conv2D(32, [3, 3], strides=2, padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 32])
# Test unequal strides
layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 1), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width, 32])
@test_util.run_deprecated_v1
def testCreateConv1D(self):
width = 7
data = random_ops.random_uniform((5, width, 4))
layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
output = layer.apply(data)
self.assertEqual(output.op.name, 'conv1d/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv1DFloat16(self):
width = 7
data = random_ops.random_uniform((5, width, 4), dtype='float16')
output = conv_layers.conv1d(data, 32, 3, activation=nn_ops.relu)
self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])
@test_util.run_deprecated_v1
def testCreateConv1DChannelsFirst(self):
width = 7
data = random_ops.random_uniform((5, 4, width))
layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
output = layer.apply(data)
self.assertListEqual(output.get_shape().as_list(), [5, 32, width - 2])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
@test_util.run_deprecated_v1
def testUnknownInputChannelsConv1D(self):
data = array_ops.placeholder(dtypes.float32, (5, 4, None))
layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
with self.assertRaisesRegexp(ValueError,
'The channel dimension of the inputs '
'should be defined. Found `None`.'):
_ = layer.apply(data)
data = array_ops.placeholder(dtypes.float32, (5, None, 4))
layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
with self.assertRaisesRegexp(ValueError,
'The channel dimension of the inputs '
'should be defined. Found `None`.'):
_ = layer.apply(data)
@test_util.run_deprecated_v1
def testCreateConv3D(self):
depth, height, width = 6, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 4))
layer = conv_layers.Conv3D(32, [3, 3, 3], activation=nn_ops.relu)
output = layer.apply(volumes)
self.assertEqual(output.op.name, 'conv3d/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, depth - 2, height - 2, width - 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
@test_util.run_deprecated_v1
def testUnknownInputChannelsConv3D(self):
volumes = array_ops.placeholder(dtypes.float32, (5, 6, 7, 9, None))
layer = conv_layers.Conv3D(32, [3, 3, 3], activation=nn_ops.relu)
with self.assertRaisesRegexp(ValueError,
'The channel dimension of the inputs '
'should be defined. Found `None`.'):
_ = layer.apply(volumes)
@test_util.run_deprecated_v1
def testConv2DKernelRegularizer(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv2D(32, [3, 3], kernel_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testConv2DBiasRegularizer(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv2D(32, [3, 3], bias_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testConv2DNoBias(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2D(
32, [3, 3], activation=nn_ops.relu, use_bias=False)
output = layer.apply(images)
self.assertEqual(output.op.name, 'conv2d/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertEqual(layer.bias, None)
def testDilatedConv2D(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=3)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 3, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
# Test tuple dilation rate
layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=(1, 3))
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, height - 2, 3, 32])
@test_util.run_deprecated_v1
def testFunctionalConv2DReuse(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3], name='conv1')
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv2d(images, 32, [3, 3], name='conv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 2)
@test_util.run_deprecated_v1
def testFunctionalConv2DReuseFromScope(self):
with variable_scope.variable_scope('scope'):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3], name='conv1')
self.assertEqual(len(variables.trainable_variables()), 2)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.conv2d(images, 32, [3, 3], name='conv1')
self.assertEqual(len(variables.trainable_variables()), 2)
@test_util.run_deprecated_v1
def testFunctionalConv2DInitializerFromScope(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'scope', initializer=init_ops.ones_initializer()):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3], name='conv1')
weights = variables.trainable_variables()
# Check the names of weights in order.
self.assertTrue('kernel' in weights[0].name)
self.assertTrue('bias' in weights[1].name)
self.evaluate(variables.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from scope)
self.assertAllClose(weights[0], np.ones((3, 3, 3, 32)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((32)))
@test_util.run_deprecated_v1
def testFunctionalConv2DNoReuse(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv2d(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 4)
def testConstraints(self):
# Conv1D
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
conv1d = conv_layers.Conv1D(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 5), seed=1)
conv1d(inputs)
self.assertEqual(conv1d.kernel_constraint, k_constraint)
self.assertEqual(conv1d.bias_constraint, b_constraint)
# Conv2D
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
conv2d = conv_layers.Conv2D(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
conv2d(inputs)
self.assertEqual(conv2d.kernel_constraint, k_constraint)
self.assertEqual(conv2d.bias_constraint, b_constraint)
# Conv3D
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
conv3d = conv_layers.Conv3D(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 3, 5), seed=1)
conv3d(inputs)
self.assertEqual(conv3d.kernel_constraint, k_constraint)
self.assertEqual(conv3d.bias_constraint, b_constraint)
@test_util.run_deprecated_v1
def testConv3DChannelsFirst(self):
# Test case for GitHub issue 15655
images = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 1, 32, 32, 32])
conv_layers.conv3d(images, 32, 9, data_format='channels_first')
class SeparableConv1DTest(test.TestCase):
def testInvalidDataFormat(self):
length = 9
data = random_ops.random_uniform((5, length, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
conv_layers.separable_conv1d(data, 32, 3, data_format='invalid')
def testInvalidStrides(self):
length = 9
data = random_ops.random_uniform((5, length, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.separable_conv1d(data, 32, 3, strides=(1, 2))
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.separable_conv1d(data, 32, 3, strides=None)
def testInvalidKernelSize(self):
length = 9
data = random_ops.random_uniform((5, length, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.separable_conv1d(data, 32, (1, 2))
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.separable_conv1d(data, 32, None)
@test_util.run_deprecated_v1
def testCreateSeparableConv1D(self):
length = 9
data = random_ops.random_uniform((5, length, 4))
layer = conv_layers.SeparableConv1D(32, 3, activation=nn_ops.relu)
output = layer.apply(data)
self.assertEqual(output.op.name, 'separable_conv1d/Relu')
self.assertEqual(output.get_shape().as_list(), [5, length - 2, 32])
self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 1])
self.assertEqual(layer.pointwise_kernel.get_shape().as_list(), [1, 4, 32])
self.assertEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv1DDepthMultiplier(self):
length = 9
data = random_ops.random_uniform((5, length, 4))
layer = conv_layers.SeparableConv1D(32, 3, depth_multiplier=2)
output = layer.apply(data)
self.assertEqual(output.get_shape().as_list(), [5, length - 2, 32])
self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 2])
self.assertEqual(layer.pointwise_kernel.get_shape().as_list(), [1, 8, 32])
self.assertEqual(layer.bias.get_shape().as_list(), [32])
@test_util.run_deprecated_v1
def testCreateSeparableConv1DChannelsFirst(self):
length = 9
data = random_ops.random_uniform((5, 4, length))
layer = conv_layers.SeparableConv1D(32, 3, data_format='channels_first')
output = layer.apply(data)
self.assertEqual(output.get_shape().as_list(), [5, 32, length - 2])
self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 1])
self.assertEqual(layer.pointwise_kernel.get_shape().as_list(), [1, 4, 32])
self.assertEqual(layer.bias.get_shape().as_list(), [32])
def testSeparableConv1DPaddingSame(self):
length = 9
data = random_ops.random_uniform((5, length, 32), seed=1)
layer = conv_layers.SeparableConv1D(
64, length, padding='same')
output = layer.apply(data)
self.assertEqual(output.get_shape().as_list(), [5, length, 64])
def testCreateSeparableConv1DWithStrides(self):
length = 10
data = random_ops.random_uniform((5, length, 3), seed=1)
layer = conv_layers.SeparableConv1D(32, 3, strides=2, padding='same')
output = layer.apply(data)
self.assertEqual(output.get_shape().as_list(), [5, length // 2, 32])
@test_util.run_deprecated_v1
def testCreateSeparableConv1DWithStridesChannelsFirst(self):
data_format = 'channels_first'
length = 10
data = random_ops.random_uniform((5, 3, length), seed=1)
layer = conv_layers.SeparableConv1D(
32, 3, strides=2, padding='same', data_format=data_format)
output = layer.apply(data)
self.assertEqual(output.get_shape().as_list(), [5, 32, length // 2])
@test_util.run_deprecated_v1
def testFunctionalConv1DReuse(self):
length = 10
data = random_ops.random_uniform((5, length, 3), seed=1)
conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
conv_layers.separable_conv1d(data, 32, 3, name='sepconv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 3)
@test_util.run_deprecated_v1
def testFunctionalConv1DReuseFromScope(self):
with variable_scope.variable_scope('scope'):
length = 10
data = random_ops.random_uniform((5, length, 3), seed=1)
conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
@test_util.run_deprecated_v1
def testFunctionalConv1DNoReuse(self):
length = 10
data = random_ops.random_uniform((5, length, 3), seed=1)
conv_layers.separable_conv1d(data, 32, 3)
self.assertEqual(len(variables.trainable_variables()), 3)
conv_layers.separable_conv1d(data, 32, 3)
self.assertEqual(len(variables.trainable_variables()), 6)
@test_util.run_deprecated_v1
def testSeparableConv1DDepthwiseRegularizer(self):
length = 9
data = random_ops.random_uniform((5, length, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv1D(32, 3, depthwise_regularizer=reg)
layer.apply(data)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testSeparableConv1DPointwiseRegularizer(self):
length = 9
data = random_ops.random_uniform((5, length, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv1D(32, 3, pointwise_regularizer=reg)
layer.apply(data)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testSeparableConv1DBiasRegularizer(self):
length = 9
data = random_ops.random_uniform((5, length, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv1D(32, 3, bias_regularizer=reg)
layer.apply(data)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testSeparableConv1DNoBias(self):
length = 9
data = random_ops.random_uniform((5, length, 4))
layer = conv_layers.SeparableConv1D(
32, 3, activation=nn_ops.relu, use_bias=False)
output = layer.apply(data)
self.assertEqual(output.op.name, 'separable_conv1d/Relu')
self.assertEqual(layer.bias, None)
def testConstraints(self):
d_constraint = lambda x: x / math_ops.reduce_sum(x)
p_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
layer = conv_layers.SeparableConv1D(2, 3,
depthwise_constraint=d_constraint,
pointwise_constraint=p_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.depthwise_constraint, d_constraint)
self.assertEqual(layer.pointwise_constraint, p_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
class SeparableConv2DTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
conv_layers.separable_conv2d(images, 32, 3, data_format='invalid')
def testInvalidStrides(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.separable_conv2d(images, 32, 3, strides=(1, 2, 3))
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.separable_conv2d(images, 32, 3, strides=None)
def testInvalidKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.separable_conv2d(images, 32, (1, 2, 3))
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.separable_conv2d(images, 32, None)
@test_util.run_deprecated_v1
def testCreateSeparableConv2D(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(32, [3, 3], activation=nn_ops.relu)
output = layer.apply(images)
self.assertEqual(output.op.name, 'separable_conv2d/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
[3, 3, 4, 1])
self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
[1, 1, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv2DDepthMultiplier(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(32, [3, 3], depth_multiplier=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
[3, 3, 4, 2])
self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
[1, 1, 8, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv2DIntegerKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(32, 3)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
[3, 3, 4, 1])
self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
[1, 1, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
@test_util.run_deprecated_v1
def testCreateSeparableConv2DChannelsFirst(self):
height, width = 7, 9
images = random_ops.random_uniform((5, 4, height, width))
layer = conv_layers.SeparableConv2D(
32, [3, 3], data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, 32, height - 2, width - 2])
self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
[3, 3, 4, 1])
self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
[1, 1, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testSeparableConv2DPaddingSame(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 32), seed=1)
layer = conv_layers.SeparableConv2D(
64, images.get_shape()[1:3], padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])
@test_util.run_deprecated_v1
def testCreateSeparableConvWithStrides(self):
height, width = 6, 8
# Test strides tuple
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=(2, 2), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 32])
# Test strides integer
layer = conv_layers.SeparableConv2D(32, [3, 3], strides=2, padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 32])
# Test unequal strides
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=(2, 1), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width, 32])
@test_util.run_deprecated_v1
def testCreateSeparableConvWithStridesChannelsFirst(self):
data_format = 'channels_first'
height, width = 6, 8
# Test strides tuple
images = random_ops.random_uniform((5, 3, height, width), seed=1)
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=(2, 2), padding='same', data_format=data_format)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, 32, height / 2, width / 2])
# Test strides integer
layer = conv_layers.SeparableConv2D(32, [3, 3], strides=2, padding='same',
data_format=data_format)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, 32, height / 2, width / 2])
# Test unequal strides
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=(2, 1), padding='same', data_format=data_format)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, 32, height / 2, width])
@test_util.run_deprecated_v1
def testFunctionalConv2DReuse(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
conv_layers.separable_conv2d(
images, 32, [3, 3], name='sepconv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 3)
@test_util.run_deprecated_v1
def testFunctionalConv2DReuseFromScope(self):
with variable_scope.variable_scope('scope'):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
@test_util.run_deprecated_v1
def testFunctionalConv2DInitializerFromScope(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'scope', initializer=init_ops.ones_initializer()):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
weights = variables.trainable_variables()
# Check the names of weights in order.
self.assertTrue('depthwise_kernel' in weights[0].name)
self.assertTrue('pointwise_kernel' in weights[1].name)
self.assertTrue('bias' in weights[2].name)
self.evaluate(variables.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from scope)
self.assertAllClose(weights[0], np.ones((3, 3, 3, 1)))
self.assertAllClose(weights[1], np.ones((1, 1, 3, 32)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[2], np.zeros((32)))
@test_util.run_deprecated_v1
def testFunctionalConv2DNoReuse(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 3)
conv_layers.separable_conv2d(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 6)
@test_util.run_deprecated_v1
def testSeparableConv2DDepthwiseRegularizer(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv2D(32, [3, 3], depthwise_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testSeparableConv2DPointwiseRegularizer(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv2D(32, [3, 3], pointwise_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testSeparableConv2DBiasRegularizer(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv2D(32, [3, 3], bias_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testSeparableConv2DNoBias(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(
32, [3, 3], activation=nn_ops.relu, use_bias=False)
output = layer.apply(images)
self.assertEqual(output.op.name, 'separable_conv2d/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
[3, 3, 4, 1])
self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
[1, 1, 4, 32])
self.assertEqual(layer.bias, None)
def testConstraints(self):
d_constraint = lambda x: x / math_ops.reduce_sum(x)
p_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
layer = conv_layers.SeparableConv2D(2, 3,
depthwise_constraint=d_constraint,
pointwise_constraint=p_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.depthwise_constraint, d_constraint)
self.assertEqual(layer.pointwise_constraint, p_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
class Conv2DTransposeTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
conv_layers.conv2d_transpose(images, 32, 3, data_format='invalid')
def testInvalidStrides(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.conv2d_transpose(images, 32, 3, strides=(1, 2, 3))
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.conv2d_transpose(images, 32, 3, strides=None)
def testInvalidKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.conv2d_transpose(images, 32, (1, 2, 3))
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.conv2d_transpose(images, 32, None)
@test_util.run_deprecated_v1
def testCreateConv2DTranspose(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2DTranspose(32, [3, 3], activation=nn_ops.relu)
output = layer.apply(images)
self.assertEqual(output.op.name, 'conv2d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv2DTransposeFloat16(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4), dtype='float16')
output = conv_layers.conv2d_transpose(images, 32, [3, 3],
activation=nn_ops.relu)
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
def testCreateConv2DTransposeIntegerKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2DTranspose(32, 3)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateConv2DTransposeChannelsFirst(self):
height, width = 7, 9
images = random_ops.random_uniform((5, 4, height, width))
layer = conv_layers.Conv2DTranspose(
32, [3, 3], data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, 32, height + 2, width + 2])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv2DTransposePaddingSame(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 32), seed=1)
layer = conv_layers.Conv2DTranspose(
64, images.get_shape()[1:3], padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])
def testCreateConv2DTransposeWithStrides(self):
height, width = 6, 8
# Test strides tuple
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layer = conv_layers.Conv2DTranspose(
32, [3, 3], strides=(2, 2), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height * 2, width * 2, 32])
# Test strides integer
layer = conv_layers.Conv2DTranspose(32, [3, 3], strides=2, padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height * 2, width * 2, 32])
# Test unequal strides
layer = conv_layers.Conv2DTranspose(
32, [3, 3], strides=(2, 1), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height * 2, width, 32])
@test_util.run_deprecated_v1
def testConv2DTransposeKernelRegularizer(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv2DTranspose(32, [3, 3], kernel_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testConv2DTransposeBiasRegularizer(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv2DTranspose(32, [3, 3], bias_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testConv2DTransposeNoBias(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2DTranspose(
32, [3, 3], activation=nn_ops.relu, use_bias=False)
output = layer.apply(images)
self.assertEqual(output.op.name, 'conv2d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertEqual(layer.bias, None)
@test_util.run_deprecated_v1
def testFunctionalConv2DTransposeReuse(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 2)
@test_util.run_deprecated_v1
def testFunctionalConv2DTransposeReuseFromScope(self):
with variable_scope.variable_scope('scope'):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
@test_util.run_deprecated_v1
def testFunctionalConv2DTransposeInitializerFromScope(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'scope', initializer=init_ops.ones_initializer()):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
weights = variables.trainable_variables()
# Check the names of weights in order.
self.assertTrue('kernel' in weights[0].name)
self.assertTrue('bias' in weights[1].name)
self.evaluate(variables.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from scope)
self.assertAllClose(weights[0], np.ones((3, 3, 32, 3)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((32)))
@test_util.run_deprecated_v1
def testFunctionalConv2DTransposeNoReuse(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv2d_transpose(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 4)
def testConstraints(self):
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
layer = conv_layers.Conv2DTranspose(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.kernel_constraint, k_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
class Conv3DTransposeTest(test.TestCase):
def testInvalidDataFormat(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
conv_layers.conv3d_transpose(volumes, 4, 3, data_format='invalid')
def testInvalidStrides(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.conv3d_transpose(volumes, 4, 3, strides=(1, 2))
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.conv3d_transpose(volumes, 4, 3, strides=None)
def testInvalidKernelSize(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.conv3d_transpose(volumes, 4, (1, 2))
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.conv3d_transpose(volumes, 4, None)
@test_util.run_deprecated_v1
def testCreateConv3DTranspose(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], activation=nn_ops.relu)
output = layer.apply(volumes)
self.assertEqual(output.op.name, 'conv3d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
def testCreateConv3DTransposeIntegerKernelSize(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(4, 3)
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
@test_util.run_deprecated_v1
def testCreateConv3DTransposeChannelsFirst(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, 32, depth, height, width))
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], data_format='channels_first')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, 4, depth + 2, height + 2, width + 2])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
def testConv3DTransposePaddingSame(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 64), seed=1)
layer = conv_layers.Conv3DTranspose(
32, volumes.get_shape()[1:4], padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth, height, width, 32])
def testCreateConv3DTransposeWithStrides(self):
depth, height, width = 4, 6, 8
# Test strides tuple.
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], strides=(2, 2, 2), padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth * 2, height * 2, width * 2, 4])
# Test strides integer.
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], strides=2, padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth * 2, height * 2, width * 2, 4])
# Test unequal strides.
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], strides=(2, 1, 1), padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth * 2, height, width, 4])
@test_util.run_deprecated_v1
def testConv3DTransposeKernelRegularizer(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], kernel_regularizer=reg)
layer.apply(volumes)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testConv3DTransposeBiasRegularizer(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], bias_regularizer=reg)
layer.apply(volumes)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(self.evaluate(layer.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testConv3DTransposeNoBias(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], activation=nn_ops.relu, use_bias=False)
output = layer.apply(volumes)
self.assertEqual(output.op.name, 'conv3d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertEqual(layer.bias, None)
@test_util.run_deprecated_v1
def testFunctionalConv3DTransposeReuse(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv3d_transpose(
volumes, 4, [3, 3, 3], name='deconv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 2)
@test_util.run_deprecated_v1
def testFunctionalConv3DTransposeReuseFromScope(self):
with variable_scope.variable_scope('scope'):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
@test_util.run_deprecated_v1
def testFunctionalConv3DTransposeInitializerFromScope(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'scope', initializer=init_ops.ones_initializer()):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform(
(5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
weights = variables.trainable_variables()
# Check the names of weights in order.
self.assertTrue('kernel' in weights[0].name)
self.assertTrue('bias' in weights[1].name)
self.evaluate(variables.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from scope)
self.assertAllClose(weights[0], np.ones((3, 3, 3, 4, 32)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((4)))
@test_util.run_deprecated_v1
def testFunctionalConv3DTransposeNoReuse(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
self.assertEqual(len(variables.trainable_variables()), 4)
def testConstraints(self):
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
layer = conv_layers.Conv3DTranspose(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.kernel_constraint, k_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/layers/convolutional_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = base_layer.InputSpec # pylint: disable=invalid-name
_KERAS_STYLE_SCOPE = False
@tf_export(v1=['layers.experimental.keras_style_scope'])
@tf_contextlib.contextmanager
def keras_style_scope():
"""Use Keras-style variable management.
All tf.layers and tf RNN cells created in this scope use Keras-style
variable management. Creating such layers with a scope= argument is
disallowed, and reuse=True is disallowed.
The purpose of this scope is to allow users of existing layers to
slowly transition to a Keras layers API without breaking existing
functionality.
One example of this is when using TensorFlow's RNN classes with Keras
Models or Networks. Because Keras models do not properly set variable
scopes, users of RNNs may either accidentally share scopes between two
different models, or get errors about variables that already exist.
Example:
```python
class RNNModel(tf.keras.Model):
def __init__(self, name):
super(RNNModel, self).__init__(name=name)
self.rnn = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
[tf.compat.v1.nn.rnn_cell.LSTMCell(64) for _ in range(2)])
def call(self, input, state):
return self.rnn(input, state)
model_1 = RNNModel("model_1")
model_2 = RNNModel("model_2")
# OK
output_1, next_state_1 = model_1(input, state)
# Raises an error about trying to create an already existing variable.
output_2, next_state_2 = model_2(input, state)
```
The solution is to wrap the model construction and execution in a keras-style
scope:
```python
with keras_style_scope():
model_1 = RNNModel("model_1")
model_2 = RNNModel("model_2")
# model_1 and model_2 are guaranteed to create their own variables.
output_1, next_state_1 = model_1(input, state)
output_2, next_state_2 = model_2(input, state)
assert len(model_1.weights) > 0
assert len(model_2.weights) > 0
assert(model_1.weights != model_2.weights)
```
Yields:
A keras layer style scope.
"""
global _KERAS_STYLE_SCOPE
stack = _KERAS_STYLE_SCOPE
_KERAS_STYLE_SCOPE = True
try:
yield
finally:
_KERAS_STYLE_SCOPE = stack
@tf_export(v1=['layers.experimental.set_keras_style'])
def set_keras_style():
"""Use Keras-style variable management.
  All tf.layers and tf RNN cells created after keras style has been enabled
use Keras-style variable management. Creating such layers with a
scope= argument is disallowed, and reuse=True is disallowed.
The purpose of this function is to allow users of existing layers to
  slowly transition to the Keras layers API without breaking existing
functionality.
For more details, see the documentation for `keras_style_scope`.
Note, once keras style has been set, it is set globally for the entire
program and cannot be unset.
Example:
```python
set_keras_style()
model_1 = RNNModel(name="model_1")
model_2 = RNNModel(name="model_2")
# model_1 and model_2 are guaranteed to create their own variables.
output_1, next_state_1 = model_1(input, state)
output_2, next_state_2 = model_2(input, state)
assert len(model_1.weights) > 0
assert len(model_2.weights) > 0
assert(model_1.weights != model_2.weights)
```
"""
global _KERAS_STYLE_SCOPE
_KERAS_STYLE_SCOPE = True
def _is_in_keras_style_scope():
global _KERAS_STYLE_SCOPE
return _KERAS_STYLE_SCOPE
@tf_export(v1=['layers.Layer'])
class Layer(base_layer.Layer):
"""Base layer class.
It is considered legacy, and we recommend the use of `tf.keras.layers.Layer`
instead.
Arguments:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
Read-only properties:
name: The name of the layer (string).
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
trainable_variables: List of trainable variables.
non_trainable_variables: List of non-trainable variables.
variables: List of all variables of this layer, trainable and
non-trainable.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
Mutable properties:
trainable: Whether the layer should be trained (boolean).
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
"""
def __init__(self, trainable=True, name=None, dtype=None,
**kwargs):
# For backwards compatibility, legacy layers do not use `ResourceVariable`
# by default.
self._use_resource_variables = False
scope = kwargs.pop('_scope', None)
self._reuse = kwargs.pop('_reuse', None)
# Avoid an incorrect lint error
self._trainable_weights = []
self.built = False
super(Layer, self).__init__(trainable=trainable, name=name, dtype=dtype,
**kwargs)
if _is_in_keras_style_scope():
if scope is not None:
raise ValueError(
'scope argument not allowed when keras style layers are enabled, '
'but saw: {}'.format(scope))
if self._reuse is not None:
raise ValueError(
'reuse argument not allowed when keras style layers are enabled, '
'but saw: {}'.format(self._reuse))
self._keras_style = True
else:
self._keras_style = False
self._call_has_scope_arg = 'scope' in self._call_fn_args
if scope:
with vs.variable_scope(scope) as captured_scope:
self._scope = captured_scope
else:
self._scope = None
self._current_scope = None
# We no longer track graph in tf.layers layers. This property is only kept to
# maintain API backward compatibility.
@property
@deprecation.deprecated(
date=None,
instructions='Stop using this property because tf.layers layers no '
'longer track their graph.')
def graph(self):
if context.executing_eagerly():
raise RuntimeError('Layer.graph not supported when executing eagerly.')
return None
def _init_set_name(self, name):
# Determine layer name (non-unique).
if isinstance(name, vs.VariableScope):
base_name = name.name
self._name, _ = self._make_unique_name()
else:
base_name = name
self._name = name
if not name:
self._name, base_name = self._make_unique_name()
self._base_name = base_name
def _make_unique_name(self, name_uid_map=None, avoid_names=None,
namespace='', zero_based=False):
base_name = base_layer.to_snake_case(self.__class__.__name__)
name = backend.unique_object_name(
base_name,
name_uid_map=name_uid_map,
avoid_names=avoid_names,
namespace=namespace,
zero_based=zero_based)
return (name, base_name)
@property
def scope_name(self):
if not self._scope:
raise ValueError('No name available for layer scope because the layer "' +
                       self._name + '" has not been used yet. The scope name' +
                       ' is determined the first time the layer instance is ' +
'called. You must therefore call the layer before ' +
'querying `scope_name`.')
return self._scope.name
def add_loss(self, losses, inputs=None):
previous_losses_length = len(self._losses)
previous_callable_losses_length = len(self._callable_losses)
super(Layer, self).add_loss(losses, inputs=inputs)
if not context.executing_eagerly():
# TODO(fchollet): deprecate collection below.
new_losses = self._losses[previous_losses_length:]
new_callable_losses = self._callable_losses[
previous_callable_losses_length:]
for regularizer in new_callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
new_losses.append(loss_tensor)
_add_elements_to_collection(
new_losses,
ops.GraphKeys.REGULARIZATION_LOSSES)
def _name_scope(self):
"""Determines op naming for the Layer."""
if self._keras_style:
return super(Layer, self)._name_scope()
return self._current_scope.original_name_scope
def _set_scope(self, scope=None):
if self._scope is None:
# If constructed with _scope=None, lazy setting of scope.
if self._reuse:
with vs.variable_scope(
scope if scope is not None else self._base_name) as captured_scope:
self._scope = captured_scope
else:
with vs.variable_scope(
scope, default_name=self._base_name) as captured_scope:
self._scope = captured_scope
def add_weight(self,
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
constraint=None,
use_resource=None,
synchronization=vs.VariableSynchronization.AUTO,
aggregation=vs.VariableAggregation.NONE,
partitioner=None,
**kwargs):
"""Adds a new variable to the layer, or gets an existing one; returns it.
Arguments:
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: initializer instance (callable).
regularizer: regularizer instance (callable).
      trainable: whether the variable should be part of the layer's
        "trainable_variables" (e.g. weights, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
Note, if the current variable scope is marked as non-trainable
then this parameter is ignored and any added variables are also
marked as non-trainable. `trainable` defaults to `True` unless
`synchronization` is set to `ON_READ`.
constraint: constraint instance (callable).
use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
partitioner: (optional) partitioner instance (callable). If
provided, when the requested variable is created it will be split
into multiple partitions according to `partitioner`. In this case,
an instance of `PartitionedVariable` is returned. Available
partitioners include `tf.compat.v1.fixed_size_partitioner` and
`tf.compat.v1.variable_axis_size_partitioner`. For more details, see
the documentation of `tf.compat.v1.get_variable` and the "Variable
Partitioners and Sharding" section of the API guide.
**kwargs: Additional keyword arguments.
Returns:
The created variable. Usually either a `Variable` or `ResourceVariable`
instance. If `partitioner` is not `None`, a `PartitionedVariable`
instance is returned.
Raises:
RuntimeError: If called with partitioned variable regularization and
eager execution is enabled.
ValueError: When trainable has been set to True with synchronization
set as `ON_READ`.
"""
for kwarg in kwargs:
if kwarg != 'experimental_autocast':
raise TypeError('Unknown keyword argument:', kwarg)
if self._keras_style:
return super(Layer, self).add_weight(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable and self.trainable,
constraint=constraint,
use_resource=use_resource,
synchronization=vs.VariableSynchronization.AUTO,
aggregation=vs.VariableAggregation.NONE,
partitioner=partitioner,
**kwargs)
if synchronization == vs.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.')
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
def _should_add_regularizer(variable, existing_variable_set):
if isinstance(variable, tf_variables.PartitionedVariable):
for var in variable:
if var in existing_variable_set:
return False
return True
else:
return variable not in existing_variable_set
init_graph = None
if not context.executing_eagerly():
default_graph = ops.get_default_graph()
if default_graph.building_function:
with ops.init_scope():
# Retrieve the variables from the graph into which variables
# will be lifted; if initialization ops will be lifted into
# the eager context, then there is nothing to retrieve, since variable
# collections are not supported when eager execution is enabled.
if not context.executing_eagerly():
init_graph = ops.get_default_graph()
existing_variables = set(tf_variables.global_variables())
else:
# Initialization ops will not be lifted out of the default graph.
init_graph = default_graph
existing_variables = set(tf_variables.global_variables())
if dtype is None:
dtype = self.dtype or dtypes.float32
self._set_scope(None)
reuse = self.built or self._reuse
prev_len_trainable = len(self._trainable_weights)
with vs.variable_scope(
self._scope, reuse=reuse, auxiliary_name_scope=False) as scope:
self._current_scope = scope
with ops.name_scope(self._name_scope()):
use_resource = (use_resource or
self._use_resource_variables or
scope.use_resource)
if initializer is None:
initializer = scope.initializer
variable = super(Layer, self).add_weight(
name,
shape,
dtype=dtypes.as_dtype(dtype),
initializer=initializer,
trainable=trainable and self.trainable,
constraint=constraint,
partitioner=partitioner,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
getter=vs.get_variable,
**kwargs)
if regularizer:
if (ops.executing_eagerly_outside_functions()
or _should_add_regularizer(variable, existing_variables)):
self._handle_weight_regularization(name, variable, regularizer)
if init_graph is not None:
# Handle edge case where a custom getter has overridden `trainable`.
# There is one known occurrence of this, in unit test
# testBasicRNNCellNotTrainable in
# contrib.rnn.python.kernel_tests.core_rnn_cell_test
with init_graph.as_default():
trainable_variables = tf_variables.trainable_variables()
if (trainable and self.trainable and
variable not in trainable_variables):
# A custom getter / variable scope overrode the trainable flag.
extra_trainable_vars = self._trainable_weights[prev_len_trainable:]
self._trainable_weights = self._trainable_weights[
:prev_len_trainable]
self._non_trainable_weights += extra_trainable_vars
return variable
def __call__(self, inputs, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Arguments:
inputs: input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
**Note**: kwarg `scope` is reserved for use by the layer.
Returns:
Output tensor(s).
Note:
- If the layer's `call` method takes a `scope` keyword argument,
this argument will be automatically set to the current variable scope.
- If the layer's `call` method takes a `mask` argument (as some Keras
layers do), its default value will be set to the mask generated
        for `inputs` by the previous layer (if `inputs` did come from
        a layer that generated a corresponding mask, i.e. if it came from
        a Keras layer with masking support).
Raises:
ValueError: if the layer's `call` method returns None (an invalid value).
"""
scope = kwargs.pop('scope', None)
if self._keras_style:
if scope is not None:
raise ValueError(
'scope argument not allowed when keras style layers are enabled, '
'but saw: {}'.format(scope))
return super(Layer, self).__call__(inputs, *args, **kwargs)
self._set_scope(scope)
if self.built:
try:
# Some classes which inherit from Layer do not use its constructor, so
# rather than initializing to None we check for an AttributeError.
scope_context_manager = self._always_reuse_variable_scope
except AttributeError:
# From this point we will always set reuse=True, so create a "final"
# variable scope with this setting. We avoid re-creating variable scopes
# after this point as an optimization.
self._always_reuse_variable_scope = vs.variable_scope(
self._scope, reuse=True, auxiliary_name_scope=False)
scope_context_manager = self._always_reuse_variable_scope
else:
scope_context_manager = vs.variable_scope(
self._scope, reuse=self._reuse, auxiliary_name_scope=False)
with scope_context_manager as scope:
self._current_scope = scope
try:
call_has_scope_arg = self._call_has_scope_arg
except AttributeError:
self._call_fn_args = function_utils.fn_args(self.call)
self._call_has_scope_arg = 'scope' in self._call_fn_args
call_has_scope_arg = self._call_has_scope_arg
if call_has_scope_arg:
kwargs['scope'] = scope
# Actually call layer
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
if not context.executing_eagerly():
# Update global default collections.
_add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
return outputs
def __deepcopy__(self, memo):
no_copy = set(['_graph'])
shallow_copy = set(['_scope', '_always_reuse_variable_scope'])
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k in no_copy:
setattr(result, k, v)
elif k in shallow_copy:
setattr(result, k, copy.copy(v))
elif base_layer.is_tensor_or_tensor_list(v):
setattr(result, k, v)
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
  def __setattr__(self, name, value):
    # Bypass the automatic dependency tracking performed by the parent Layer.
    super(trackable.Trackable, self).__setattr__(name, value)
@property
def _is_legacy_layer(self):
"""Used by keras to check compatibility. This should not be overridden."""
return True
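# Illustrative sketch (not part of the original file): a minimal legacy-style
# layer built on the `Layer` class above. `add_weight` creates its variable
# under the layer's variable scope, so a second call to the same layer
# instance reuses it, as described in `__call__`. All names are hypothetical.
class _ExampleScaleLayer(Layer):  # hypothetical example, illustration only
  def build(self, input_shape):
    self.scale = self.add_weight('scale', shape=[], trainable=True)
    super(_ExampleScaleLayer, self).build(input_shape)
  def call(self, inputs):
    return inputs * self.scale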
def _add_elements_to_collection(elements, collection_list):
if context.executing_eagerly():
raise RuntimeError('Using collections from Layers not supported in Eager '
'mode. Tried to add %s to %s' % (elements,
collection_list))
elements = nest.flatten(elements)
collection_list = nest.flatten(collection_list)
for name in collection_list:
collection = ops.get_collection_ref(name)
collection_set = set(collection)
for element in elements:
if element not in collection_set:
collection.append(element)
|
tensorflow-master
|
tensorflow/python/layers/base.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for writing decorators (which modify docstrings)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
def get_qualified_name(function):
# Python 3
if hasattr(function, '__qualname__'):
return function.__qualname__
# Python 2
if hasattr(function, 'im_class'):
return function.im_class.__name__ + '.' + function.__name__
return function.__name__
def _normalize_docstring(docstring):
"""Normalizes the docstring.
  Replaces tabs with spaces, removes leading and trailing blank lines, and
removes any indentation.
Copied from PEP-257:
https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
Args:
docstring: the docstring to normalize
Returns:
The normalized docstring
"""
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
# (we use sys.maxsize because sys.maxint doesn't exist in Python 3)
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
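# Illustrative sketch (not part of the original file): demonstrates the effect
# of `_normalize_docstring` on a tab-indented docstring. The helper name is
# hypothetical and exists only for illustration.
def _example_normalize_docstring():
  doc = 'First line.\n\n\tIndented body line.\n  '
  # Tabs are expanded and the common body indentation is removed, so per the
  # trimming logic above the result should be
  # 'First line.\n\nIndented body line.'.
  return _normalize_docstring(doc)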
def add_notice_to_docstring(
doc, instructions, no_doc_str, suffix_str, notice):
"""Adds a deprecation notice to a docstring.
Args:
doc: The original docstring.
instructions: A string, describing how to fix the problem.
no_doc_str: The default value to use for `doc` if `doc` is empty.
suffix_str: Is added to the end of the first line.
notice: A list of strings. The main notice warning body.
Returns:
A new docstring, with the notice attached.
Raises:
ValueError: If `notice` is empty.
"""
if not doc:
lines = [no_doc_str]
else:
lines = _normalize_docstring(doc).splitlines()
lines[0] += ' ' + suffix_str
if not notice:
raise ValueError('The `notice` arg must not be empty.')
notice[0] = 'Warning: ' + notice[0]
notice = [''] + notice + ([instructions] if instructions else [])
if len(lines) > 1:
# Make sure that we keep our distance from the main body
if lines[1].strip():
notice.append('')
lines[1:1] = notice
else:
lines += notice
return '\n'.join(lines)
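# Illustrative sketch (not part of the original file): shows how a deprecation
# notice is spliced into an existing docstring. The helper name and the notice
# text are hypothetical.
def _example_add_notice_to_docstring():
  doc = 'Does a thing.\n\nLonger description.'
  # Per the splicing logic above, the result should start with
  # 'Does a thing. (deprecated)', followed by a blank line, the
  # 'Warning: ...' body, and the instructions, before the longer description.
  return add_notice_to_docstring(
      doc,
      instructions='Use something_else instead.',
      no_doc_str='DEPRECATED FUNCTION',
      suffix_str='(deprecated)',
      notice=['THIS FUNCTION IS DEPRECATED.'])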
def validate_callable(func, decorator_name):
if not hasattr(func, '__call__'):
raise ValueError(
'%s is not a function. If this is a property, make sure'
' @property appears before @%s in your source code:'
'\n\n@property\n@%s\ndef method(...)' % (
func, decorator_name, decorator_name))
class classproperty(object): # pylint: disable=invalid-name
"""Class property decorator.
Example usage:
class MyClass(object):
@classproperty
def value(cls):
return '123'
  > print(MyClass.value)
  123
"""
def __init__(self, func):
self._func = func
def __get__(self, owner_self, owner_cls):
return self._func(owner_cls)
|
tensorflow-master
|
tensorflow/python/util/decorator_utils.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ExampleParserConfiguration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.example import example_parser_configuration_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.util.example_parser_configuration import extract_example_parser_configuration
BASIC_PROTO = """
feature_map {
key: "x"
value {
fixed_len_feature {
dtype: DT_FLOAT
shape {
dim {
size: 1
}
}
default_value {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 1
}
}
float_val: 33.0
}
values_output_tensor_name: "ParseExample/ParseExample:3"
}
}
}
feature_map {
key: "y"
value {
var_len_feature {
dtype: DT_STRING
values_output_tensor_name: "ParseExample/ParseExample:1"
indices_output_tensor_name: "ParseExample/ParseExample:0"
shapes_output_tensor_name: "ParseExample/ParseExample:2"
}
}
}
"""
class ExampleParserConfigurationTest(test.TestCase):
def testBasic(self):
golden_config = example_parser_configuration_pb2.ExampleParserConfiguration(
)
text_format.Parse(BASIC_PROTO, golden_config)
with session.Session() as sess:
examples = array_ops.placeholder(dtypes.string, shape=[1])
feature_to_type = {
'x': parsing_ops.FixedLenFeature([1], dtypes.float32, 33.0),
'y': parsing_ops.VarLenFeature(dtypes.string)
}
_ = parsing_ops.parse_example(examples, feature_to_type)
parse_example_op = sess.graph.get_operation_by_name(
'ParseExample/ParseExample')
config = extract_example_parser_configuration(parse_example_op, sess)
self.assertProtoEquals(golden_config, config)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/util/example_parser_configuration_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extract parse_example op configuration to a proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.example import example_parser_configuration_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def extract_example_parser_configuration(parse_example_op, sess):
  """Returns an ExampleParserConfiguration proto.
Args:
parse_example_op: A ParseExample `Operation`
sess: A tf.compat.v1.Session needed to obtain some configuration values.
Returns:
    An ExampleParserConfiguration proto.
Raises:
ValueError: If attributes are inconsistent.
"""
config = example_parser_configuration_pb2.ExampleParserConfiguration()
num_sparse = parse_example_op.get_attr("Nsparse")
num_dense = parse_example_op.get_attr("Ndense")
total_features = num_dense + num_sparse
sparse_types = parse_example_op.get_attr("sparse_types")
dense_types = parse_example_op.get_attr("Tdense")
dense_shapes = parse_example_op.get_attr("dense_shapes")
if len(sparse_types) != num_sparse:
raise ValueError("len(sparse_types) attribute does not match "
"Nsparse attribute (%d vs %d)" %
(len(sparse_types), num_sparse))
if len(dense_types) != num_dense:
raise ValueError("len(dense_types) attribute does not match "
"Ndense attribute (%d vs %d)" %
(len(dense_types), num_dense))
if len(dense_shapes) != num_dense:
raise ValueError("len(dense_shapes) attribute does not match "
"Ndense attribute (%d vs %d)" %
(len(dense_shapes), num_dense))
# Skip over the serialized input, and the names input.
fetch_list = parse_example_op.inputs[2:]
# Fetch total_features key names and num_dense default values.
if len(fetch_list) != (total_features + num_dense):
raise ValueError("len(fetch_list) does not match total features + "
"num_dense (%d vs %d)" %
(len(fetch_list), (total_features + num_dense)))
fetched = sess.run(fetch_list)
if len(fetched) != len(fetch_list):
raise ValueError("len(fetched) does not match len(fetch_list) "
"(%d vs %d)" % (len(fetched), len(fetch_list)))
# Fetch indices.
sparse_keys_start = 0
dense_keys_start = sparse_keys_start + num_sparse
dense_def_start = dense_keys_start + num_dense
# Output tensor indices.
sparse_indices_start = 0
sparse_values_start = num_sparse
sparse_shapes_start = sparse_values_start + num_sparse
dense_values_start = sparse_shapes_start + num_sparse
# Dense features.
for i in range(num_dense):
key = fetched[dense_keys_start + i]
feature_config = config.feature_map[key]
# Convert the default value numpy array fetched from the session run
# into a TensorProto.
fixed_config = feature_config.fixed_len_feature
fixed_config.default_value.CopyFrom(
tensor_util.make_tensor_proto(fetched[dense_def_start + i]))
# Convert the shape from the attributes
# into a TensorShapeProto.
fixed_config.shape.CopyFrom(
tensor_shape.TensorShape(dense_shapes[i]).as_proto())
fixed_config.dtype = dense_types[i].as_datatype_enum
# Get the output tensor name.
fixed_config.values_output_tensor_name = parse_example_op.outputs[
dense_values_start + i].name
# Sparse features.
for i in range(num_sparse):
key = fetched[sparse_keys_start + i]
feature_config = config.feature_map[key]
var_len_feature = feature_config.var_len_feature
var_len_feature.dtype = sparse_types[i].as_datatype_enum
var_len_feature.indices_output_tensor_name = parse_example_op.outputs[
sparse_indices_start + i].name
var_len_feature.values_output_tensor_name = parse_example_op.outputs[
sparse_values_start + i].name
var_len_feature.shapes_output_tensor_name = parse_example_op.outputs[
sparse_shapes_start + i].name
return config
|
tensorflow-master
|
tensorflow/python/util/example_parser_configuration.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tf_contextlib."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
@tf_contextlib.contextmanager
def test_yield_append_before_and_after_yield(x, before, after):
x.append(before)
yield
x.append(after)
@tf_contextlib.contextmanager
def test_yield_return_x_plus_1(x):
yield x + 1
@tf_contextlib.contextmanager
def test_params_and_defaults(a, b=2, c=True, d='hello'):
return [a, b, c, d]
class TfContextlibTest(test.TestCase):
def testRunsCodeBeforeYield(self):
x = []
with test_yield_append_before_and_after_yield(x, 'before', ''):
self.assertEqual('before', x[-1])
def testRunsCodeAfterYield(self):
x = []
with test_yield_append_before_and_after_yield(x, '', 'after'):
pass
self.assertEqual('after', x[-1])
def testNestedWith(self):
x = []
with test_yield_append_before_and_after_yield(x, 'before', 'after'):
with test_yield_append_before_and_after_yield(x, 'inner', 'outer'):
with test_yield_return_x_plus_1(1) as var:
x.append(var)
self.assertEqual(['before', 'inner', 2, 'outer', 'after'], x)
def testMultipleCallsOfSeparateInstances(self):
x = []
with test_yield_append_before_and_after_yield(x, 1, 2):
pass
with test_yield_append_before_and_after_yield(x, 3, 4):
pass
self.assertEqual([1, 2, 3, 4], x)
def testReturnsResultFromYield(self):
with test_yield_return_x_plus_1(3) as result:
self.assertEqual(4, result)
def testUnwrapContextManager(self):
decorators, target = tf_decorator.unwrap(test_params_and_defaults)
self.assertEqual(1, len(decorators))
self.assertTrue(isinstance(decorators[0], tf_decorator.TFDecorator))
self.assertEqual('contextmanager', decorators[0].decorator_name)
self.assertFalse(isinstance(target, tf_decorator.TFDecorator))
def testGetArgSpecReturnsWrappedArgSpec(self):
argspec = tf_inspect.getargspec(test_params_and_defaults)
self.assertEqual(['a', 'b', 'c', 'd'], argspec.args)
self.assertEqual((2, True, 'hello'), argspec.defaults)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/util/tf_contextlib_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
This module can perform operations on nested structures. A nested structure is a
Python sequence, tuple (including `namedtuple`), or dict that can contain
further sequences, tuples, and dicts.
attr.s decorated classes (http://www.attrs.org) are also supported, in the
same way as `namedtuple`.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e., no references in the structure of the input of these functions
should be recursive.
Example structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0),
(np.array([3, 4]), tf.constant([3, 4])))`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.util.tf_export import tf_export
_SHALLOW_TREE_HAS_INVALID_KEYS = (
"The shallow_tree's keys are not a subset of the input_tree's keys. The "
"shallow_tree has the following keys that are not in the input_tree: {}.")
_STRUCTURES_HAVE_MISMATCHING_TYPES = (
"The two structures don't have the same sequence type. Input structure has "
"type {input_type}, while shallow structure has type {shallow_type}.")
_STRUCTURES_HAVE_MISMATCHING_LENGTHS = (
"The two structures don't have the same sequence length. Input "
"structure has length {input_length}, while shallow structure has length "
"{shallow_length}."
)
_INPUT_TREE_SMALLER_THAN_SHALLOW_TREE = (
"The input_tree has fewer elements than the shallow_tree. Input structure "
"has length {input_size}, while shallow structure has length "
"{shallow_size}.")
_IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ = (
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: {}.")
def _get_attrs_items(obj):
"""Returns a list of (name, value) pairs from an attrs instance.
The list will be sorted by name.
Args:
obj: an object.
Returns:
A list of (attr_name, attr_value) pairs, sorted by attr_name.
"""
attrs = getattr(obj.__class__, "__attrs_attrs__")
attr_names = [a.name for a in attrs]
return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(dict_)
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _is_namedtuple(instance, strict=False):
"""Returns True iff `instance` is a `namedtuple`.
Args:
instance: An instance of a Python object.
strict: If True, `instance` is considered to be a `namedtuple` only if
it is a "plain" namedtuple. For instance, a class inheriting
from a `namedtuple` will be considered to be a `namedtuple`
iff `strict=False`.
Returns:
True if `instance` is a `namedtuple`.
"""
return _pywrap_tensorflow.IsNamedtuple(instance, strict)
# See the swig file (util.i) for documentation.
_is_mapping = _pywrap_tensorflow.IsMapping
_is_attrs = _pywrap_tensorflow.IsAttrs
_is_composite_tensor = _pywrap_tensorflow.IsCompositeTensor
_is_type_spec = _pywrap_tensorflow.IsTypeSpec
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, `namedtuple`, `dict`,
      `collections.OrderedDict`, or `composite_tensor.CompositeTensor`
or `type_spec.TypeSpec`.
args: elements to be converted to the `instance` type.
Returns:
`args` with the type of `instance`.
"""
if _is_mapping(instance):
# Pack dictionaries in a deterministic order by sorting the keys.
# Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
result = dict(zip(_sorted(instance), args))
return type(instance)((key, result[key]) for key in instance)
elif _is_namedtuple(instance) or _is_attrs(instance):
return type(instance)(*args)
elif _is_composite_tensor(instance):
assert len(args) == 1
spec = instance._type_spec # pylint: disable=protected-access
return spec._from_components(args[0]) # pylint: disable=protected-access
elif _is_type_spec(instance):
# Pack a CompositeTensor's components according to a TypeSpec.
assert len(args) == 1
if args[0] and _is_type_spec(args[0][0]):
raise ValueError("Can not pack TypeSpec into a TypeSpec.")
return instance._from_components(args[0]) # pylint: disable=protected-access
elif isinstance(instance, _six.moves.range):
return _sequence_like(list(instance), args)
else:
# Not a namedtuple
return type(instance)(args)
def _yield_value(iterable):
for _, v in _yield_sorted_items(iterable):
yield v
def _yield_sorted_items(iterable):
"""Yield (key, value) pairs for `iterable` in a deterministic order.
For Sequences, the key will be an int, the array index of a value.
For Mappings, the key will be the dictionary key.
For objects (e.g. namedtuples), the key will be the attribute name.
In all cases, the keys will be iterated in sorted order.
Args:
iterable: an iterable.
Yields:
The iterable's (key, value) pairs, in order of sorted keys.
"""
if isinstance(iterable, _collections.Mapping):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
for key in _sorted(iterable):
yield key, iterable[key]
elif _is_attrs(iterable):
for item in _get_attrs_items(iterable):
yield item
elif _is_namedtuple(iterable):
for field in iterable._fields:
yield field, getattr(iterable, field)
elif _is_composite_tensor(iterable):
yield type(iterable).__name__, iterable._to_components() # pylint: disable=protected-access
elif _is_type_spec(iterable):
# Note: to allow CompositeTensors and their TypeSpecs to have matching
# structures, we need to use the same key string here.
yield iterable.value_type.__name__, iterable._component_specs # pylint: disable=protected-access
else:
for item in enumerate(iterable):
yield item
# See the swig file (util.i) for documentation.
is_sequence = _pywrap_tensorflow.IsSequence
# See the swig file (util.i) for documentation.
is_sequence_or_composite = _pywrap_tensorflow.IsSequenceOrComposite
@tf_export("nest.is_nested")
def is_nested(seq):
"""Returns true if its input is a collections.Sequence (except strings).
Args:
seq: an input sequence.
Returns:
    True if the sequence is not a string and is a collections.Sequence or a
dict.
"""
return is_sequence(seq)
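# Illustrative sketch (not part of the original module): strings are plain
# Python sequences but are not treated as nested structures. The helper name
# is hypothetical and exists only for illustration.
def _example_is_nested():
  # Per the definition above: lists, tuples and dicts are nested, while
  # strings and scalars are not.
  return [is_nested([1, 2]), is_nested({"a": 1}),
          is_nested("abc"), is_nested(0)]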
@tf_export("nest.flatten")
def flatten(structure, expand_composites=False):
"""Returns a flat list from a given nested structure.
  If `structure` is not a sequence, tuple, or dict, then returns a
  single-element list: `[structure]`.
In the case of dict instances, the sequence consists of the values, sorted by
key to ensure deterministic behavior. This is true also for OrderedDict
instances: their sequence order is ignored, the sorting order of keys is used
instead. The same convention is followed in pack_sequence_as. This correctly
repacks dicts and OrderedDicts after they have been flattened, and also allows
flattening an OrderedDict and then repacking it back using a corresponding
plain dict, or vice-versa. Dictionaries with non-sortable keys cannot be
flattened.
Users must not modify any collections used in nest while this function is
running.
Args:
structure: an arbitrarily nested structure or a scalar object. Note, numpy
arrays are considered scalars.
expand_composites: If true, then composite tensors such as tf.SparseTensor
and tf.RaggedTensor are expanded into their component tensors.
Returns:
A Python list, the flattened version of the input.
Raises:
TypeError: The nest is or contains a dict with non-sortable keys.
"""
return _pywrap_tensorflow.Flatten(structure, expand_composites)
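# Illustrative sketch (not part of the original module): `flatten` returns the
# leaf values, with dict entries ordered by sorted key as documented above.
# The helper name is hypothetical and exists only for illustration.
def _example_flatten():
  structure = {"b": (2, 3), "a": 1}
  # Per the key-sorting convention above, the result should be [1, 2, 3].
  return flatten(structure)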
# See the swig file (util.i) for documentation.
_same_namedtuples = _pywrap_tensorflow.SameNamedtuples
class _DotString(object):
def __str__(self):
return "."
def __repr__(self):
return "."
_DOT = _DotString()
@tf_export("nest.assert_same_structure")
def assert_same_structure(nest1, nest2, check_types=True,
expand_composites=False):
"""Asserts that two structures are nested in the same way.
Note that namedtuples with identical name and fields are always considered
to have the same shallow structure (even with `check_types=True`).
  For instance, this code does not raise an exception:
```python
def nt(a, b):
return collections.namedtuple('foo', 'a b')(a, b)
  assert_same_structure(nt(0, 1), nt(2, 3))
```
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default) types of sequences are checked as well,
including the keys of dictionaries. If set to `False`, for example a
list and a tuple of objects will look the same if they have the same
size. Note that namedtuples with identical name and fields are always
considered to have the same shallow structure. Two types will also be
considered the same if they are both list subtypes (which allows "list"
and "_ListWrapper" from trackable dependency tracking to compare
equal).
expand_composites: If true, then composite tensors such as `tf.SparseTensor`
and `tf.RaggedTensor` are expanded into their component tensors.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
try:
_pywrap_tensorflow.AssertSameStructure(nest1, nest2, check_types,
expand_composites)
except (ValueError, TypeError) as e:
str1 = str(map_structure(lambda _: _DOT, nest1))
str2 = str(map_structure(lambda _: _DOT, nest2))
raise type(e)("%s\n"
"Entire first structure:\n%s\n"
"Entire second structure:\n%s"
% (str(e), str1, str2))
def flatten_dict_items(dictionary):
"""Returns a dictionary with flattened keys and values.
This function flattens the keys and values of a dictionary, which can be
arbitrarily nested structures, and returns the flattened version of such
structures:
```python
example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
result = {4: "a", 5: "b", 6: "c", 8: "d"}
flatten_dict_items(example_dictionary) == result
```
The input dictionary must satisfy two properties:
1. Its keys and values should have the same exact nested structure.
2. The set of all flattened keys of the dictionary must not contain repeated
keys.
Args:
dictionary: the dictionary to zip
Returns:
The zipped dictionary.
Raises:
TypeError: If the input is not a dictionary.
ValueError: If any key and value do not have the same structure layout, or
if keys are not unique.
"""
if not isinstance(dictionary, (dict, _collections.Mapping)):
raise TypeError("input must be a dictionary")
flat_dictionary = {}
for i, v in _six.iteritems(dictionary):
if not is_sequence(i):
if i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique." % i)
flat_dictionary[i] = v
else:
flat_i = flatten(i)
flat_v = flatten(v)
if len(flat_i) != len(flat_v):
raise ValueError(
"Could not flatten dictionary. Key had %d elements, but value had "
"%d elements. Key: %s, value: %s."
% (len(flat_i), len(flat_v), flat_i, flat_v))
for new_i, new_v in zip(flat_i, flat_v):
if new_i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique."
% (new_i))
flat_dictionary[new_i] = new_v
return flat_dictionary
def _packed_nest_with_indices(structure, flat, index, is_seq):
"""Helper function for pack_sequence_as.
Args:
structure: Substructure (list / tuple / dict) to mimic.
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
is_seq: Function used to test if a value should be treated as a sequence.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in _yield_value(structure):
if is_seq(s):
new_index, child = _packed_nest_with_indices(s, flat, index, is_seq)
packed.append(_sequence_like(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
@tf_export("nest.pack_sequence_as")
def pack_sequence_as(structure, flat_sequence, expand_composites=False):
"""Returns a given flattened sequence packed into a given structure.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
If `structure` is or contains a dict instance, the keys will be sorted to
pack the flat sequence in deterministic order. This is true also for
`OrderedDict` instances: their sequence order is ignored, the sorting order of
keys is used instead. The same convention is followed in `flatten`.
This correctly repacks dicts and `OrderedDict`s after they have been
flattened, and also allows flattening an `OrderedDict` and then repacking it
back using a corresponding plain dict, or vice-versa.
Dictionaries with non-sortable keys cannot be flattened.
Args:
structure: Nested structure, whose structure is given by nested lists,
tuples, and dicts. Note: numpy arrays and strings are considered
scalars.
flat_sequence: flat sequence to pack.
expand_composites: If true, then composite tensors such as `tf.SparseTensor`
and `tf.RaggedTensor` are expanded into their component tensors.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If `flat_sequence` and `structure` have different
element counts.
TypeError: `structure` is or contains a dict with non-sortable keys.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
if not is_seq(flat_sequence):
raise TypeError("flat_sequence must be a sequence")
if not is_seq(structure):
if len(flat_sequence) != 1:
raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
% len(flat_sequence))
return flat_sequence[0]
try:
final_index, packed = _packed_nest_with_indices(structure, flat_sequence,
0, is_seq)
if final_index < len(flat_sequence):
raise IndexError
except IndexError:
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but "
"flat_sequence had %d elements. Structure: %s, flat_sequence: %s." %
(len(flat_structure), len(flat_sequence), structure, flat_sequence))
return _sequence_like(structure, packed)
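# Illustrative sketch (not part of the original module): `pack_sequence_as`
# inverts `flatten`, so a flatten/transform/pack round trip preserves the
# original layout. The helper name is hypothetical.
def _example_pack_sequence_as():
  structure = {"a": 1, "b": (2, 3)}
  flat = flatten(structure)  # [1, 2, 3], ordered by sorted keys.
  doubled = [2 * x for x in flat]
  # Per the packing convention above, the result should be
  # {"a": 2, "b": (4, 6)}.
  return pack_sequence_as(structure, doubled)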
@tf_export("nest.map_structure")
def map_structure(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain results with the same structure layout.
Args:
func: A callable that accepts as many arguments as there are structures.
    *structure: scalar, or arbitrarily nested structure of scalars built from
      tuples, lists, and dicts. Note: numpy arrays are considered scalars.
**kwargs: Valid keyword args are:
* `check_types`: If set to `True` (default) the types of
iterables within the structures have to be same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this set this argument to `False`.
Note that namedtuples with identical name and fields are always
considered to have the same shallow structure.
* `expand_composites`: If set to `True`, then composite tensors such
as `tf.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors. If `False` (the default), then composite tensors
are not expanded.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
check_types = True
expand_composites = False
if kwargs:
check_types = kwargs.pop("check_types", check_types)
expand_composites = kwargs.pop("expand_composites", expand_composites)
if kwargs:
raise ValueError("Only valid keyword arguments are check_types "
"and expand_composites")
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types,
expand_composites=expand_composites)
flat_structure = [flatten(s, expand_composites) for s in structure]
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries],
expand_composites=expand_composites)
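# Illustrative sketch (not part of the original module): `map_structure`
# applies a function leaf-wise across structures that share the same layout.
# The helper name is hypothetical and exists only for illustration.
def _example_map_structure():
  a = {"x": 1, "y": (2, 3)}
  b = {"x": 10, "y": (20, 30)}
  # Per the leaf-wise application above, the result should be
  # {"x": 11, "y": (22, 33)}.
  return map_structure(lambda u, v: u + v, a, b)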
def map_structure_with_paths(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(path, x[0], x[1], ..., **kwargs)` where x[i] is an entry in
`structure[i]` and `path` is the common path to x[i] in the structures. All
structures in `structure` must have the same arity, and the return value will
contain the results with the same structure layout. Special kwarg
`check_types` determines whether the types of iterables within the structure
must be the same-- see **kwargs definition below.
Args:
func: A callable with the signature func(path, *values, **kwargs) that is
evaluated on the leaves of the structure.
*structure: A variable number of compatible structures to process.
**kwargs: Optional kwargs to be passed through to func. Special kwarg
`check_types` is not passed to func, but instead determines whether the
      types of iterables within the structures have to be the same (e.g.,
`map_structure(func, [1], (1,))` raises a `TypeError` exception). By
default, the types must match. To allow iteration over structures of
different types (but common arity), set this kwarg to `False`.
Returns:
A structure of the same form as the input structures whose leaves are the
result of evaluating func on corresponding leaves of the input structures.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
TypeError: If `check_types` is not `False` and the two structures differ in
the type of sequence in any of their substructures.
ValueError: If no structures are provided.
"""
def wrapper_func(tuple_path, *inputs, **kwargs):
string_path = "/".join(str(s) for s in tuple_path)
return func(string_path, *inputs, **kwargs)
return map_structure_with_tuple_paths_up_to(structure[0],
wrapper_func,
*structure,
**kwargs)
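# A minimal usage sketch (illustrative only; hypothetical values). The wrapper
# above joins each tuple path with "/", so `func` sees string paths:
#
#   data = {"a": "x", "b": ("y", "z")}
#   map_structure_with_paths(lambda path, v: path + ":" + v, data)
#   # ==> {"a": "a:x", "b": ("b/0:y", "b/1:z")}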
def map_structure_with_tuple_paths(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(tuple_path, x[0], x[1], ..., **kwargs)` where `x[i]` is an entry
in `structure[i]` and `tuple_path` is a tuple of indices and/or dictionary
keys (as returned by `nest.yield_flat_paths`), which uniquely specifies the
common path to x[i] in the structures. All structures in `structure` must have
the same arity, and the return value will contain the results in the same
structure. Special kwarg `check_types` determines whether the types of
  iterables within the structure must be the same; see the **kwargs definition
below.
Args:
func: A callable with the signature `func(tuple_path, *values, **kwargs)`
that is evaluated on the leaves of the structure.
*structure: A variable number of compatible structures to process.
**kwargs: Optional kwargs to be passed through to func. Special kwarg
`check_types` is not passed to func, but instead determines whether the
      types of iterables within the structures have to be the same (e.g.
      `map_structure(func, [1], (1,))` raises a `TypeError` exception). To
      allow this, set this argument to `False`.
Returns:
A structure of the same form as the input structures whose leaves are the
result of evaluating func on corresponding leaves of the input structures.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
TypeError: If `check_types` is not `False` and the two structures differ in
the type of sequence in any of their substructures.
ValueError: If no structures are provided.
"""
return map_structure_with_tuple_paths_up_to(structure[0],
func,
*structure,
**kwargs)
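# A minimal usage sketch (illustrative only; hypothetical values). Each leaf is
# visited with its tuple path of keys/indices, and the results are repacked
# into the same layout:
#
#   structure = {"a": 1, "b": (2, 3)}
#   map_structure_with_tuple_paths(lambda path, v: (path, v * 10), structure)
#   # ==> {"a": (("a",), 10), "b": ((("b", 0), 20), (("b", 1), 30))}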
def _yield_flat_up_to(shallow_tree, input_tree, is_seq, path=()):
"""Yields (path, value) pairs of input_tree flattened up to shallow_tree.
Args:
shallow_tree: Nested structure. Traverse no further than its leaf nodes.
input_tree: Nested structure. Return the paths and values from this tree.
Must have the same upper structure as shallow_tree.
is_seq: Function used to test if a value should be treated as a sequence.
path: Tuple. Optional argument, only used when recursing. The path from the
root of the original shallow_tree, down to the root of the shallow_tree
arg of this recursive call.
Yields:
    Pairs of (path, value), where path is the tuple path of a leaf node in
shallow_tree, and value is the value of the corresponding node in
input_tree.
"""
if not is_seq(shallow_tree):
yield (path, input_tree)
else:
input_tree = dict(_yield_sorted_items(input_tree))
for shallow_key, shallow_subtree in _yield_sorted_items(shallow_tree):
subpath = path + (shallow_key,)
input_subtree = input_tree[shallow_key]
for leaf_path, leaf_value in _yield_flat_up_to(shallow_subtree,
input_subtree, is_seq,
path=subpath):
yield (leaf_path, leaf_value)
def assert_shallow_structure(shallow_tree,
input_tree,
check_types=True,
expand_composites=False,
check_subtrees_length=True):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = {"a": "A", "b": "B"}
input_tree = {"a": 1, "c": 2}
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will not raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree,
check_subtrees_length=False)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same. Note that even with check_types==True,
this function will consider two different namedtuple classes with the same
name and _fields attribute to be the same class.
expand_composites: If true, then composite tensors such as tf.SparseTensor
and tf.RaggedTensor are expanded into their component tensors.
check_subtrees_length: if `True` (default) the subtrees `shallow_tree` and
`input_tree` have to be the same length. If `False` sequences are treated
as key-value like mappings allowing them to be considered as valid
subtrees. Note that this may drop parts of the `input_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
if is_seq(shallow_tree):
if not is_seq(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if check_types and not isinstance(input_tree, type(shallow_tree)):
# Duck-typing means that nest should be fine with two different
# namedtuples with identical name and fields.
shallow_is_namedtuple = _is_namedtuple(shallow_tree, False)
input_is_namedtuple = _is_namedtuple(input_tree, False)
if shallow_is_namedtuple and input_is_namedtuple:
if not _same_namedtuples(shallow_tree, input_tree):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
elif ((_is_composite_tensor(shallow_tree) or
_is_composite_tensor(input_tree)) and
(_is_type_spec(shallow_tree) or _is_type_spec(input_tree))):
pass # Compatibility will be checked below.
elif not (isinstance(shallow_tree, _collections.Mapping)
and isinstance(input_tree, _collections.Mapping)):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
if _is_composite_tensor(shallow_tree) or _is_composite_tensor(input_tree):
if not (
(_is_composite_tensor(input_tree) or _is_type_spec(input_tree)) and
(_is_composite_tensor(shallow_tree) or _is_type_spec(shallow_tree))):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
type_spec_1 = (shallow_tree if _is_type_spec(shallow_tree) else
shallow_tree._type_spec) # pylint: disable=protected-access
type_spec_2 = (input_tree if _is_type_spec(input_tree) else
input_tree._type_spec) # pylint: disable=protected-access
try:
_ = type_spec_1.most_specific_compatible_type(type_spec_2)
except (TypeError, ValueError) as e:
raise ValueError(
"Incompatible CompositeTensor TypeSpecs: %s vs. %s -- %s" %
(type_spec_1, type_spec_2, e))
elif _is_type_spec(shallow_tree):
if not _is_type_spec(input_tree):
raise TypeError("If shallow structure is a TypeSpec, input must also "
"be a TypeSpec. Input has type: %s."
% type(input_tree))
else:
if check_subtrees_length and len(input_tree) != len(shallow_tree):
raise ValueError(
_STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree), shallow_length=len(shallow_tree)))
elif len(input_tree) < len(shallow_tree):
raise ValueError(
_INPUT_TREE_SMALLER_THAN_SHALLOW_TREE.format(
input_size=len(input_tree), shallow_size=len(shallow_tree)))
if isinstance(shallow_tree, _collections.Mapping):
absent_keys = set(shallow_tree) - set(input_tree)
if absent_keys:
raise ValueError(_SHALLOW_TREE_HAS_INVALID_KEYS
.format(sorted(absent_keys)))
for shallow_branch, input_branch in zip(_yield_value(shallow_tree),
_yield_value(input_tree)):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types,
expand_composites=expand_composites,
check_subtrees_length=check_subtrees_length)
def flatten_up_to(shallow_tree, input_tree, check_types=True,
expand_composites=False, check_subtrees_length=True):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
  partially flattened output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
  `shallow_tree`, that we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure layout
as `shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Non-Full-Subtree case:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
flattened = flatten_up_to(shallow_tree, input_tree,
check_subtrees_length=False)
# Output is:
# ["c", ["d", "e"]]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
check_types: bool. If True, check that each node in shallow_tree has the
same type as the corresponding node in input_tree.
expand_composites: If true, then composite tensors such as tf.SparseTensor
and tf.RaggedTensor are expanded into their component tensors.
check_subtrees_length: if `True` (default) the subtrees `shallow_tree` and
`input_tree` have to be the same length. If `False` sequences are treated
as key-value like mappings allowing them to be considered as valid
subtrees. Note that this may drop parts of the `input_tree`.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
assert_shallow_structure(shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites,
check_subtrees_length=check_subtrees_length)
# Discard paths returned by _yield_flat_up_to.
return list(v for _, v in _yield_flat_up_to(shallow_tree, input_tree, is_seq))
def flatten_with_tuple_paths_up_to(shallow_tree,
input_tree,
check_types=True,
expand_composites=False,
check_subtrees_length=True):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flattened output.
  Returns a list of (path, value) pairs, where value is a leaf node in the
flattened tree, and path is the tuple path of that leaf in input_tree.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[((), input_tree)]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
  `shallow_tree`, that we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure layout
as `shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_with_tuple_paths_up_to(shallow_tree,
input_tree)
flattened_shallow_tree = flatten_with_tuple_paths_up_to(shallow_tree,
shallow_tree)
# Output is:
# [((0, 0), [2, 2]),
# ((0, 1), [3, 3]),
# ((1, 0), [4, 9]),
# ((1, 1), [5, 5])]
#
# [((0, 0), True),
# ((0, 1), True),
# ((1, 0), False),
# ((1, 1), True)]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
  input_tree_flattened_as_shallow_tree = flatten_with_tuple_paths_up_to(
      shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [((0, 0), ('a', 1)),
# ((0, 1, 0), ('b', 2)),
# ((0, 1, 1, 0), ('c', 3)),
# ((0, 1, 1, 1), ('d', 4))]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
  flatten_with_tuple_paths_up_to(0, 0)  # Output: [((), 0)]
  flatten_with_tuple_paths_up_to(0, [0, 1, 2])  # Output: [((), [0, 1, 2])]
  flatten_with_tuple_paths_up_to([0, 1, 2], 0)  # Output: TypeError
  flatten_with_tuple_paths_up_to([0, 1, 2], [0, 1, 2])
  # Output: [((0,), 0), ((1,), 1), ((2,), 2)]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
check_types: bool. If True, check that each node in shallow_tree has the
same type as the corresponding node in input_tree.
expand_composites: If true, then composite tensors such as tf.SparseTensor
and tf.RaggedTensor are expanded into their component tensors.
check_subtrees_length: if `True` (default) the subtrees `shallow_tree` and
`input_tree` have to be the same length. If `False` sequences are treated
as key-value like mappings allowing them to be considered as valid
subtrees. Note that this may drop parts of the `input_tree`.
Returns:
    A Python list of `(path, value)` pairs: the partially flattened version of
    `input_tree` according to the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
assert_shallow_structure(shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites,
check_subtrees_length=check_subtrees_length)
return list(_yield_flat_up_to(shallow_tree, input_tree, is_seq))
def map_structure_up_to(shallow_tree, func, *inputs, **kwargs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
  achieve this by specifying a shallow structure, `shallow_tree`, that we wish
  to flatten up to.
The `inputs`, can be thought of as having the same structure layout as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function therefore will return something with the same base structure as
`shallow_tree`.
Examples:
```python
shallow_tree = [None, None]
inp_val = [1, 2, 3]
out = map_structure_up_to(shallow_tree, lambda x: 2 * x, inp_val)
# Output is: [2, 4]
```
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
**kwargs: kwargs to feed to func(). Special kwarg
`check_types` is not passed to func, but instead determines whether the
      types of iterables within the structures have to be the same (e.g.
      `map_structure(func, [1], (1,))` raises a `TypeError` exception). To
      allow this, set this argument to `False`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with the same structure layout as
`shallow_tree`.
"""
return map_structure_with_tuple_paths_up_to(
shallow_tree,
lambda _, *values: func(*values), # Discards the path arg.
*inputs,
**kwargs)
def map_structure_with_tuple_paths_up_to(shallow_tree, func, *inputs, **kwargs):
"""Applies a function or op to a number of partially flattened inputs.
Like map_structure_up_to(), except that the 'func' argument takes a path
tuple as its first argument, followed by the corresponding values from
*inputs.
Example:
lowercase = {'a': 'a', 'b': ('b0', 'b1')}
uppercase = {'a': 'A', 'b': ('B0', 'B1')}
def print_path_and_values(path, *values):
print("path: {}, values: {}".format(path, values))
shallow_tree = {'a': None}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase)
>>> path: ('a',), values: ('a', 'A')
>>> path: ('b', 0), values: ('b0', 'B0')
>>> path: ('b', 1), values: ('b1', 'B1')
shallow_tree = {'b': None}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase,
check_types=False)
    >>> path: ('b',), values: (('b0', 'b1'), ('B0', 'B1'))
shallow_tree = {'a': None, 'b': {1: None}}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase,
check_types=False)
>>> path: ('a',), values: ('a', 'A')
    >>> path: ('b', 1), values: ('b1', 'B1')
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable that takes args (path, inputs_0_value, ... , inputs_N_value),
where path is a tuple path to a leaf node in shallow_tree, and
inputs_i_value is the corresponding value from inputs[i].
*inputs: nested structures that are all structurally compatible with
shallow_tree.
**kwargs: kwargs to feed to func(). Special kwarg
`check_types` is not passed to func, but instead determines whether the
      types of iterables within the structures have to be the same (e.g.
      `map_structure(func, [1], (1,))` raises a `TypeError` exception). To
      allow this, set this argument to `False`.
Raises:
TypeError: If `shallow_tree` is a sequence but one of `*inputs` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
Result of repeatedly applying `func`. Has the same structure layout as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
check_types = kwargs.pop("check_types", True)
expand_composites = kwargs.pop("expand_composites", False)
check_subtrees_length = kwargs.pop("check_subtrees_length", True)
is_seq = is_sequence_or_composite if expand_composites else is_sequence
for input_tree in inputs:
assert_shallow_structure(
shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites,
check_subtrees_length=check_subtrees_length)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
flat_value_lists = [
flatten_up_to( # pylint: disable=g-complex-comprehension
shallow_tree,
input_tree,
check_types,
expand_composites=expand_composites,
check_subtrees_length=check_subtrees_length) for input_tree in inputs
]
flat_path_list = [path for path, _
in _yield_flat_up_to(shallow_tree, inputs[0], is_seq)]
results = [func(*args, **kwargs) for args in zip(flat_path_list,
*flat_value_lists)]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results,
expand_composites=expand_composites)
def get_traverse_shallow_structure(traverse_fn, structure,
expand_composites=False):
"""Generates a shallow structure from a `traverse_fn` and `structure`.
`traverse_fn` must accept any possible subtree of `structure` and return
a depth=1 structure containing `True` or `False` values, describing which
  of the top-level subtrees may be traversed. It may also return a scalar
  `True` or `False`, meaning "traversal is OK / not OK for all subtrees."
Examples are available in the unit tests (nest_test.py).
Args:
traverse_fn: Function taking a substructure and returning either a scalar
`bool` (whether to traverse that substructure or not) or a depth=1
shallow structure of the same type, describing which parts of the
substructure to traverse.
structure: The structure to traverse.
expand_composites: If true, then composite tensors such as tf.SparseTensor
and tf.RaggedTensor are expanded into their component tensors.
Returns:
A shallow structure containing python bools, which can be passed to
`map_structure_up_to` and `flatten_up_to`.
Raises:
TypeError: if `traverse_fn` returns a sequence for a non-sequence input,
or a structure with depth higher than 1 for a sequence input,
or if any leaf values in the returned structure or scalar are not type
`bool`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
to_traverse = traverse_fn(structure)
if not is_seq(structure):
if not isinstance(to_traverse, bool):
raise TypeError("traverse_fn returned structure: %s for non-structure: %s"
% (to_traverse, structure))
return to_traverse
level_traverse = []
if isinstance(to_traverse, bool):
if not to_traverse:
# Do not traverse this substructure at all. Exit early.
return False
else:
# Traverse the entire substructure.
for branch in _yield_value(structure):
level_traverse.append(
get_traverse_shallow_structure(traverse_fn, branch,
expand_composites=expand_composites))
elif not is_seq(to_traverse):
raise TypeError("traverse_fn returned a non-bool scalar: %s for input: %s"
% (to_traverse, structure))
else:
# Traverse some subset of this substructure.
assert_shallow_structure(to_traverse, structure,
expand_composites=expand_composites)
for t, branch in zip(_yield_value(to_traverse),
_yield_value(structure)):
if not isinstance(t, bool):
raise TypeError(
"traverse_fn didn't return a depth=1 structure of bools. saw: %s "
" for structure: %s" % (to_traverse, structure))
if t:
level_traverse.append(
            get_traverse_shallow_structure(traverse_fn, branch,
                                           expand_composites=expand_composites))
else:
level_traverse.append(False)
return _sequence_like(structure, level_traverse)
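# A minimal usage sketch (illustrative only; hypothetical values). A traverse_fn
# that refuses to descend into dicts produces a shallow structure of bools that
# `flatten_up_to` can then use to keep dict subtrees intact:
#
#   structure = [{"a": 1}, [2, 3]]
#   shallow = get_traverse_shallow_structure(
#       lambda s: not isinstance(s, dict), structure)
#   # shallow ==> [False, [True, True]]
#   flatten_up_to(shallow, structure)
#   # ==> [{"a": 1}, 2, 3]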
def yield_flat_paths(nest, expand_composites=False):
"""Yields paths for some nested structure.
  Paths are tuples of objects which can be str-converted, which may include
  integers or other types which are used as indices in a dict.
  The flat list will be in the corresponding order as if you called
  `nest.flatten` on the structure. This is handy for naming Tensors such that
  the TF scope structure matches the tuple structure.
E.g. if we have a tuple `value = Foo(a=3, b=Bar(c=23, d=42))`
```shell
>>> nest.flatten(value)
[3, 23, 42]
>>> list(nest.yield_flat_paths(value))
[('a',), ('b', 'c'), ('b', 'd')]
```
```shell
>>> list(nest.yield_flat_paths({'a': [3]}))
[('a', 0)]
>>> list(nest.yield_flat_paths({'a': 3}))
[('a',)]
```
Args:
nest: the value to produce a flattened paths list for.
expand_composites: If true, then composite tensors such as tf.SparseTensor
and tf.RaggedTensor are expanded into their component tensors.
Yields:
Tuples containing index or key values which form the path to a specific
leaf value in the nested structure.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
for k, _ in _yield_flat_up_to(nest, nest, is_seq):
yield k
def flatten_with_joined_string_paths(structure, separator="/",
expand_composites=False):
"""Returns a list of (string path, data element) tuples.
The order of tuples produced matches that of `nest.flatten`. This allows you
to flatten a nested structure while keeping information about where in the
structure each data element was located. See `nest.yield_flat_paths`
for more information.
Args:
structure: the nested structure to flatten.
separator: string to separate levels of hierarchy in the results, defaults
to '/'.
expand_composites: If true, then composite tensors such as tf.SparseTensor
and tf.RaggedTensor are expanded into their component tensors.
Returns:
A list of (string, data element) tuples.
"""
flat_paths = yield_flat_paths(structure, expand_composites=expand_composites)
def stringify_and_join(path_elements):
return separator.join(str(path_element) for path_element in path_elements)
flat_string_paths = [stringify_and_join(path) for path in flat_paths]
return list(zip(flat_string_paths,
flatten(structure, expand_composites=expand_composites)))
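# A minimal usage sketch (illustrative only; hypothetical values): each leaf is
# paired with a "/"-joined string describing its location.
#
#   structure = {"a": 1, "b": [2, 3]}
#   flatten_with_joined_string_paths(structure)
#   # ==> [("a", 1), ("b/0", 2), ("b/1", 3)]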
def flatten_with_tuple_paths(structure, expand_composites=False):
"""Returns a list of `(tuple_path, leaf_element)` tuples.
The order of pairs produced matches that of `nest.flatten`. This allows you
to flatten a nested structure while keeping information about where in the
structure each data element was located. See `nest.yield_flat_paths`
for more information about tuple paths.
Args:
structure: the nested structure to flatten.
expand_composites: If true, then composite tensors such as tf.SparseTensor
and tf.RaggedTensor are expanded into their component tensors.
Returns:
A list of `(tuple_path, leaf_element)` tuples. Each `tuple_path` is a tuple
of indices and/or dictionary keys that uniquely specify the path to
`leaf_element` within `structure`.
"""
return list(zip(yield_flat_paths(structure,
expand_composites=expand_composites),
flatten(structure, expand_composites=expand_composites)))
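# A minimal usage sketch (illustrative only; hypothetical values): same
# flattening as above, but with tuple paths instead of joined strings.
#
#   structure = {"a": 1, "b": [2, 3]}
#   flatten_with_tuple_paths(structure)
#   # ==> [(("a",), 1), (("b", 0), 2), (("b", 1), 3)]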
_pywrap_tensorflow.RegisterType("Mapping", _collections.Mapping)
_pywrap_tensorflow.RegisterType("Sequence", _collections.Sequence)
|
tensorflow-master
|
tensorflow/python/util/nest.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A LazyLoader class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import types
from tensorflow.python.platform import tf_logging as logging
class LazyLoader(types.ModuleType):
"""Lazily import a module, mainly to avoid pulling in large dependencies.
  `contrib` and `ffmpeg` are examples of modules that are large and not always
  needed, and this allows them to only be loaded when they are used.
"""
# The lint error here is incorrect.
def __init__(self, local_name, parent_module_globals, name, warning=None): # pylint: disable=super-on-old-class
self._local_name = local_name
self._parent_module_globals = parent_module_globals
self._warning = warning
super(LazyLoader, self).__init__(name)
def _load(self):
"""Load the module and insert it into the parent's globals."""
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Emit a warning if one was specified
if self._warning:
logging.warning(self._warning)
# Make sure to only warn once.
self._warning = None
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
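# A minimal usage sketch (illustrative only; the module name and warning text
# are hypothetical). From a parent package's __init__.py:
#
#   contrib = LazyLoader("contrib", globals(), "tensorflow.contrib",
#                        warning="contrib is deprecated")
#   # `tensorflow.contrib` is imported only on first attribute access, e.g.
#   # `contrib.slim`; later lookups hit the updated __dict__ directly.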
|
tensorflow-master
|
tensorflow/python/util/lazy_loader.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for Python 2 vs. 3 compatibility.
## Conversion routines
In addition to the functions below, `as_str` converts an object to a `str`.
## Types
The compatibility module also provides the following types:
* `bytes_or_text_types`
* `complex_types`
* `integral_types`
* `real_types`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers as _numbers
import numpy as _np
import six as _six
from tensorflow.python.util.tf_export import tf_export
def as_bytes(bytes_or_text, encoding='utf-8'):
"""Converts `bytearray`, `bytes`, or unicode python input types to `bytes`.
Uses utf-8 encoding for text by default.
Args:
bytes_or_text: A `bytearray`, `bytes`, `str`, or `unicode` object.
encoding: A string indicating the charset for encoding unicode.
Returns:
A `bytes` object.
Raises:
TypeError: If `bytes_or_text` is not a binary or unicode string.
"""
if isinstance(bytes_or_text, bytearray):
return bytes(bytes_or_text)
elif isinstance(bytes_or_text, _six.text_type):
return bytes_or_text.encode(encoding)
elif isinstance(bytes_or_text, bytes):
return bytes_or_text
else:
raise TypeError('Expected binary or unicode string, got %r' %
(bytes_or_text,))
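# A minimal usage sketch (illustrative only): utf-8 is the default encoding,
# and `bytes` input is passed through unchanged.
#
#   as_bytes(u'caf\u00e9')  # ==> b'caf\xc3\xa9'
#   as_bytes(b'abc')        # ==> b'abc'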
def as_text(bytes_or_text, encoding='utf-8'):
"""Converts any string-like python input types to unicode.
Returns the input as a unicode string. Uses utf-8 encoding for text
by default.
Args:
bytes_or_text: A `bytes`, `str`, or `unicode` object.
encoding: A string indicating the charset for decoding unicode.
Returns:
A `unicode` (Python 2) or `str` (Python 3) object.
Raises:
TypeError: If `bytes_or_text` is not a binary or unicode string.
"""
if isinstance(bytes_or_text, _six.text_type):
return bytes_or_text
elif isinstance(bytes_or_text, bytes):
return bytes_or_text.decode(encoding)
else:
raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text)
# Convert an object to a `str` in both Python 2 and 3.
if _six.PY2:
as_str = as_bytes
tf_export('compat.as_bytes', 'compat.as_str')(as_bytes)
tf_export('compat.as_text')(as_text)
else:
as_str = as_text
tf_export('compat.as_bytes')(as_bytes)
tf_export('compat.as_text', 'compat.as_str')(as_text)
@tf_export('compat.as_str_any')
def as_str_any(value):
"""Converts input to `str` type.
Uses `str(value)`, except for `bytes` typed inputs, which are converted
using `as_str`.
Args:
    value: An object that can be converted to `str`.
Returns:
A `str` object.
"""
if isinstance(value, bytes):
return as_str(value)
else:
return str(value)
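# A minimal usage sketch (illustrative only): non-bytes values fall back to
# `str()`.
#
#   as_str_any(b'tensor')  # ==> 'tensor' on Python 3 (decoded via `as_str`)
#   as_str_any(42)         # ==> '42'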
@tf_export('compat.path_to_str')
def path_to_str(path):
r"""Converts input which is a `PathLike` object to `str` type.
Converts from any python constant representation of a `PathLike` object to
a string. If the input is not a `PathLike` object, simply returns the input.
Args:
path: An object that can be converted to path representation.
Returns:
A `str` object.
Usage:
In case a simplified `str` version of the path is needed from an
    `os.PathLike` object.
Examples:
```python3
>>> tf.compat.path_to_str('C:\XYZ\tensorflow\./.././tensorflow')
'C:\XYZ\tensorflow\./.././tensorflow' # Windows OS
>>> tf.compat.path_to_str(Path('C:\XYZ\tensorflow\./.././tensorflow'))
'C:\XYZ\tensorflow\..\tensorflow' # Windows OS
>>> tf.compat.path_to_str(Path('./corpus'))
'corpus' # Linux OS
>>> tf.compat.path_to_str('./.././Corpus')
'./.././Corpus' # Linux OS
>>> tf.compat.path_to_str(Path('./.././Corpus'))
'../Corpus' # Linux OS
>>> tf.compat.path_to_str(Path('./..////../'))
'../..' # Linux OS
```
"""
if hasattr(path, '__fspath__'):
path = as_str_any(path.__fspath__())
return path
# Numpy 1.8 scalars don't inherit from numbers.Integral in Python 3, so we
# need to check them specifically. The same goes for Real and Complex.
integral_types = (_numbers.Integral, _np.integer)
tf_export('compat.integral_types').export_constant(__name__, 'integral_types')
real_types = (_numbers.Real, _np.integer, _np.floating)
tf_export('compat.real_types').export_constant(__name__, 'real_types')
complex_types = (_numbers.Complex, _np.number)
tf_export('compat.complex_types').export_constant(__name__, 'complex_types')
# Either bytes or text.
bytes_or_text_types = (bytes, _six.text_type)
tf_export('compat.bytes_or_text_types').export_constant(__name__,
'bytes_or_text_types')
|
tensorflow-master
|
tensorflow/python/util/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions related to Python memory management."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(b/115366440): Delete this function when a custom OrderedDict is added
def dismantle_ordered_dict(ordered_dict):
"""Remove reference cycle in OrderedDict `ordered_dict`.
Helpful for making sure the garbage collector doesn't need to run after
using an OrderedDict.
Args:
    ordered_dict: An `OrderedDict` object to destroy. This object is unusable
after this function runs.
"""
  # OrderedDict makes a simple reference loop
# and hides it in an __attribute in some Python versions. We don't need to
# throw an error if we can't find it, but if we do find it we can break the
# loop to avoid creating work for the garbage collector.
problematic_cycle = ordered_dict.__dict__.get("_OrderedDict__root", None) # pylint: disable=protected-access
if problematic_cycle:
try:
del problematic_cycle[0][:]
except TypeError:
# This is probably not one of the problematic Python versions. Continue
# with the rest of our cleanup.
pass
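# A minimal usage sketch (illustrative only; assumes `collections` is imported
# at the call site):
#
#   d = collections.OrderedDict(a=1, b=2)
#   dismantle_ordered_dict(d)
#   # `d` must not be used after this call; any internal __root cycle has been
#   # broken, so the garbage collector has no cycle left to collect.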
|
tensorflow-master
|
tensorflow/python/util/memory.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for operator dispatch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
class CustomTensor(object):
"""A fake composite tensor class, for testing type-based dispatching."""
def __init__(self, tensor, score):
self.tensor = ops.convert_to_tensor(tensor)
self.score = score
@tf_export("test_op")
@dispatch.add_dispatch_support
def test_op(x, y, z):
"""A fake op for testing dispatch of Python ops."""
return x + (2 * y) + (3 * z)
@test_util.run_all_in_graph_and_eager_modes
class DispatchTest(test_util.TensorFlowTestCase):
def testAddDispatchForTypes_With_CppOp(self):
original_handlers = gen_math_ops.add._tf_dispatchers[:]
# Override the behavior of gen_math_ops.add.
@dispatch.dispatch_for_types(gen_math_ops.add, CustomTensor)
def custom_add(x, y, name=None): # pylint: disable=unused-variable
return CustomTensor(gen_math_ops.add(x.tensor, y.tensor, name),
(x.score+y.score) / 2.0)
self.assertEqual(len(math_ops.add._tf_dispatchers),
len(original_handlers) + 1)
# Test that we see the overridden behavior when using CustomTensors.
x = CustomTensor([1, 2, 3], 2.0)
y = CustomTensor([7, 8, 2], 0.0)
x_plus_y = gen_math_ops.add(x, y)
self.assertAllEqual(self.evaluate(x_plus_y.tensor), [8, 10, 5])
self.assertNear(x_plus_y.score, 1.0, 0.001)
# Test that we still get the right behavior when using normal Tensors.
a = [1, 2, 3]
b = [4, 5, 6]
a_plus_b = gen_math_ops.add(a, b)
self.assertAllEqual(a_plus_b, [5, 7, 9])
# Test that we still get a TypeError or ValueError if we pass some
# type that's not supported by any dispatcher.
with self.assertRaises((TypeError, ValueError)):
gen_math_ops.add(a, None)
# Clean up
gen_math_ops.add._tf_dispatchers = original_handlers
def testAddDispatchForTypes_With_PythonOp(self):
original_handlers = test_op._tf_dispatchers[:]
@dispatch.dispatch_for_types(test_op, CustomTensor)
def override_for_test_op(x, y, z): # pylint: disable=unused-variable
return CustomTensor(test_op(x.tensor, y.tensor, z.tensor),
(x.score + y.score + z.score) / 3.0)
x = CustomTensor([1, 2, 3], 0.2)
y = CustomTensor([7, 8, 2], 0.4)
z = CustomTensor([0, 1, 2], 0.6)
result = test_op(x, y, z)
self.assertAllEqual(self.evaluate(result.tensor), [15, 21, 13])
self.assertNear(result.score, 0.4, 0.001)
# Clean up
test_op._tf_dispatchers = original_handlers
def testDispatchForTypes_SignatureMismatch(self):
with self.assertRaisesRegexp(AssertionError, "The decorated function's "
"signature must exactly match.*"):
@dispatch.dispatch_for_types(test_op, CustomTensor)
def override_for_test_op(a, b, c): # pylint: disable=unused-variable
return CustomTensor(test_op(a.tensor, b.tensor, c.tensor),
(a.score + b.score + c.score) / 3.0)
def testDispatchForTypes_OpDoesNotSupportDispatch(self):
def some_op(x, y):
return x + y
with self.assertRaisesRegexp(AssertionError, "Dispatching not enabled for"):
@dispatch.dispatch_for_types(some_op, CustomTensor)
def override_for_some_op(x, y): # pylint: disable=unused-variable
return x if x.score > 0 else y
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/util/dispatch_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tf_inspect."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def test_decorator(decorator_name, decorator_doc=None):
def make_tf_decorator(target):
return tf_decorator.TFDecorator(decorator_name, target, decorator_doc)
return make_tf_decorator
def test_undecorated_function():
pass
@test_decorator('decorator 1')
@test_decorator('decorator 2')
@test_decorator('decorator 3')
def test_decorated_function(x):
"""Test Decorated Function Docstring."""
return x * 2
@test_decorator('decorator')
def test_decorated_function_with_defaults(a, b=2, c='Hello'):
"""Test Decorated Function With Defaults Docstring."""
return [a, b, c]
@test_decorator('decorator')
class TestDecoratedClass(object):
"""Test Decorated Class."""
def __init__(self):
pass
def two(self):
return 2
class TfInspectTest(test.TestCase):
def testCurrentFrame(self):
self.assertEqual(inspect.currentframe(), tf_inspect.currentframe())
def testGetArgSpecOnDecoratorsThatDontProvideArgspec(self):
argspec = tf_inspect.getargspec(test_decorated_function_with_defaults)
self.assertEqual(['a', 'b', 'c'], argspec.args)
self.assertEqual((2, 'Hello'), argspec.defaults)
def testGetArgSpecOnDecoratorThatChangesArgspec(self):
argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
decorator = tf_decorator.TFDecorator('', test_undecorated_function, '',
argspec)
self.assertEqual(argspec, tf_inspect.getargspec(decorator))
def testGetArgSpecIgnoresDecoratorsThatDontProvideArgspec(self):
argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator)
self.assertEqual(argspec, tf_inspect.getargspec(outer_decorator))
def testGetArgSpecReturnsOutermostDecoratorThatChangesArgspec(self):
outer_argspec = tf_inspect.ArgSpec(
args=['a'], varargs=None, keywords=None, defaults=None)
inner_argspec = tf_inspect.ArgSpec(
args=['b'], varargs=None, keywords=None, defaults=None)
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', inner_argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator, '',
outer_argspec)
self.assertEqual(outer_argspec, tf_inspect.getargspec(outer_decorator))
def testGetArgSpecOnPartialPositionalArgumentOnly(self):
"""Tests getargspec on partial function with only positional arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, 7)
argspec = tf_inspect.ArgSpec(
args=['n'], varargs=None, keywords=None, defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialArgumentWithConvertibleToFalse(self):
"""Tests getargspec on partial function with args that convert to False."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, m=0)
exception_message = (r"Some arguments \['n'\] do not have default value, "
"but they are positioned after those with default "
"values. This can not be expressed with ArgSpec.")
with self.assertRaisesRegexp(ValueError, exception_message):
tf_inspect.getargspec(partial_func)
def testGetArgSpecOnPartialInvalidArgspec(self):
"""Tests getargspec on partial function that doesn't have valid argspec."""
def func(m, n, l, k=4):
return 2 * m + l + n * k
partial_func = functools.partial(func, n=7)
exception_message = (r"Some arguments \['l'\] do not have default value, "
"but they are positioned after those with default "
"values. This can not be expressed with ArgSpec.")
with self.assertRaisesRegexp(ValueError, exception_message):
tf_inspect.getargspec(partial_func)
def testGetArgSpecOnPartialValidArgspec(self):
"""Tests getargspec on partial function with valid argspec."""
def func(m, n, l, k=4):
return 2 * m + l + n * k
partial_func = functools.partial(func, n=7, l=2)
argspec = tf_inspect.ArgSpec(
args=['m', 'n', 'l', 'k'],
varargs=None,
keywords=None,
defaults=(7, 2, 4))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialNoArgumentsLeft(self):
"""Tests getargspec on partial function that prunes all arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, 7, 10)
argspec = tf_inspect.ArgSpec(
args=[], varargs=None, keywords=None, defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialKeywordArgument(self):
"""Tests getargspec on partial function that prunes some arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, n=7)
argspec = tf_inspect.ArgSpec(
args=['m', 'n'], varargs=None, keywords=None, defaults=(7,))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialKeywordArgumentWithDefaultValue(self):
"""Tests getargspec on partial function that prunes argument by keyword."""
def func(m=1, n=2):
return 2 * m + n
partial_func = functools.partial(func, n=7)
argspec = tf_inspect.ArgSpec(
args=['m', 'n'], varargs=None, keywords=None, defaults=(1, 7))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithVarargs(self):
"""Tests getargspec on partial function with variable arguments."""
def func(m, *arg):
return m + len(arg)
partial_func = functools.partial(func, 7, 8)
argspec = tf_inspect.ArgSpec(
args=[], varargs='arg', keywords=None, defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithVarkwargs(self):
"""Tests getargspec on partial function with variable keyword arguments."""
def func(m, n, **kwarg):
return m * n + len(kwarg)
partial_func = functools.partial(func, 7)
argspec = tf_inspect.ArgSpec(
args=['n'], varargs=None, keywords='kwarg', defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithDecorator(self):
"""Tests getargspec on decorated partial function."""
@test_decorator('decorator')
def func(m=1, n=2):
return 2 * m + n
partial_func = functools.partial(func, n=7)
argspec = tf_inspect.ArgSpec(
args=['m', 'n'], varargs=None, keywords=None, defaults=(1, 7))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithDecoratorThatChangesArgspec(self):
"""Tests getargspec on partial function with decorated argspec."""
argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
decorator = tf_decorator.TFDecorator('', test_undecorated_function, '',
argspec)
partial_argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(2, 1, 'hello'))
partial_with_decorator = functools.partial(decorator, a=2)
self.assertEqual(argspec, tf_inspect.getargspec(decorator))
self.assertEqual(partial_argspec,
tf_inspect.getargspec(partial_with_decorator))
def testGetArgSpecOnCallableObject(self):
class Callable(object):
def __call__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.ArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
test_obj = Callable()
self.assertEqual(argspec, tf_inspect.getargspec(test_obj))
def testGetArgSpecOnInitClass(self):
class InitClass(object):
def __init__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.ArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
self.assertEqual(argspec, tf_inspect.getargspec(InitClass))
def testGetArgSpecOnNewClass(self):
class NewClass(object):
def __new__(cls, a, b=1, c='hello'):
pass
argspec = tf_inspect.ArgSpec(
args=['cls', 'a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
self.assertEqual(argspec, tf_inspect.getargspec(NewClass))
def testGetFullArgSpecOnDecoratorsThatDontProvideFullArgSpec(self):
argspec = tf_inspect.getfullargspec(test_decorated_function_with_defaults)
self.assertEqual(['a', 'b', 'c'], argspec.args)
self.assertEqual((2, 'Hello'), argspec.defaults)
def testGetFullArgSpecOnDecoratorThatChangesFullArgSpec(self):
argspec = tf_inspect.FullArgSpec(
args=['a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
decorator = tf_decorator.TFDecorator('', test_undecorated_function, '',
argspec)
self.assertEqual(argspec, tf_inspect.getfullargspec(decorator))
def testGetFullArgSpecIgnoresDecoratorsThatDontProvideFullArgSpec(self):
argspec = tf_inspect.FullArgSpec(
args=['a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator)
self.assertEqual(argspec, tf_inspect.getfullargspec(outer_decorator))
def testGetFullArgSpecReturnsOutermostDecoratorThatChangesFullArgSpec(self):
outer_argspec = tf_inspect.FullArgSpec(
args=['a'],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
inner_argspec = tf_inspect.FullArgSpec(
args=['b'],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', inner_argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator, '',
outer_argspec)
self.assertEqual(outer_argspec, tf_inspect.getfullargspec(outer_decorator))
def testGetFullArgsSpecForPartial(self):
def func(a, b):
del a, b
partial_function = functools.partial(func, 1)
argspec = tf_inspect.FullArgSpec(
args=['b'],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_function))
def testGetFullArgSpecOnPartialNoArgumentsLeft(self):
"""Tests getfullargspec on partial function that prunes all arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, 7, 10)
argspec = tf_inspect.FullArgSpec(
args=[],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_func))
def testGetFullArgSpecOnPartialWithVarargs(self):
"""Tests getfullargspec on partial function with variable arguments."""
def func(m, *arg):
return m + len(arg)
partial_func = functools.partial(func, 7, 8)
argspec = tf_inspect.FullArgSpec(
args=[],
varargs='arg',
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_func))
def testGetFullArgSpecOnPartialWithVarkwargs(self):
"""Tests getfullargspec.
Tests on partial function with variable keyword arguments.
"""
def func(m, n, **kwarg):
return m * n + len(kwarg)
partial_func = functools.partial(func, 7)
argspec = tf_inspect.FullArgSpec(
args=['n'],
varargs=None,
varkw='kwarg',
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_func))
def testGetFullArgSpecOnCallableObject(self):
class Callable(object):
def __call__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.FullArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
test_obj = Callable()
self.assertEqual(argspec, tf_inspect.getfullargspec(test_obj))
def testGetFullArgSpecOnInitClass(self):
class InitClass(object):
def __init__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.FullArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(InitClass))
def testGetFullArgSpecOnNewClass(self):
class NewClass(object):
def __new__(cls, a, b=1, c='hello'):
pass
argspec = tf_inspect.FullArgSpec(
args=['cls', 'a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(NewClass))
def testGetDoc(self):
self.assertEqual('Test Decorated Function With Defaults Docstring.',
tf_inspect.getdoc(test_decorated_function_with_defaults))
def testGetFile(self):
self.assertTrue('tf_inspect_test.py' in tf_inspect.getfile(
test_decorated_function_with_defaults))
self.assertTrue('tf_decorator.py' in tf_inspect.getfile(
test_decorator('decorator')(tf_decorator.unwrap)))
def testGetMembers(self):
self.assertEqual(
inspect.getmembers(TestDecoratedClass),
tf_inspect.getmembers(TestDecoratedClass))
def testGetModule(self):
self.assertEqual(
inspect.getmodule(TestDecoratedClass),
tf_inspect.getmodule(TestDecoratedClass))
self.assertEqual(
inspect.getmodule(test_decorated_function),
tf_inspect.getmodule(test_decorated_function))
self.assertEqual(
inspect.getmodule(test_undecorated_function),
tf_inspect.getmodule(test_undecorated_function))
def testGetSource(self):
expected = '''@test_decorator('decorator')
def test_decorated_function_with_defaults(a, b=2, c='Hello'):
"""Test Decorated Function With Defaults Docstring."""
return [a, b, c]
'''
self.assertEqual(
expected, tf_inspect.getsource(test_decorated_function_with_defaults))
def testGetSourceFile(self):
self.assertEqual(
__file__,
tf_inspect.getsourcefile(test_decorated_function_with_defaults))
def testGetSourceLines(self):
expected = inspect.getsourcelines(
test_decorated_function_with_defaults.decorated_target)
self.assertEqual(
expected,
tf_inspect.getsourcelines(test_decorated_function_with_defaults))
def testIsBuiltin(self):
self.assertEqual(
tf_inspect.isbuiltin(TestDecoratedClass),
inspect.isbuiltin(TestDecoratedClass))
self.assertEqual(
tf_inspect.isbuiltin(test_decorated_function),
inspect.isbuiltin(test_decorated_function))
self.assertEqual(
tf_inspect.isbuiltin(test_undecorated_function),
inspect.isbuiltin(test_undecorated_function))
self.assertEqual(tf_inspect.isbuiltin(range), inspect.isbuiltin(range))
self.assertEqual(tf_inspect.isbuiltin(max), inspect.isbuiltin(max))
def testIsClass(self):
self.assertTrue(tf_inspect.isclass(TestDecoratedClass))
self.assertFalse(tf_inspect.isclass(test_decorated_function))
def testIsFunction(self):
self.assertTrue(tf_inspect.isfunction(test_decorated_function))
self.assertFalse(tf_inspect.isfunction(TestDecoratedClass))
def testIsMethod(self):
self.assertTrue(tf_inspect.ismethod(TestDecoratedClass().two))
self.assertFalse(tf_inspect.ismethod(test_decorated_function))
def testIsModule(self):
self.assertTrue(
tf_inspect.ismodule(inspect.getmodule(inspect.currentframe())))
self.assertFalse(tf_inspect.ismodule(test_decorated_function))
def testIsRoutine(self):
self.assertTrue(tf_inspect.isroutine(len))
self.assertFalse(tf_inspect.isroutine(TestDecoratedClass))
def testStack(self):
expected_stack = inspect.stack()
actual_stack = tf_inspect.stack()
self.assertEqual(len(expected_stack), len(actual_stack))
self.assertEqual(expected_stack[0][0], actual_stack[0][0]) # Frame object
self.assertEqual(expected_stack[0][1], actual_stack[0][1]) # Filename
self.assertEqual(expected_stack[0][2],
actual_stack[0][2] - 1) # Line number
self.assertEqual(expected_stack[0][3], actual_stack[0][3]) # Function name
self.assertEqual(expected_stack[1:], actual_stack[1:])
class TfInspectGetCallArgsTest(test.TestCase):
def testReturnsEmptyWhenUnboundFuncHasNoParameters(self):
def empty():
pass
self.assertEqual({}, tf_inspect.getcallargs(empty))
def testClashingParameterNames(self):
def func(positional, func=1, func_and_positional=2, kwargs=3):
return positional, func, func_and_positional, kwargs
kwargs = {}
self.assertEqual(
tf_inspect.getcallargs(func, 0, **kwargs), {
'positional': 0,
'func': 1,
'func_and_positional': 2,
'kwargs': 3
})
kwargs = dict(func=4, func_and_positional=5, kwargs=6)
self.assertEqual(
tf_inspect.getcallargs(func, 0, **kwargs), {
'positional': 0,
'func': 4,
'func_and_positional': 5,
'kwargs': 6
})
def testUnboundFuncWithOneParamPositional(self):
def func(a):
return a
self.assertEqual({'a': 5}, tf_inspect.getcallargs(func, 5))
def testUnboundFuncWithTwoParamsPositional(self):
def func(a, b):
return (a, b)
self.assertEqual({'a': 10, 'b': 20}, tf_inspect.getcallargs(func, 10, 20))
def testUnboundFuncWithOneParamKeyword(self):
def func(a):
return a
self.assertEqual({'a': 5}, tf_inspect.getcallargs(func, a=5))
def testUnboundFuncWithTwoParamsKeyword(self):
def func(a, b):
return (a, b)
self.assertEqual({'a': 6, 'b': 7}, tf_inspect.getcallargs(func, a=6, b=7))
def testUnboundFuncWithOneParamDefault(self):
def func(a=13):
return a
self.assertEqual({'a': 13}, tf_inspect.getcallargs(func))
def testUnboundFuncWithOneParamDefaultOnePositional(self):
def func(a=0):
return a
self.assertEqual({'a': 1}, tf_inspect.getcallargs(func, 1))
def testUnboundFuncWithTwoParamsDefaultOnePositional(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 5, 'b': 2}, tf_inspect.getcallargs(func, 5))
def testUnboundFuncWithTwoParamsDefaultTwoPositional(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 4}, tf_inspect.getcallargs(func, 3, 4))
def testUnboundFuncWithOneParamDefaultOneKeyword(self):
def func(a=1):
return a
self.assertEqual({'a': 3}, tf_inspect.getcallargs(func, a=3))
def testUnboundFuncWithTwoParamsDefaultOneKeywordFirst(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 2}, tf_inspect.getcallargs(func, a=3))
def testUnboundFuncWithTwoParamsDefaultOneKeywordSecond(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 1, 'b': 4}, tf_inspect.getcallargs(func, b=4))
def testUnboundFuncWithTwoParamsDefaultTwoKeywords(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 4}, tf_inspect.getcallargs(func, a=3, b=4))
def testBoundFuncWithOneParam(self):
class Test(object):
def bound(self):
pass
t = Test()
self.assertEqual({'self': t}, tf_inspect.getcallargs(t.bound))
def testBoundFuncWithManyParamsAndDefaults(self):
class Test(object):
def bound(self, a, b=2, c='Hello'):
return (a, b, c)
t = Test()
self.assertEqual({
'self': t,
'a': 3,
'b': 2,
'c': 'Goodbye'
}, tf_inspect.getcallargs(t.bound, 3, c='Goodbye'))
def testClassMethod(self):
class Test(object):
@classmethod
def test(cls, a, b=3, c='hello'):
return (a, b, c)
self.assertEqual({
'cls': Test,
'a': 5,
'b': 3,
'c': 'goodbye'
}, tf_inspect.getcallargs(Test.test, 5, c='goodbye'))
def testUsesOutermostDecoratorsArgSpec(self):
def func():
pass
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
decorated = tf_decorator.make_decorator(
func,
wrapper,
decorator_argspec=tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(3, 'hello')))
self.assertEqual({
'a': 4,
'b': 3,
'c': 'goodbye'
}, tf_inspect.getcallargs(decorated, 4, c='goodbye'))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/util/tf_inspect_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import tf_stack
# Allow deprecation warnings to be silenced temporarily with a context manager.
_PRINT_DEPRECATION_WARNINGS = True
# Remember which deprecation warnings have been printed already.
_PRINTED_WARNING = {}
class DeprecatedNamesAlreadySet(Exception):
"""Raised when setting deprecated names multiple times for the same symbol."""
pass
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated functions."""
main_text = ['THIS FUNCTION IS DEPRECATED. It will be removed %s.' %
('in a future version' if date is None else ('after %s' % date))]
if instructions:
main_text.append('Instructions for updating:')
return decorator_utils.add_notice_to_docstring(
doc, instructions,
'DEPRECATED FUNCTION',
'(deprecated)', main_text)
def _add_deprecated_arg_notice_to_docstring(doc, date, instructions,
deprecated_names):
"""Adds a deprecation notice to a docstring for deprecated arguments."""
deprecation_string = ', '.join(sorted(deprecated_names))
return decorator_utils.add_notice_to_docstring(
doc, instructions, 'DEPRECATED FUNCTION ARGUMENTS',
'(deprecated arguments)', [
'SOME ARGUMENTS ARE DEPRECATED: `(%s)`. '
'They will be removed %s.' %
(deprecation_string, 'in a future version' if date is None else
('after %s' % date)), 'Instructions for updating:'
])
def _add_deprecated_arg_value_notice_to_docstring(doc, date, instructions,
deprecated_name_value_dict):
"""Adds a deprecation notice to a docstring for deprecated arguments."""
deprecation_string = ', '.join(
'%s=%r' % (key, value)
for key, value in sorted(deprecated_name_value_dict.items()))
when = 'in a future version' if date is None else ('after %s' % date)
return decorator_utils.add_notice_to_docstring(
doc, instructions, 'DEPRECATED FUNCTION ARGUMENT VALUES',
'(deprecated argument values)', [
'SOME ARGUMENT VALUES ARE DEPRECATED: `(%s)`. '
'They will be removed %s.' % (deprecation_string, when),
'Instructions for updating:'
])
def _validate_deprecation_args(date, instructions):
if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date):
raise ValueError('Date must be YYYY-MM-DD.')
if not instructions:
raise ValueError('Don\'t deprecate things without conversion instructions!')
def _call_location(outer=False):
"""Returns call location given level up from current call."""
stack = tf_stack.extract_stack_file_and_line(max_length=4)
length = len(stack)
if length == 0: # should never happen as we're in a function
return 'UNKNOWN'
index = length-4 if outer else length-3
if index < 0:
index = 0
frame = stack[index]
return '{}:{}'.format(frame.file, frame.line)
def _wrap_decorator(wrapped_function):
"""Indicate that one function wraps another.
This decorator wraps a function using `tf_decorator.make_decorator`
so that doc generation scripts can pick up original function
signature.
  It would be better to use the @functools.wraps decorator, but it would
  not update the function signature to match the wrapped function in Python 2.
Args:
    wrapped_function: The function that the decorated function wraps.
  Returns:
    A function that accepts a wrapper function as an argument and returns a
    `TFDecorator` instance.
"""
def wrapper(wrapper_func):
return tf_decorator.make_decorator(wrapped_function, wrapper_func)
return wrapper
def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
"""Deprecate a symbol in favor of a new name with identical semantics.
This function is meant to be used when defining a backwards-compatibility
alias for a symbol which has been moved. For example:
module1.py:
```python
class NewNameForClass: pass
```
module2.py:
```python
import module1
DeprecatedNameForClass = deprecated_alias(
deprecated_name='module2.DeprecatedNameForClass',
name='module1.NewNameForClass',
    func_or_class=module1.NewNameForClass)
```
This function works for classes and functions.
For classes, it creates a new class which is functionally identical (it
inherits from the original, and overrides its constructor), but which prints
a deprecation warning when an instance is created. It also adds a deprecation
notice to the class' docstring.
For functions, it returns a function wrapped by `tf_decorator.make_decorator`.
That function prints a warning when used, and has a deprecation notice in its
docstring. This is more or less equivalent (the deprecation warning has
slightly different text) to writing:
```python
@deprecated
def deprecated_alias(original_args):
real_function(original_args)
```
Args:
deprecated_name: The name of the symbol that is being deprecated, to be used
in the warning message. This should be its fully qualified name to avoid
confusion.
name: The name of the symbol that is to be used instead of the deprecated
name. This should be a fully qualified name to avoid confusion.
func_or_class: The (non-deprecated) class or function for which a deprecated
alias should be created.
warn_once: If True (the default), only print a deprecation warning the first
time this function is used, or the class is instantiated.
Returns:
A wrapped version of `func_or_class` which prints a deprecation warning on
use and has a modified docstring.
"""
if tf_inspect.isclass(func_or_class):
# Make a new class with __init__ wrapped in a warning.
class _NewClass(func_or_class): # pylint: disable=missing-docstring
__doc__ = decorator_utils.add_notice_to_docstring(
func_or_class.__doc__, 'Please use %s instead.' % name,
'DEPRECATED CLASS',
'(deprecated)', ['THIS CLASS IS DEPRECATED. '
'It will be removed in a future version. '])
__name__ = func_or_class.__name__
__module__ = _call_location(outer=True)
@_wrap_decorator(func_or_class.__init__)
def __init__(self, *args, **kwargs):
if hasattr(_NewClass.__init__, '__func__'):
# Python 2
_NewClass.__init__.__func__.__doc__ = func_or_class.__init__.__doc__
else:
# Python 3
_NewClass.__init__.__doc__ = func_or_class.__init__.__doc__
if _PRINT_DEPRECATION_WARNINGS:
# We're making the alias as we speak. The original may have other
# aliases, so we cannot use it to check for whether it's already been
# warned about.
if _NewClass.__init__ not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[_NewClass.__init__] = True
logging.warning(
'From %s: The name %s is deprecated. Please use %s instead.\n',
_call_location(), deprecated_name, name)
super(_NewClass, self).__init__(*args, **kwargs)
return _NewClass
else:
decorator_utils.validate_callable(func_or_class, 'deprecated')
# Make a wrapper for the original
@functools.wraps(func_or_class)
def new_func(*args, **kwargs): # pylint: disable=missing-docstring
if _PRINT_DEPRECATION_WARNINGS:
# We're making the alias as we speak. The original may have other
# aliases, so we cannot use it to check for whether it's already been
# warned about.
if new_func not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[new_func] = True
logging.warning(
'From %s: The name %s is deprecated. Please use %s instead.\n',
_call_location(), deprecated_name, name)
return func_or_class(*args, **kwargs)
return tf_decorator.make_decorator(
func_or_class, new_func, 'deprecated',
_add_deprecated_function_notice_to_docstring(
func_or_class.__doc__, None, 'Please use %s instead.' % name))
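# NOTE(editor): Illustrative sketch, not part of the original file. It shows
# how `deprecated_alias` might be used for a plain function; the module and
# symbol names are hypothetical, and the helper is defined but never called.
def _example_deprecated_alias():
  def new_multiply(a, b):
    """Multiplies two numbers."""
    return a * b
  old_multiply = deprecated_alias(
      deprecated_name='mymodule.old_multiply',
      name='mymodule.new_multiply',
      func_or_class=new_multiply)
  # The alias keeps working, but logs a one-time deprecation warning.
  return old_multiply(2, 3)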
def deprecated_endpoints(*args):
"""Decorator for marking endpoints deprecated.
This decorator does not print deprecation messages.
TODO(annarev): eventually start printing deprecation warnings when
  @deprecated_endpoints decorator is added.
Args:
*args: Deprecated endpoint names.
Returns:
    A function that takes a symbol as an argument and adds
    _tf_deprecated_api_names to that symbol.
    _tf_deprecated_api_names is set to a list of deprecated
    endpoint names for the symbol.
"""
def deprecated_wrapper(func):
# pylint: disable=protected-access
if '_tf_deprecated_api_names' in func.__dict__:
raise DeprecatedNamesAlreadySet(
'Cannot set deprecated names for %s to %s. '
'Deprecated names are already set to %s.' % (
func.__name__, str(args), str(func._tf_deprecated_api_names)))
func._tf_deprecated_api_names = args
# pylint: disable=protected-access
return func
return deprecated_wrapper
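# NOTE(editor): Illustrative sketch, not part of the original file. The
# endpoint names are hypothetical; `deprecated_endpoints` only annotates the
# function and does not log anything.
def _example_deprecated_endpoints():
  @deprecated_endpoints('train.my_op', 'my_old_op')
  def my_op(x):
    return x
  # The decorator records the deprecated names on the function itself.
  return my_op._tf_deprecated_api_names  # pylint: disable=protected-access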
def deprecated(date, instructions, warn_once=True):
"""Decorator for marking functions or methods deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called. It has the following format:
<function> (from <module>) is deprecated and will be removed after <date>.
Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (deprecated)' is appended
to the first line of the docstring and a deprecation notice is prepended
to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed.
Must be ISO 8601 (YYYY-MM-DD), or None.
instructions: String. Instructions on how to update code using the
deprecated function.
warn_once: Boolean. Set to `True` to warn only the first time the decorated
function is called. Otherwise, every call will log a warning.
Returns:
Decorated function or method.
Raises:
    ValueError: If date is not None and not in ISO 8601 format, or instructions are
empty.
"""
_validate_deprecation_args(date, instructions)
def deprecated_wrapper(func):
"""Deprecation wrapper."""
decorator_utils.validate_callable(func, 'deprecated')
@functools.wraps(func)
def new_func(*args, **kwargs): # pylint: disable=missing-docstring
if _PRINT_DEPRECATION_WARNINGS:
if func not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[func] = True
logging.warning(
'From %s: %s (from %s) is deprecated and will be removed %s.\n'
'Instructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__,
'in a future version' if date is None else ('after %s' % date),
instructions)
return func(*args, **kwargs)
return tf_decorator.make_decorator(
func, new_func, 'deprecated',
_add_deprecated_function_notice_to_docstring(func.__doc__, date,
instructions))
return deprecated_wrapper
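# NOTE(editor): Illustrative sketch, not part of the original file. A
# hypothetical function is marked deprecated with a removal date and update
# instructions; the warning is logged once, on the first call.
def _example_deprecated():
  @deprecated('2020-01-01', 'Use `new_sum` instead.')
  def old_sum(a, b):
    """Adds two numbers."""
    return a + b
  return old_sum(1, 2)  # Logs the deprecation warning, then returns 3.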
DeprecatedArgSpec = collections.namedtuple(
'DeprecatedArgSpec', ['position', 'has_ok_value', 'ok_value'])
def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples,
**kwargs):
"""Decorator for marking specific function arguments as deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called with the deprecated argument. It has the following format:
Calling <function> (from <module>) with <arg> is deprecated and will be
removed after <date>. Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> includes the class name if it is a method.
It also edits the docstring of the function: ' (deprecated arguments)' is
appended to the first line of the docstring and a deprecation notice is
prepended to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed.
Must be ISO 8601 (YYYY-MM-DD), or None.
instructions: String. Instructions on how to update code using the
deprecated function.
*deprecated_arg_names_or_tuples: String or 2-Tuple(String,
[ok_vals]). The string is the deprecated argument name.
Optionally, an ok-value may be provided. If the user provided
argument equals this value, the warning is suppressed.
**kwargs: If `warn_once=False` is passed, every call with a deprecated
argument will log a warning. The default behavior is to only warn the
first time the function is called with any given deprecated argument.
All other kwargs raise `ValueError`.
Returns:
Decorated function or method.
Raises:
    ValueError: If date is not None and not in ISO 8601 format, instructions are
empty, the deprecated arguments are not present in the function
signature, the second element of a deprecated_tuple is not a
list, or if a kwarg other than `warn_once` is passed.
"""
_validate_deprecation_args(date, instructions)
if not deprecated_arg_names_or_tuples:
raise ValueError('Specify which argument is deprecated.')
if kwargs and list(kwargs.keys()) != ['warn_once']:
kwargs.pop('warn_once', None)
raise ValueError('Illegal argument to deprecated_args: %s' % kwargs)
warn_once = kwargs.get('warn_once', True)
def _get_arg_names_to_ok_vals():
"""Returns a dict mapping arg_name to DeprecatedArgSpec w/o position."""
d = {}
for name_or_tuple in deprecated_arg_names_or_tuples:
if isinstance(name_or_tuple, tuple):
d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1])
else:
d[name_or_tuple] = DeprecatedArgSpec(-1, False, None)
return d
def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
"""Builds a dictionary from deprecated arguments to their spec.
Returned dict is keyed by argument name.
Each value is a DeprecatedArgSpec with the following fields:
position: The zero-based argument position of the argument
within the signature. None if the argument isn't found in
the signature.
      ok_value: A value of this argument for which the warning will be
        suppressed (only used when has_ok_value is True).
Args:
names_to_ok_vals: dict from string arg_name to a list of values,
possibly empty, which should not elicit a warning.
arg_spec: Output from tf_inspect.getfullargspec on the called function.
Returns:
Dictionary from arg_name to DeprecatedArgSpec.
"""
arg_name_to_pos = {
name: pos for pos, name in enumerate(arg_spec.args)}
deprecated_positional_args = {}
for arg_name, spec in iter(names_to_ok_vals.items()):
if arg_name in arg_name_to_pos:
pos = arg_name_to_pos[arg_name]
deprecated_positional_args[arg_name] = DeprecatedArgSpec(
pos, spec.has_ok_value, spec.ok_value)
return deprecated_positional_args
deprecated_arg_names = _get_arg_names_to_ok_vals()
def deprecated_wrapper(func):
"""Deprecation decorator."""
decorator_utils.validate_callable(func, 'deprecated_args')
arg_spec = tf_inspect.getfullargspec(func)
deprecated_positions = _get_deprecated_positional_arguments(
deprecated_arg_names, arg_spec)
is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names
is_kwargs_deprecated = arg_spec.varkw in deprecated_arg_names
if (len(deprecated_positions) + is_varargs_deprecated + is_kwargs_deprecated
!= len(deprecated_arg_names_or_tuples)):
known_args = arg_spec.args + [arg_spec.varargs, arg_spec.varkw]
missing_args = [arg_name for arg_name in deprecated_arg_names
if arg_name not in known_args]
raise ValueError('The following deprecated arguments are not present '
'in the function signature: %s. '
'Found next arguments: %s.' % (missing_args, known_args))
def _same_value(a, b):
"""A comparison operation that works for multiple object types.
Returns True for two empty lists, two numeric values with the
same value, etc.
Returns False for (pd.DataFrame, None), and other pairs which
should not be considered equivalent.
Args:
a: value one of the comparison.
b: value two of the comparison.
Returns:
A boolean indicating whether the two inputs are the same value
for the purposes of deprecation.
"""
if a is b:
return True
try:
equality = a == b
if isinstance(equality, bool):
return equality
except TypeError:
return False
return False
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Deprecation wrapper."""
# TODO(apassos) figure out a way to have reasonable performance with
# deprecation warnings and eager mode.
if is_in_graph_mode.IS_IN_GRAPH_MODE() and _PRINT_DEPRECATION_WARNINGS:
invalid_args = []
named_args = tf_inspect.getcallargs(func, *args, **kwargs)
for arg_name, spec in iter(deprecated_positions.items()):
if (spec.position < len(args) and
not (spec.has_ok_value and
_same_value(named_args[arg_name], spec.ok_value))):
invalid_args.append(arg_name)
if is_varargs_deprecated and len(args) > len(arg_spec.args):
invalid_args.append(arg_spec.varargs)
if is_kwargs_deprecated and kwargs:
invalid_args.append(arg_spec.varkw)
for arg_name in deprecated_arg_names:
if (arg_name in kwargs and
not (deprecated_positions[arg_name].has_ok_value and
_same_value(named_args[arg_name],
deprecated_positions[arg_name].ok_value))):
invalid_args.append(arg_name)
for arg_name in invalid_args:
if (func, arg_name) not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[(func, arg_name)] = True
logging.warning(
'From %s: calling %s (from %s) with %s is deprecated and will '
'be removed %s.\nInstructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__, arg_name,
'in a future version' if date is None else ('after %s' % date),
instructions)
return func(*args, **kwargs)
doc = _add_deprecated_arg_notice_to_docstring(
func.__doc__, date, instructions, sorted(deprecated_arg_names.keys()))
return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)
return deprecated_wrapper
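# NOTE(editor): Illustrative sketch, not part of the original file. It marks a
# single argument of a hypothetical function as deprecated; a warning is only
# logged when callers actually pass that argument (and only in graph mode).
def _example_deprecated_args():
  @deprecated_args('2020-01-01',
                   'The `verbose` argument is ignored; use logging instead.',
                   'verbose')
  def compute(x, verbose=False):
    del verbose  # No longer has any effect.
    return x * x
  compute(3)  # No warning: `verbose` was not supplied.
  return compute(3, verbose=True)  # May log a deprecation warning once.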
def deprecated_arg_values(date, instructions, warn_once=True,
**deprecated_kwargs):
"""Decorator for marking specific function argument values as deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called with the deprecated argument values. It has the following format:
Calling <function> (from <module>) with <arg>=<value> is deprecated and
will be removed after <date>. Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (deprecated arguments)' is
appended to the first line of the docstring and a deprecation notice is
prepended to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None.
instructions: String. Instructions on how to update code using the
deprecated function.
warn_once: If `True`, warn only the first time this function is called with
deprecated argument values. Otherwise, every call (with a deprecated
argument value) will log a warning.
**deprecated_kwargs: The deprecated argument values.
Returns:
Decorated function or method.
Raises:
    ValueError: If date is not None and not in ISO 8601 format, or instructions are
empty.
"""
_validate_deprecation_args(date, instructions)
if not deprecated_kwargs:
raise ValueError('Specify which argument values are deprecated.')
def deprecated_wrapper(func):
"""Deprecation decorator."""
decorator_utils.validate_callable(func, 'deprecated_arg_values')
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Deprecation wrapper."""
if _PRINT_DEPRECATION_WARNINGS:
named_args = tf_inspect.getcallargs(func, *args, **kwargs)
for arg_name, arg_value in deprecated_kwargs.items():
if arg_name in named_args and named_args[arg_name] == arg_value:
if (func, arg_name) not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[(func, arg_name)] = True
logging.warning(
'From %s: calling %s (from %s) with %s=%s is deprecated and '
'will be removed %s.\nInstructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__, arg_name, arg_value, 'in a future version'
if date is None else ('after %s' % date), instructions)
return func(*args, **kwargs)
doc = _add_deprecated_arg_value_notice_to_docstring(
func.__doc__, date, instructions, deprecated_kwargs)
return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)
return deprecated_wrapper
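# NOTE(editor): Illustrative sketch, not part of the original file. It
# deprecates a specific argument *value* of a hypothetical function; only
# calls passing that value trigger the warning.
def _example_deprecated_arg_values():
  @deprecated_arg_values('2020-01-01', 'Pass `mode="fast"` instead.',
                         mode='slow')
  def run(x, mode='fast'):
    return (x, mode)
  run(1, mode='fast')  # No warning: this value is not deprecated.
  return run(1, mode='slow')  # Logs a deprecation warning once.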
def deprecated_argument_lookup(new_name, new_value, old_name, old_value):
"""Looks up deprecated argument name and ensures both are not used.
Args:
new_name: new name of argument
new_value: value of new argument (or None if not used)
old_name: old name of argument
old_value: value of old argument (or None if not used)
Returns:
The effective argument that should be used.
Raises:
ValueError: if new_value and old_value are both non-null
"""
if old_value is not None:
if new_value is not None:
raise ValueError("Cannot specify both '%s' and '%s'" %
(old_name, new_name))
return old_value
return new_value
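# NOTE(editor): Illustrative sketch, not part of the original file. Inside a
# hypothetical wrapper that accepts both an old and a new argument name,
# whichever one the caller supplied wins, and supplying both raises ValueError.
def _example_deprecated_argument_lookup():
  def area(width=None, w=None):
    side = deprecated_argument_lookup('width', width, 'w', w)
    return side * side
  return area(w=4)  # Equivalent to area(width=4); returns 16.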
def rewrite_argument_docstring(old_doc, old_argument, new_argument):
return old_doc.replace('`%s`' % old_argument, '`%s`' % new_argument).replace(
'%s:' % old_argument, '%s:' % new_argument)
@tf_contextlib.contextmanager
def silence():
"""Temporarily silence deprecation warnings."""
global _PRINT_DEPRECATION_WARNINGS
print_deprecation_warnings = _PRINT_DEPRECATION_WARNINGS
_PRINT_DEPRECATION_WARNINGS = False
yield
_PRINT_DEPRECATION_WARNINGS = print_deprecation_warnings
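# NOTE(editor): Illustrative sketch, not part of the original file. Deprecation
# warnings can be muted for a block of code, e.g. when intentionally exercising
# a deprecated code path; `deprecated_fn` is a hypothetical callable.
def _example_silence(deprecated_fn):
  with silence():
    return deprecated_fn()  # No deprecation warning is logged here.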
class HiddenTfApiAttribute(property):
"""Hides a class attribute from the public API.
Attributes in public classes can be hidden from the API by having an '_' in
front of the name (e.g. ClassName._variables). This doesn't work when
attributes or methods are inherited from a parent class. To hide inherited
attributes, set their values to be `deprecation.hide_attribute_from_api`.
For example, this is used in V2 Estimator to hide the deprecated
export_savedmodel method:
class EstimatorV2(Estimator):
export_savedmodel = deprecation.hide_attribute_from_api('...')
"""
def __init__(self, deprecation_message):
def raise_error(unused_self):
raise AttributeError(deprecation_message)
super(HiddenTfApiAttribute, self).__init__(raise_error)
hide_attribute_from_api = HiddenTfApiAttribute # pylint: disable=invalid-name
# TODO(kathywu): Remove once cl/246395236 is submitted.
HIDDEN_ATTRIBUTE = HiddenTfApiAttribute('This attribute has been deprecated.')
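# NOTE(editor): Illustrative sketch, not part of the original file. It hides an
# inherited method of a hypothetical subclass from the public API; touching the
# attribute raises AttributeError with the given message.
def _example_hide_attribute_from_api():
  class Base(object):
    def legacy_export(self):
      return 'exported'
  class V2(Base):
    legacy_export = hide_attribute_from_api(
        'legacy_export is not available in V2.')
  try:
    _ = V2().legacy_export
  except AttributeError:
    return True
  return False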
|
tensorflow-master
|
tensorflow/python/util/deprecation.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ensure compatibility with future tensorflow versions.
This ensures that your code will be minimally impacted by future tensorflow
API changes. Import the module to prevent accidental usage of stale APIs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
delattr(tf, 'arg_max')
delattr(tf, 'arg_min')
delattr(tf, 'create_partitioned_variables')
delattr(tf, 'deserialize_many_sparse')
delattr(tf, 'lin_space')
delattr(tf, 'parse_single_sequence_example')
delattr(tf, 'serialize_many_sparse')
delattr(tf, 'serialize_sparse')
delattr(tf, 'sparse_matmul') # Use tf.matmul instead.
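# NOTE(editor): Illustrative sketch, not part of the original file. Once this
# module has been imported, the aliases deleted above are gone from the
# top-level `tf` namespace; a hypothetical check:
def _example_removed_symbol():
  try:
    _ = tf.arg_max
  except AttributeError:
    return True
  return False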
|
tensorflow-master
|
tensorflow/python/util/future_api.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type-based dispatch for TensorFlow ops.
"Operation dispatchers" can be used to override the behavior for TensorFlow ops
when they are called with otherwise unsupported argument types. In particular,
when an operation is called with arguments that would cause it to raise a
TypeError, it falls back on its registered operation dispatchers. If any
registered dispatcher can handle the arguments, its result is returned.
Otherwise, the original TypeError is raised.
By default, dispatch support is added to the generated op wrappers for any
visible ops. Ops that are implemented in Python can opt in to
dispatch support using the `add_dispatch_support` decorator.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# Private function attribute used to store a list of dispatchers.
DISPATCH_ATTR = "_tf_dispatchers"
class OpDispatcher(object):
"""Abstract base class for TensorFlow operator dispatchers.
Each operation dispatcher acts as an override handler for a single
TensorFlow operation, and its results are used when the handler indicates
that it can handle the operation's arguments (by returning any value other
than `OpDispatcher.NOT_SUPPORTED`).
"""
# Sentinel value that can be returned to indicate that an operation
# dispatcher does not support a given set of arguments.
NOT_SUPPORTED = object()
def handle(self, args, kwargs): # pylint: disable=unused-argument
"""Handle this dispatcher's operation with the specified arguments.
If this operation dispatcher can handle the given arguments, then
return an appropriate value (or raise an appropriate exception).
Args:
args: The arguments to the operation.
      kwargs: The keyword arguments to the operation.
Returns:
The result of the operation, or `OpDispatcher.NOT_SUPPORTED` if this
dispatcher can not handle the given arguments.
"""
return self.NOT_SUPPORTED
def register(self, op):
"""Register this dispatcher as a handler for `op`.
Args:
op: Python function: the TensorFlow operation that should be handled. Must
have a dispatch list (which is added automatically for generated ops,
and can be added to Python ops using the `add_dispatch_support`
decorator).
"""
if not hasattr(op, DISPATCH_ATTR):
raise AssertionError("Dispatching not enabled for %s" % op)
getattr(op, DISPATCH_ATTR).append(self)
def dispatch(op, *args, **kwargs):
"""Returns the result from the first successful dispatcher for a given op.
Calls the `handle` method of each `OpDispatcher` that has been registered
to handle `op`, and returns the value from the first successful handler.
Args:
op: Python function: the operation to dispatch for.
*args: The arguments to the operation.
    **kwargs: The keyword arguments to the operation.
Returns:
The result of the operation, or `NOT_SUPPORTED` if no registered
dispatcher can handle the given arguments.
"""
for dispatcher in getattr(op, DISPATCH_ATTR):
result = dispatcher.handle(args, kwargs)
if result is not OpDispatcher.NOT_SUPPORTED:
return result
return OpDispatcher.NOT_SUPPORTED
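# NOTE(editor): Illustrative sketch, not part of the original file. A minimal
# custom dispatcher for a hypothetical string-argument case; it follows the
# contract `dispatch()` relies on by returning NOT_SUPPORTED for arguments it
# does not recognize.
class _ExampleStringDispatcher(OpDispatcher):
  """Handles an op when any positional argument is a Python string."""
  def __init__(self, override_func):
    self._override_func = override_func
  def handle(self, args, kwargs):
    if any(isinstance(a, str) for a in args):
      return self._override_func(*args, **kwargs)
    return self.NOT_SUPPORTED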
class _TypeBasedDispatcher(OpDispatcher):
"""Dispatcher that handles op if any arguments have a specified type.
Checks the types of the arguments and keyword arguments (including elements
of lists or tuples), and if any argument values have the indicated type(s),
then delegates to an override function.
"""
def __init__(self, override_func, types):
self._types = types
self._override_func = override_func
def _handles(self, args, kwargs):
for arg in itertools.chain(args, kwargs.values()):
if (isinstance(arg, self._types) or
(isinstance(arg, (list, tuple)) and
any(isinstance(elt, self._types) for elt in arg))):
return True
return False
def handle(self, args, kwargs):
if self._handles(args, kwargs):
return self._override_func(*args, **kwargs)
else:
return self.NOT_SUPPORTED
# pylint: disable=g-doc-return-or-yield
def dispatch_for_types(op, *types):
"""Decorator to declare that a Python function overrides an op for a type.
The decorated function is used to override `op` if any of the arguments or
keyword arguments (including elements of lists or tuples) have one of the
specified types.
Example:
```python
@dispatch_for_types(math_ops.add, RaggedTensor, RaggedTensorValue)
def ragged_add(x, y, name=None): ...
```
Args:
op: Python function: the operation that should be overridden.
*types: The argument types for which this function should be used.
"""
def decorator(func):
if tf_inspect.getargspec(func) != tf_inspect.getargspec(op):
raise AssertionError("The decorated function's signature must exactly "
"match the signature of the overridden op.")
_TypeBasedDispatcher(func, types).register(op)
return func
return decorator
# pylint: enable=g-doc-return-or-yield
def add_dispatch_list(target):
"""Decorator that adds a dispatch_list attribute to an op."""
if hasattr(target, DISPATCH_ATTR):
raise AssertionError("%s already has a dispatch list" % target)
setattr(target, DISPATCH_ATTR, [])
return target
def add_dispatch_support(target):
"""Decorator that adds a dispatch handling wrapper to an op."""
def wrapper(*args, **kwargs):
"""Call target, and fall back on dispatchers if there is a TypeError."""
try:
return target(*args, **kwargs)
except (TypeError, ValueError):
# Note: convert_to_eager_tensor currently raises a ValueError, not a
# TypeError, when given unexpected types. So we need to catch both.
result = dispatch(wrapper, *args, **kwargs)
if result is not OpDispatcher.NOT_SUPPORTED:
return result
else:
raise
add_dispatch_list(wrapper)
return tf_decorator.make_decorator(target, wrapper)
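# NOTE(editor): Illustrative sketch, not part of the original file. End-to-end
# usage on a hypothetical op: `add_dispatch_support` wraps the op, and
# `dispatch_for_types` registers an override for a custom type, so calls that
# would otherwise raise TypeError are redirected to the override.
def _example_dispatch_flow():
  class MyValue(object):
    def __init__(self, v):
      self.v = v
  @add_dispatch_support
  def scale(x, factor=2):
    return x * factor  # Raises TypeError for MyValue instances.
  @dispatch_for_types(scale, MyValue)
  def scale_my_value(x, factor=2):
    return MyValue(x.v * factor)
  return scale(MyValue(3)).v  # Dispatches to scale_my_value and returns 6.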
|
tensorflow-master
|
tensorflow/python/util/dispatch.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFDecorator-aware replacements for the contextlib module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib as _contextlib
from tensorflow.python.util import tf_decorator
def contextmanager(target):
"""A tf_decorator-aware wrapper for `contextlib.contextmanager`.
Usage is identical to `contextlib.contextmanager`.
Args:
target: A callable to be wrapped in a contextmanager.
Returns:
A callable that can be used inside of a `with` statement.
"""
context_manager = _contextlib.contextmanager(target)
return tf_decorator.make_decorator(target, context_manager, 'contextmanager')
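# NOTE(editor): Illustrative sketch, not part of the original file. Usage
# mirrors `contextlib.contextmanager`; the difference is that the result is a
# TFDecorator, so introspection sees the original generator function. The
# timing helper below is hypothetical.
def _example_contextmanager():
  import time
  @contextmanager
  def timed(label):
    start = time.time()
    yield
    print('%s took %.3fs' % (label, time.time() - start))
  with timed('work'):
    sum(range(1000))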
|
tensorflow-master
|
tensorflow/python/util/tf_contextlib.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator related util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.platform import test
from tensorflow.python.util import function_utils
def silly_example_function():
pass
class SillyCallableClass(object):
def __call__(self):
pass
class FnArgsTest(test.TestCase):
def test_simple_function(self):
def fn(a, b):
return a + b
self.assertEqual(('a', 'b'), function_utils.fn_args(fn))
def test_callable(self):
class Foo(object):
def __call__(self, a, b):
return a + b
self.assertEqual(('a', 'b'), function_utils.fn_args(Foo()))
def test_bound_method(self):
class Foo(object):
def bar(self, a, b):
return a + b
self.assertEqual(('a', 'b'), function_utils.fn_args(Foo().bar))
def test_bound_method_no_self(self):
class Foo(object):
def bar(*args): # pylint:disable=no-method-argument
return args[1] + args[2]
self.assertEqual((), function_utils.fn_args(Foo().bar))
def test_partial_function(self):
expected_test_arg = 123
def fn(a, test_arg):
if test_arg != expected_test_arg:
return ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, test_arg=123)
self.assertEqual(('a',), function_utils.fn_args(wrapped_fn))
def test_partial_function_with_positional_args(self):
expected_test_arg = 123
def fn(test_arg, a):
if test_arg != expected_test_arg:
return ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, 123)
self.assertEqual(('a',), function_utils.fn_args(wrapped_fn))
self.assertEqual(3, wrapped_fn(3))
self.assertEqual(3, wrapped_fn(a=3))
def test_double_partial(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn(a, test_arg1, test_arg2):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial does not work correctly')
return a
wrapped_fn = functools.partial(fn, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)
self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))
def test_double_partial_with_positional_args_in_outer_layer(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn(test_arg1, a, test_arg2):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, 123)
self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))
self.assertEqual(3, double_wrapped_fn(3))
self.assertEqual(3, double_wrapped_fn(a=3))
def test_double_partial_with_positional_args_in_both_layers(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn(test_arg1, test_arg2, a):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, 123) # binds to test_arg1
double_wrapped_fn = functools.partial(wrapped_fn, 456) # binds to test_arg2
self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))
self.assertEqual(3, double_wrapped_fn(3))
self.assertEqual(3, double_wrapped_fn(a=3))
class HasKwargsTest(test.TestCase):
def test_simple_function(self):
fn_has_kwargs = lambda **x: x
self.assertTrue(function_utils.has_kwargs(fn_has_kwargs))
fn_has_no_kwargs = lambda x: x
self.assertFalse(function_utils.has_kwargs(fn_has_no_kwargs))
def test_callable(self):
class FooHasKwargs(object):
def __call__(self, **x):
del x
self.assertTrue(function_utils.has_kwargs(FooHasKwargs()))
class FooHasNoKwargs(object):
def __call__(self, x):
del x
self.assertFalse(function_utils.has_kwargs(FooHasNoKwargs()))
def test_bound_method(self):
class FooHasKwargs(object):
def fn(self, **x):
del x
self.assertTrue(function_utils.has_kwargs(FooHasKwargs().fn))
class FooHasNoKwargs(object):
def fn(self, x):
del x
self.assertFalse(function_utils.has_kwargs(FooHasNoKwargs().fn))
def test_partial_function(self):
expected_test_arg = 123
def fn_has_kwargs(test_arg, **x):
if test_arg != expected_test_arg:
return ValueError('partial fn does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_kwargs, test_arg=123)
self.assertTrue(function_utils.has_kwargs(wrapped_fn))
some_kwargs = dict(x=1, y=2, z=3)
self.assertEqual(wrapped_fn(**some_kwargs), some_kwargs)
def fn_has_no_kwargs(x, test_arg):
if test_arg != expected_test_arg:
return ValueError('partial fn does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_no_kwargs, test_arg=123)
self.assertFalse(function_utils.has_kwargs(wrapped_fn))
some_arg = 1
self.assertEqual(wrapped_fn(some_arg), some_arg)
def test_double_partial(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn_has_kwargs(test_arg1, test_arg2, **x):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_kwargs, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)
self.assertTrue(function_utils.has_kwargs(double_wrapped_fn))
some_kwargs = dict(x=1, y=2, z=3)
self.assertEqual(double_wrapped_fn(**some_kwargs), some_kwargs)
def fn_has_no_kwargs(x, test_arg1, test_arg2):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_no_kwargs, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)
self.assertFalse(function_utils.has_kwargs(double_wrapped_fn))
some_arg = 1
self.assertEqual(double_wrapped_fn(some_arg), some_arg)
def test_raises_type_error(self):
with self.assertRaisesRegexp(
TypeError, 'fn should be a function-like object'):
function_utils.has_kwargs('not a function')
class GetFuncNameTest(test.TestCase):
def testWithSimpleFunction(self):
self.assertEqual(
'silly_example_function',
function_utils.get_func_name(silly_example_function))
def testWithClassMethod(self):
self.assertEqual(
'GetFuncNameTest.testWithClassMethod',
function_utils.get_func_name(self.testWithClassMethod))
def testWithCallableClass(self):
callable_instance = SillyCallableClass()
self.assertRegexpMatches(
function_utils.get_func_name(callable_instance),
'<.*SillyCallableClass.*>')
def testWithFunctoolsPartial(self):
partial = functools.partial(silly_example_function)
self.assertRegexpMatches(
function_utils.get_func_name(partial),
'<.*functools.partial.*>')
def testWithLambda(self):
anon_fn = lambda x: x
self.assertEqual('<lambda>', function_utils.get_func_name(anon_fn))
def testRaisesWithNonCallableObject(self):
with self.assertRaises(ValueError):
function_utils.get_func_name(None)
class GetFuncCodeTest(test.TestCase):
def testWithSimpleFunction(self):
code = function_utils.get_func_code(silly_example_function)
self.assertIsNotNone(code)
self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
def testWithClassMethod(self):
code = function_utils.get_func_code(self.testWithClassMethod)
self.assertIsNotNone(code)
self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
def testWithCallableClass(self):
callable_instance = SillyCallableClass()
code = function_utils.get_func_code(callable_instance)
self.assertIsNotNone(code)
self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
def testWithLambda(self):
anon_fn = lambda x: x
code = function_utils.get_func_code(anon_fn)
self.assertIsNotNone(code)
self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
def testWithFunctoolsPartial(self):
partial = functools.partial(silly_example_function)
code = function_utils.get_func_code(partial)
self.assertIsNone(code)
def testRaisesWithNonCallableObject(self):
with self.assertRaises(ValueError):
function_utils.get_func_code(None)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/util/function_utils_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Locking related utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
class GroupLock(object):
"""A lock to allow many members of a group to access a resource exclusively.
This lock provides a way to allow access to a resource by multiple threads
belonging to a logical group at the same time, while restricting access to
threads from all other groups. You can think of this as an extension of a
reader-writer lock, where you allow multiple writers at the same time. We
made it generic to support multiple groups instead of just two - readers and
writers.
Simple usage example with two groups accessing the same resource:
```python
lock = GroupLock(num_groups=2)
# In a member of group 0:
with lock.group(0):
# do stuff, access the resource
# ...
# In a member of group 1:
with lock.group(1):
# do stuff, access the resource
# ...
```
Using as a context manager with `.group(group_id)` is the easiest way. You
  can also use the `acquire` and `release` methods directly.
"""
def __init__(self, num_groups=2):
"""Initialize a group lock.
Args:
num_groups: The number of groups that will be accessing the resource under
consideration. Should be a positive number.
Returns:
A group lock that can then be used to synchronize code.
Raises:
ValueError: If num_groups is less than 1.
"""
if num_groups < 1:
raise ValueError("num_groups must be a positive integer, got {}".format(
num_groups))
self._ready = threading.Condition(threading.Lock())
self._num_groups = num_groups
self._group_member_counts = [0] * self._num_groups
def group(self, group_id):
"""Enter a context where the lock is with group `group_id`.
Args:
group_id: The group for which to acquire and release the lock.
Returns:
A context manager which will acquire the lock for `group_id`.
"""
self._validate_group_id(group_id)
return self._Context(self, group_id)
def acquire(self, group_id):
"""Acquire the group lock for a specific group `group_id`."""
self._validate_group_id(group_id)
self._ready.acquire()
while self._another_group_active(group_id):
self._ready.wait()
self._group_member_counts[group_id] += 1
self._ready.release()
def release(self, group_id):
"""Release the group lock for a specific group `group_id`."""
self._validate_group_id(group_id)
self._ready.acquire()
self._group_member_counts[group_id] -= 1
if self._group_member_counts[group_id] == 0:
self._ready.notifyAll()
self._ready.release()
def _another_group_active(self, group_id):
return any(
c > 0 for g, c in enumerate(self._group_member_counts) if g != group_id)
def _validate_group_id(self, group_id):
if group_id < 0 or group_id >= self._num_groups:
raise ValueError(
"group_id={} should be between 0 and num_groups={}".format(
group_id, self._num_groups))
class _Context(object):
"""Context manager helper for `GroupLock`."""
def __init__(self, lock, group_id):
self._lock = lock
self._group_id = group_id
def __enter__(self):
self._lock.acquire(self._group_id)
def __exit__(self, type_arg, value_arg, traceback_arg):
del type_arg, value_arg, traceback_arg
self._lock.release(self._group_id)
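# NOTE(editor): Illustrative sketch, not part of the original file. A
# hypothetical two-group usage: members of the same group may hold the lock
# concurrently, but the two groups never overlap.
def _example_group_lock():
  lock = GroupLock(num_groups=2)
  counters = {'reads': 0, 'writes': 0}
  def reader():
    with lock.group(0):
      counters['reads'] += 1
  def writer():
    with lock.group(1):
      counters['writes'] += 1
  threads = [threading.Thread(target=reader) for _ in range(3)]
  threads += [threading.Thread(target=writer) for _ in range(2)]
  for t in threads:
    t.start()
  for t in threads:
    t.join()
  return counters  # {'reads': 3, 'writes': 2}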
|
tensorflow-master
|
tensorflow/python/util/lock_util.py
|
tensorflow-master
|
tensorflow/python/util/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deprecation tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
class DeprecatedAliasTest(test.TestCase):
@test.mock.patch.object(logging, "warning", autospec=True)
def test_function_alias(self, mock_warning):
deprecated_func = deprecation.deprecated_alias("deprecated.func",
"real.func",
logging.error)
logging.error("fake error logged")
self.assertEqual(0, mock_warning.call_count)
deprecated_func("FAKE ERROR!")
self.assertEqual(1, mock_warning.call_count)
# Make sure the error points to the right file.
self.assertRegexpMatches(mock_warning.call_args[0][1],
r"deprecation_test\.py:")
deprecated_func("ANOTHER FAKE ERROR!")
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_class_alias(self, mock_warning):
class MyClass(object):
"""My docstring."""
init_args = []
def __init__(self, arg):
MyClass.init_args.append(arg)
deprecated_cls = deprecation.deprecated_alias("deprecated.cls",
"real.cls",
MyClass)
print(deprecated_cls.__name__)
print(deprecated_cls.__module__)
print(deprecated_cls.__doc__)
MyClass("test")
self.assertEqual(0, mock_warning.call_count)
deprecated_cls("deprecated")
self.assertEqual(1, mock_warning.call_count)
# Make sure the error points to the right file.
self.assertRegexpMatches(mock_warning.call_args[0][1],
r"deprecation_test\.py:")
deprecated_cls("deprecated again")
self.assertEqual(1, mock_warning.call_count)
self.assertEqual(["test", "deprecated", "deprecated again"],
MyClass.init_args)
# Check __init__ signature matches for doc generation.
self.assertEqual(
tf_inspect.getfullargspec(MyClass.__init__),
tf_inspect.getfullargspec(deprecated_cls.__init__))
class DeprecationTest(test.TestCase):
@test.mock.patch.object(logging, "warning", autospec=True)
def test_deprecated_once(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions, warn_once=True)
def _fn():
pass
_fn()
self.assertEqual(1, mock_warning.call_count)
_fn()
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_silence(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions, warn_once=False)
def _fn():
pass
_fn()
self.assertEqual(1, mock_warning.call_count)
with deprecation.silence():
_fn()
self.assertEqual(1, mock_warning.call_count)
_fn()
self.assertEqual(2, mock_warning.call_count)
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated("", instructions)
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated("07-04-2016", instructions)
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated(date, None)
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated(date, "")
@test.mock.patch.object(logging, "warning", autospec=True)
def test_no_date(self, mock_warning):
date = None
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nWarning: THIS FUNCTION IS DEPRECATED. "
"It will be removed in a future version."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % instructions, _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(
args[0], r"deprecated and will be removed")
self._assert_subset(set(["in a future version", instructions]),
set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nWarning: THIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc."""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nWarning: THIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nWarning: THIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
def __init(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nWarning: THIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions),
getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
"""fn doc."""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nWarning: THIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions),
getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nWarning: THIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions),
getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
def test_prop_wrong_order(self):
with self.assertRaisesRegexp(
ValueError,
"make sure @property appears before @deprecated in your source code"):
# pylint: disable=unused-variable
class _Object(object):
        def __init__(self):
pass
@deprecation.deprecated("2016-07-04", "Instructions.")
@property
def _prop(self):
return "prop_wrong_order"
@test.mock.patch.object(logging, "warning", autospec=True)
def test_prop_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@property
@deprecation.deprecated(date, instructions)
def _prop(self):
"""prop doc.
Returns:
String.
"""
return "prop_with_doc"
# Assert function docs are properly updated.
self.assertEqual(
"prop doc. (deprecated)"
"\n"
"\nWarning: THIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s"
"\n"
"\nReturns:"
"\n String." % (date, instructions),
getattr(_Object, "_prop").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual("prop_with_doc", _Object()._prop)
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_prop_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@property
@deprecation.deprecated(date, instructions)
def _prop(self):
return "prop_no_doc"
# Assert function docs are properly updated.
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nWarning: THIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions),
getattr(_Object, "_prop").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual("prop_no_doc", _Object()._prop)
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
class DeprecatedArgsTest(test.TestCase):
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_args("", instructions, "deprecated")
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_args("07-04-2016", instructions, "deprecated")
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_args(date, None, "deprecated")
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_args(date, "", "deprecated")
with self.assertRaisesRegexp(ValueError, "argument"):
deprecation.deprecated_args(date, instructions)
def test_deprecated_missing_args(self):
date = "2016-07-04"
instructions = "This is how you update..."
def _fn(arg0, arg1, deprecated=None):
return arg0 + arg1 if deprecated else arg1 + arg0
    # Assert decorating a function that lacks the named argument raises an error.
with self.assertRaisesRegexp(ValueError, "not present.*\\['missing'\\]"):
deprecation.deprecated_args(date, instructions, "missing")(_fn)
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
deprecated: Deprecated!
Returns:
Sum of args.
"""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nWarning: SOME ARGUMENTS ARE DEPRECATED: `(deprecated)`. "
"They will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n deprecated: Deprecated!"
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions), _fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
"""fn doc."""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nWarning: SOME ARGUMENTS ARE DEPRECATED: `(deprecated)`. "
"They will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION ARGUMENTS"
"\n"
"\nWarning: SOME ARGUMENTS ARE DEPRECATED: `(deprecated)`. "
"They will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), _fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_varargs(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, *deprecated):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True, False))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_kwargs(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, **deprecated):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, a=True, b=False))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_positional_and_named(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "d1", "d2")
def _fn(arg0, d1=None, arg1=2, d2=None):
return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1
# Assert calls without the deprecated arguments log nothing.
self.assertEqual(2, _fn(1, arg1=2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated arguments log warnings.
self.assertEqual(2, _fn(1, None, 2, d2=False))
self.assertEqual(2, mock_warning.call_count)
(args1, _) = mock_warning.call_args_list[0]
self.assertRegexpMatches(args1[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d1"]),
set(args1[1:]))
(args2, _) = mock_warning.call_args_list[1]
self.assertRegexpMatches(args2[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d2"]),
set(args2[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_positional_and_named_with_ok_vals(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, ("d1", None),
("d2", "my_ok_val"))
def _fn(arg0, d1=None, arg1=2, d2=None):
return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1
# Assert calls without the deprecated arguments log nothing.
self.assertEqual(2, _fn(1, arg1=2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated arguments log warnings.
self.assertEqual(2, _fn(1, False, 2, d2=False))
self.assertEqual(2, mock_warning.call_count)
(args1, _) = mock_warning.call_args_list[0]
self.assertRegexpMatches(args1[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d1"]),
set(args1[1:]))
(args2, _) = mock_warning.call_args_list[1]
self.assertRegexpMatches(args2[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d2"]),
set(args2[1:]))
# Assert calls with the deprecated arguments don't log warnings if
# the value matches the 'ok_val'.
mock_warning.reset_mock()
self.assertEqual(3, _fn(1, None, 2, d2="my_ok_val"))
self.assertEqual(0, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_deprecated_args_once(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "arg", warn_once=True)
def _fn(arg=0): # pylint: disable=unused-argument
pass
_fn()
self.assertEqual(0, mock_warning.call_count)
_fn(arg=0)
self.assertEqual(1, mock_warning.call_count)
_fn(arg=1)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_deprecated_multiple_args_once_each(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "arg0", "arg1",
warn_once=True)
def _fn(arg0=0, arg1=0): # pylint: disable=unused-argument
pass
_fn(arg0=0)
self.assertEqual(1, mock_warning.call_count)
_fn(arg0=0)
self.assertEqual(1, mock_warning.call_count)
_fn(arg1=0)
self.assertEqual(2, mock_warning.call_count)
_fn(arg0=0)
self.assertEqual(2, mock_warning.call_count)
_fn(arg1=0)
self.assertEqual(2, mock_warning.call_count)
class DeprecatedArgValuesTest(test.TestCase):
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_arg_values("", instructions, deprecated=True)
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_arg_values(
"07-04-2016", instructions, deprecated=True)
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_arg_values(date, None, deprecated=True)
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_arg_values(date, "", deprecated=True)
with self.assertRaisesRegexp(ValueError, "argument", deprecated=True):
deprecation.deprecated_arg_values(date, instructions)
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, warn_once=False,
deprecated=True)
def _fn(arg0, arg1, deprecated=True):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
deprecated: Deprecated!
Returns:
Sum of args.
"""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated argument values)"
"\n"
"\nWarning: SOME ARGUMENT VALUES ARE DEPRECATED: `(deprecated=True)`. "
"They will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n deprecated: Deprecated!"
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions), _fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn with deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, warn_once=False,
deprecated=True)
def _fn(arg0, arg1, deprecated=True):
"""fn doc."""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated argument values)"
"\n"
"\nWarning: SOME ARGUMENT VALUES ARE DEPRECATED: `(deprecated=True)`. "
"They will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn with deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
@test_util.run_deprecated_v1
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, warn_once=False,
deprecated=True)
def _fn(arg0, arg1, deprecated=True):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION ARGUMENT VALUES"
"\n"
"\nWarning: SOME ARGUMENT VALUES ARE DEPRECATED: `(deprecated=True)`. "
"They will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_deprecated_arg_values_once(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, warn_once=True,
deprecated=True)
def _fn(deprecated): # pylint: disable=unused-argument
pass
_fn(deprecated=False)
self.assertEqual(0, mock_warning.call_count)
_fn(deprecated=True)
self.assertEqual(1, mock_warning.call_count)
_fn(deprecated=True)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_deprecated_multiple_arg_values_once_each(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, warn_once=True,
arg0="forbidden", arg1="disallowed")
def _fn(arg0, arg1): # pylint: disable=unused-argument
pass
_fn(arg0="allowed", arg1="also allowed")
self.assertEqual(0, mock_warning.call_count)
_fn(arg0="forbidden", arg1="disallowed")
self.assertEqual(2, mock_warning.call_count)
_fn(arg0="forbidden", arg1="allowed")
self.assertEqual(2, mock_warning.call_count)
_fn(arg0="forbidden", arg1="disallowed")
self.assertEqual(2, mock_warning.call_count)
class DeprecationArgumentsTest(test.TestCase):
def testDeprecatedArgumentLookup(self):
good_value = 3
self.assertEqual(
deprecation.deprecated_argument_lookup("val_new", good_value, "val_old",
None), good_value)
self.assertEqual(
deprecation.deprecated_argument_lookup("val_new", None, "val_old",
good_value), good_value)
with self.assertRaisesRegexp(ValueError,
"Cannot specify both 'val_old' and 'val_new'"):
self.assertEqual(
deprecation.deprecated_argument_lookup("val_new", good_value,
"val_old", good_value),
good_value)
def testRewriteArgumentDocstring(self):
docs = """Add `a` and `b`
Args:
a: first arg
b: second arg
"""
new_docs = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(docs, "a", "left"), "b", "right")
new_docs_ref = """Add `left` and `right`
Args:
left: first arg
right: second arg
"""
self.assertEqual(new_docs, new_docs_ref)
class DeprecatedEndpointsTest(test.TestCase):
def testSingleDeprecatedEndpoint(self):
@deprecation.deprecated_endpoints("foo1")
def foo():
pass
self.assertEqual(("foo1",), foo._tf_deprecated_api_names)
def testMultipleDeprecatedEndpoint(self):
@deprecation.deprecated_endpoints("foo1", "foo2")
def foo():
pass
self.assertEqual(("foo1", "foo2"), foo._tf_deprecated_api_names)
def testCannotSetDeprecatedEndpointsTwice(self):
with self.assertRaises(deprecation.DeprecatedNamesAlreadySet):
@deprecation.deprecated_endpoints("foo1")
@deprecation.deprecated_endpoints("foo2")
def foo(): # pylint: disable=unused-variable
pass
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/util/deprecation_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tf_decorator."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def test_tfdecorator(decorator_name, decorator_doc=None):
def make_tf_decorator(target):
return tf_decorator.TFDecorator(decorator_name, target, decorator_doc)
return make_tf_decorator
def test_decorator_increment_first_int_arg(target):
"""This test decorator skips past `self` as args[0] in the bound case."""
def wrapper(*args, **kwargs):
new_args = []
found = False
for arg in args:
if not found and isinstance(arg, int):
new_args.append(arg + 1)
found = True
else:
new_args.append(arg)
return target(*new_args, **kwargs)
return tf_decorator.make_decorator(target, wrapper)
def test_injectable_decorator_square(target):
def wrapper(x):
return wrapper.__wrapped__(x)**2
return tf_decorator.make_decorator(target, wrapper)
def test_injectable_decorator_increment(target):
def wrapper(x):
return wrapper.__wrapped__(x) + 1
return tf_decorator.make_decorator(target, wrapper)
def test_function(x):
"""Test Function Docstring."""
return x + 1
@test_tfdecorator('decorator 1')
@test_decorator_increment_first_int_arg
@test_tfdecorator('decorator 3', 'decorator 3 documentation')
def test_decorated_function(x):
"""Test Decorated Function Docstring."""
return x * 2
@test_injectable_decorator_square
@test_injectable_decorator_increment
def test_rewrappable_decorated(x):
return x * 2
@test_tfdecorator('decorator')
class TestDecoratedClass(object):
"""Test Decorated Class."""
def __init__(self, two_attr=2):
self.two_attr = two_attr
@property
def two_prop(self):
return 2
def two_func(self):
return 2
@test_decorator_increment_first_int_arg
def return_params(self, a, b, c):
"""Return parameters."""
return [a, b, c]
class TfDecoratorTest(test.TestCase):
def testInitCapturesTarget(self):
self.assertIs(test_function,
tf_decorator.TFDecorator('', test_function).decorated_target)
def testInitCapturesDecoratorName(self):
self.assertEqual('decorator name',
tf_decorator.TFDecorator('decorator name',
test_function).decorator_name)
def testInitCapturesDecoratorDoc(self):
self.assertEqual('decorator doc',
tf_decorator.TFDecorator('', test_function,
'decorator doc').decorator_doc)
def testInitCapturesNonNoneArgspec(self):
argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
self.assertIs(argspec,
tf_decorator.TFDecorator('', test_function, '',
argspec).decorator_argspec)
def testInitSetsDecoratorNameToTargetName(self):
self.assertEqual('test_function',
tf_decorator.TFDecorator('', test_function).__name__)
def testInitSetsDecoratorDocToTargetDoc(self):
self.assertEqual('Test Function Docstring.',
tf_decorator.TFDecorator('', test_function).__doc__)
def testCallingATFDecoratorCallsTheTarget(self):
self.assertEqual(124, tf_decorator.TFDecorator('', test_function)(123))
def testCallingADecoratedFunctionCallsTheTarget(self):
self.assertEqual((2 + 1) * 2, test_decorated_function(2))
def testInitializingDecoratedClassWithInitParamsDoesntRaise(self):
try:
TestDecoratedClass(2)
except TypeError:
      self.fail("Initializing the decorated class raised TypeError.")
def testReadingClassAttributeOnDecoratedClass(self):
self.assertEqual(2, TestDecoratedClass().two_attr)
def testCallingClassMethodOnDecoratedClass(self):
self.assertEqual(2, TestDecoratedClass().two_func())
def testReadingClassPropertyOnDecoratedClass(self):
self.assertEqual(2, TestDecoratedClass().two_prop)
def testNameOnBoundProperty(self):
self.assertEqual('return_params',
TestDecoratedClass().return_params.__name__)
def testDocstringOnBoundProperty(self):
self.assertEqual('Return parameters.',
TestDecoratedClass().return_params.__doc__)
def testTarget__get__IsProxied(self):
class Descr(object):
def __get__(self, instance, owner):
return self
class Foo(object):
foo = tf_decorator.TFDecorator('Descr', Descr())
self.assertIsInstance(Foo.foo, Descr)
def test_wrapper(*args, **kwargs):
return test_function(*args, **kwargs)
class TfMakeDecoratorTest(test.TestCase):
def testAttachesATFDecoratorAttr(self):
decorated = tf_decorator.make_decorator(test_function, test_wrapper)
decorator = getattr(decorated, '_tf_decorator')
self.assertIsInstance(decorator, tf_decorator.TFDecorator)
def testAttachesWrappedAttr(self):
decorated = tf_decorator.make_decorator(test_function, test_wrapper)
wrapped_attr = getattr(decorated, '__wrapped__')
self.assertIs(test_function, wrapped_attr)
def testSetsTFDecoratorNameToDecoratorNameArg(self):
decorated = tf_decorator.make_decorator(test_function, test_wrapper,
'test decorator name')
decorator = getattr(decorated, '_tf_decorator')
self.assertEqual('test decorator name', decorator.decorator_name)
def testSetsTFDecoratorDocToDecoratorDocArg(self):
decorated = tf_decorator.make_decorator(
test_function, test_wrapper, decorator_doc='test decorator doc')
decorator = getattr(decorated, '_tf_decorator')
self.assertEqual('test decorator doc', decorator.decorator_doc)
def testUpdatesDictWithMissingEntries(self):
test_function.foobar = True
decorated = tf_decorator.make_decorator(test_function, test_wrapper)
self.assertTrue(decorated.foobar)
del test_function.foobar
def testUpdatesDict_doesNotOverridePresentEntries(self):
test_function.foobar = True
test_wrapper.foobar = False
decorated = tf_decorator.make_decorator(test_function, test_wrapper)
self.assertFalse(decorated.foobar)
del test_function.foobar
del test_wrapper.foobar
def testSetsTFDecoratorArgSpec(self):
argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
decorated = tf_decorator.make_decorator(test_function, test_wrapper, '', '',
argspec)
decorator = getattr(decorated, '_tf_decorator')
self.assertEqual(argspec, decorator.decorator_argspec)
def testSetsDecoratorNameToFunctionThatCallsMakeDecoratorIfAbsent(self):
def test_decorator_name(wrapper):
return tf_decorator.make_decorator(test_function, wrapper)
decorated = test_decorator_name(test_wrapper)
decorator = getattr(decorated, '_tf_decorator')
self.assertEqual('test_decorator_name', decorator.decorator_name)
def testCompatibleWithNamelessCallables(self):
class Callable(object):
def __call__(self):
pass
callable_object = Callable()
# Smoke test: This should not raise an exception, even though
# `callable_object` does not have a `__name__` attribute.
_ = tf_decorator.make_decorator(callable_object, test_wrapper)
partial = functools.partial(test_function, x=1)
# Smoke test: This should not raise an exception, even though `partial` does
# not have `__name__`, `__module__`, and `__doc__` attributes.
_ = tf_decorator.make_decorator(partial, test_wrapper)
class TfDecoratorRewrapTest(test.TestCase):
def testRewrapMutatesAffectedFunction(self):
def new_target(x):
return x * 3
self.assertEqual((1 * 2 + 1) ** 2, test_rewrappable_decorated(1))
prev_target, _ = tf_decorator.unwrap(test_rewrappable_decorated)
tf_decorator.rewrap(test_rewrappable_decorated, prev_target, new_target)
self.assertEqual((1 * 3 + 1) ** 2, test_rewrappable_decorated(1))
def testRewrapOfDecoratorFunction(self):
def new_target(x):
return x * 3
prev_target = test_rewrappable_decorated._tf_decorator._decorated_target
# In this case, only the outer decorator (test_injectable_decorator_square)
# should be preserved.
tf_decorator.rewrap(test_rewrappable_decorated, prev_target, new_target)
self.assertEqual((1 * 3) ** 2, test_rewrappable_decorated(1))
class TfDecoratorUnwrapTest(test.TestCase):
def testUnwrapReturnsEmptyArrayForUndecoratedFunction(self):
decorators, _ = tf_decorator.unwrap(test_function)
self.assertEqual(0, len(decorators))
def testUnwrapReturnsUndecoratedFunctionAsTarget(self):
_, target = tf_decorator.unwrap(test_function)
self.assertIs(test_function, target)
def testUnwrapReturnsFinalFunctionAsTarget(self):
self.assertEqual((4 + 1) * 2, test_decorated_function(4))
_, target = tf_decorator.unwrap(test_decorated_function)
self.assertTrue(tf_inspect.isfunction(target))
self.assertEqual(4 * 2, target(4))
def testUnwrapReturnsListOfUniqueTFDecorators(self):
decorators, _ = tf_decorator.unwrap(test_decorated_function)
self.assertEqual(3, len(decorators))
self.assertTrue(isinstance(decorators[0], tf_decorator.TFDecorator))
self.assertTrue(isinstance(decorators[1], tf_decorator.TFDecorator))
self.assertTrue(isinstance(decorators[2], tf_decorator.TFDecorator))
self.assertIsNot(decorators[0], decorators[1])
self.assertIsNot(decorators[1], decorators[2])
self.assertIsNot(decorators[2], decorators[0])
def testUnwrapReturnsDecoratorListFromOutermostToInnermost(self):
decorators, _ = tf_decorator.unwrap(test_decorated_function)
self.assertEqual('decorator 1', decorators[0].decorator_name)
self.assertEqual('test_decorator_increment_first_int_arg',
decorators[1].decorator_name)
self.assertEqual('decorator 3', decorators[2].decorator_name)
self.assertEqual('decorator 3 documentation', decorators[2].decorator_doc)
def testUnwrapBoundMethods(self):
test_decorated_class = TestDecoratedClass()
self.assertEqual([2, 2, 3], test_decorated_class.return_params(1, 2, 3))
decorators, target = tf_decorator.unwrap(test_decorated_class.return_params)
self.assertEqual('test_decorator_increment_first_int_arg',
decorators[0].decorator_name)
self.assertEqual([1, 2, 3], target(test_decorated_class, 1, 2, 3))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/util/tf_decorator_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function that tells you if the program is running in graph mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Call IS_IN_GRAPH_MODE() when you want to know whether the thread is in
# graph mode. By default, we always are.
IS_IN_GRAPH_MODE = lambda: True
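# Editor's sketch (illustrative, not part of the original module): the hook is
# a plain module-level callable, so callers that need different behavior can
# rebind it at runtime. The eager-mode override below is an assumption about
# usage, not the actual TensorFlow wiring.
#
#   from tensorflow.python.util import is_in_graph_mode
#   is_in_graph_mode.IS_IN_GRAPH_MODE = lambda: False  # e.g. while eager
#   if is_in_graph_mode.IS_IN_GRAPH_MODE():
#     pass  # graph-mode-only code path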
|
tensorflow-master
|
tensorflow/python/util/is_in_graph_mode.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides wrapper for TensorFlow modules to support deprecation messages.
TODO(annarev): potentially merge with LazyLoader.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import types
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import tf_stack
from tensorflow.tools.compatibility import all_renames_v2
_PER_MODULE_WARNING_LIMIT = 5
def get_rename_v2(name):
if name not in all_renames_v2.symbol_renames:
return None
return all_renames_v2.symbol_renames[name]
def _call_location():
# We want to get stack frame 2 frames up from current frame,
  # i.e. above __getattr__ and _call_location calls.
stack = tf_stack.extract_stack_file_and_line(max_length=3)
if not stack: # should never happen as we're in a function
return 'UNKNOWN'
frame = stack[0]
return '{}:{}'.format(frame.file, frame.line)
def contains_deprecation_decorator(decorators):
return any(
d.decorator_name == 'deprecated' for d in decorators)
def has_deprecation_decorator(symbol):
"""Checks if given object has a deprecation decorator.
  We check whether a deprecation decorator appears among the symbol's
  decorators, and also whether the symbol is a class whose __init__ method
  has a deprecation decorator.
Args:
symbol: Python object.
Returns:
True if symbol has deprecation decorator.
"""
decorators, symbol = tf_decorator.unwrap(symbol)
if contains_deprecation_decorator(decorators):
return True
if tf_inspect.isfunction(symbol):
return False
if not tf_inspect.isclass(symbol):
return False
if not hasattr(symbol, '__init__'):
return False
init_decorators, _ = tf_decorator.unwrap(symbol.__init__)
return contains_deprecation_decorator(init_decorators)
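# Editor's sketch (illustrative, not part of the original module): expected
# behavior of has_deprecation_decorator. Assumes
# `from tensorflow.python.util import deprecation`; the symbols are made up.
#
#   @deprecation.deprecated("2019-01-01", "Use new_fn instead.")
#   def old_fn():
#     pass
#
#   class OldClass(object):
#     @deprecation.deprecated("2019-01-01", "Use NewClass instead.")
#     def __init__(self):
#       pass
#
#   has_deprecation_decorator(old_fn)    # True: decorator on the function
#   has_deprecation_decorator(OldClass)  # True: decorator on __init__
#   has_deprecation_decorator(len)       # False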
class DeprecationWrapper(types.ModuleType):
"""Wrapper for TensorFlow modules to support deprecation messages."""
def __init__(self, wrapped, module_name): # pylint: disable=super-on-old-class
# Prefix all local attributes with _dw_ so that we can
# handle them differently in attribute access methods.
self._dw_wrapped_module = wrapped
self._dw_module_name = module_name
self._dw_deprecated_printed = set() # names we already printed warning for
self.__file__ = wrapped.__file__
self.__name__ = wrapped.__name__
if hasattr(self._dw_wrapped_module, '__all__'):
self.__all__ = self._dw_wrapped_module.__all__
else:
self.__all__ = dir(self._dw_wrapped_module)
self._dw_warning_count = 0
super(DeprecationWrapper, self).__init__(wrapped.__name__)
def __getattr__(self, name):
if name.startswith('_dw_'):
raise AttributeError('Accessing local variables before they are created.')
attr = getattr(self._dw_wrapped_module, name)
if (self._dw_warning_count < _PER_MODULE_WARNING_LIMIT and
name not in self._dw_deprecated_printed):
if self._dw_module_name:
full_name = 'tf.%s.%s' % (self._dw_module_name, name)
else:
full_name = 'tf.%s' % name
rename = get_rename_v2(full_name)
if rename and not has_deprecation_decorator(attr):
call_location = _call_location()
        # Only warn when the call site is a real file path; skip synthetic
        # locations such as '<string>'.
if not call_location.startswith('<'):
logging.warning(
'From %s: The name %s is deprecated. Please use %s instead.\n',
_call_location(), full_name, rename)
self._dw_deprecated_printed.add(name)
self._dw_warning_count += 1
return attr
def __setattr__(self, arg, val): # pylint: disable=super-on-old-class
if arg.startswith('_dw_'):
super(DeprecationWrapper, self).__setattr__(arg, val)
else:
setattr(self._dw_wrapped_module, arg, val)
def __dir__(self):
return dir(self._dw_wrapped_module)
def __delattr__(self, name): # pylint: disable=super-on-old-class
if name.startswith('_dw_'):
super(DeprecationWrapper, self).__delattr__(name)
else:
delattr(self._dw_wrapped_module, name)
def __repr__(self):
return self._dw_wrapped_module.__repr__()
def __getstate__(self):
return self.__name__
def __setstate__(self, d):
# pylint: disable=protected-access
self.__init__(
sys.modules[d]._dw_wrapped_module,
sys.modules[d]._dw_module_name)
# pylint: enable=protected-access
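# Editor's sketch (illustrative, not part of the original module): the wrapper
# is intended to stand in for a module so attribute access can log rename
# warnings. Replacing the entry in sys.modules is an assumed usage pattern,
# and the module name below is made up.
#
#   import sys
#   from tensorflow.python.util import deprecation_wrapper
#
#   mod = sys.modules['tf_submodule']  # assumed to already be imported
#   sys.modules['tf_submodule'] = deprecation_wrapper.DeprecationWrapper(
#       mod, 'tf_submodule')
#   # Afterwards, accessing an attribute whose 'tf.tf_submodule.<name>' entry
#   # appears in all_renames_v2.symbol_renames logs a deprecation warning,
#   # up to _PER_MODULE_WARNING_LIMIT times per wrapped module.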
|
tensorflow-master
|
tensorflow/python/util/deprecation_wrapper.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keyword args functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.util import decorator_utils
def keyword_args_only(func):
"""Decorator for marking specific function accepting keyword args only.
This decorator raises a `ValueError` if the input `func` is called with any
non-keyword args. This prevents the caller from providing the arguments in
wrong order.
Args:
func: The function or method needed to be decorated.
Returns:
Decorated function or method.
Raises:
ValueError: If `func` is not callable.
"""
decorator_utils.validate_callable(func, "keyword_args_only")
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Keyword args only wrapper."""
if args:
raise ValueError(
"Must use keyword args to call {}.".format(func.__name__))
return func(**kwargs)
return new_func
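# Editor's sketch (illustrative, not part of the original module): expected
# behavior of keyword_args_only; the function below is made up.
#
#   @keyword_args_only
#   def scale(value=1, factor=2):
#     return value * factor
#
#   scale(value=3, factor=4)  # returns 12
#   scale(3, 4)               # raises ValueError("Must use keyword args ...")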
|
tensorflow-master
|
tensorflow/python/util/keyword_args.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lock_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
from absl.testing import parameterized
from tensorflow.python.platform import test
from tensorflow.python.util import lock_util
class GroupLockTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(1, 2, 3, 5, 10)
def testGroups(self, num_groups):
lock = lock_util.GroupLock(num_groups)
num_threads = 10
finished = set()
def thread_fn(thread_id):
time.sleep(random.random() * 0.1)
group_id = thread_id % num_groups
with lock.group(group_id):
time.sleep(random.random() * 0.1)
self.assertGreater(lock._group_member_counts[group_id], 0)
for g, c in enumerate(lock._group_member_counts):
if g != group_id:
self.assertEqual(0, c)
finished.add(thread_id)
threads = [
self.checkedThread(target=thread_fn, args=(i,))
for i in range(num_threads)
]
for i in range(num_threads):
threads[i].start()
for i in range(num_threads):
threads[i].join()
self.assertEqual(set(range(num_threads)), finished)
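# Editor's note (illustrative, not part of the original test): the property
# exercised above is that threads acquiring the same group may hold the lock
# concurrently, while threads in other groups are excluded.
#
#   lock = lock_util.GroupLock(2)
#   with lock.group(0):
#     pass  # other group-0 holders may enter; group-1 holders must wait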
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/util/lock_util_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate __all__ from a module docstring."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re as _re
import sys as _sys
from tensorflow.python.util import tf_inspect as _tf_inspect
_reference_pattern = _re.compile(r'^@@(\w+)$', flags=_re.MULTILINE)
def make_all(module_name, doc_string_modules=None):
"""Generates `__all__` from the docstring of one or more modules.
Usage: `make_all(__name__)` or
`make_all(__name__, [sys.modules(__name__), other_module])`. The doc string
  modules must each have a docstring, and `__all__` will contain all symbols with
`@@` references, where that symbol currently exists in the module named
`module_name`.
Args:
module_name: The name of the module (usually `__name__`).
    doc_string_modules: a list of modules from which to take docstrings.
If None, then a list containing only the module named `module_name` is used.
Returns:
A list suitable for use as `__all__`.
"""
if doc_string_modules is None:
doc_string_modules = [_sys.modules[module_name]]
cur_members = set([name for name, _
in _tf_inspect.getmembers(_sys.modules[module_name])])
results = set()
for doc_module in doc_string_modules:
results.update([m.group(1)
for m in _reference_pattern.finditer(doc_module.__doc__)
if m.group(1) in cur_members])
return list(results)
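# Editor's sketch (illustrative, not part of the original module): a module
# opts in by referencing its public symbols with `@@` lines in its docstring
# and calling make_all on itself. The symbol names below are made up.
#
#   """My module.
#
#   @@public_fn
#   @@PublicClass
#   """
#   ...
#   __all__ = make_all(__name__)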
# Hidden attributes are attributes that have been hidden by
# `remove_undocumented`. They can be re-instated by `reveal_undocumented`.
# This maps symbol names to a tuple, containing:
# (module object, attribute value)
_HIDDEN_ATTRIBUTES = {}
def reveal_undocumented(symbol_name, target_module=None):
"""Reveals a symbol that was previously removed by `remove_undocumented`.
This should be used by tensorflow internal tests only. It explicitly
defeats the encapsulation afforded by `remove_undocumented`.
It throws an exception when the symbol was not hidden in the first place.
Args:
symbol_name: a string representing the full absolute path of the symbol.
target_module: if specified, the module in which to restore the symbol.
"""
if symbol_name not in _HIDDEN_ATTRIBUTES:
raise LookupError('Symbol %s is not a hidden symbol' % symbol_name)
symbol_basename = symbol_name.split('.')[-1]
(original_module, attr_value) = _HIDDEN_ATTRIBUTES[symbol_name]
if not target_module: target_module = original_module
setattr(target_module, symbol_basename, attr_value)
def remove_undocumented(module_name, allowed_exception_list=None,
doc_string_modules=None):
"""Removes symbols in a module that are not referenced by a docstring.
Args:
module_name: the name of the module (usually `__name__`).
allowed_exception_list: a list of names that should not be removed.
doc_string_modules: a list of modules from which to take the docstrings.
If None, then a list containing only the module named `module_name` is used.
      Furthermore, if a symbol was previously added with
      `add_to_global_whitelist`, it will always be allowed. This is useful for
      internal tests.
Returns:
None
"""
current_symbols = set(dir(_sys.modules[module_name]))
should_have = make_all(module_name, doc_string_modules)
should_have += allowed_exception_list or []
extra_symbols = current_symbols - set(should_have)
target_module = _sys.modules[module_name]
for extra_symbol in extra_symbols:
# Skip over __file__, etc. Also preserves internal symbols.
if extra_symbol.startswith('_'): continue
fully_qualified_name = module_name + '.' + extra_symbol
_HIDDEN_ATTRIBUTES[fully_qualified_name] = (target_module,
getattr(target_module,
extra_symbol))
delattr(target_module, extra_symbol)
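# Editor's sketch (illustrative, not part of the original module): the
# hide/reveal round trip implemented above. Module and symbol names are
# made up.
#
#   remove_undocumented('pkg.mod', allowed_exception_list=['keep_me'])
#   # Any public symbol of pkg.mod not referenced by `@@` (and not 'keep_me')
#   # is deleted from the module and recorded in _HIDDEN_ATTRIBUTES under
#   # 'pkg.mod.<symbol>'. A test can later restore it:
#   reveal_undocumented('pkg.mod.helper')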
__all__ = [
'make_all',
'remove_undocumented',
'reveal_undocumented',
]
|
tensorflow-master
|
tensorflow/python/util/all_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base TFDecorator class and utility functions for working with decorators.
There are two ways to create decorators that TensorFlow can introspect into.
This is important for documentation generation purposes, so that function
signatures aren't obscured by the (*args, **kwds) signature that decorators
often provide.
1. Call `tf_decorator.make_decorator` on your wrapper function. If your
decorator is stateless, or can capture all of the variables it needs to work
with through lexical closure, this is the simplest option. Create your wrapper
function as usual, but instead of returning it, return
`tf_decorator.make_decorator(target, your_wrapper)`. This will attach some
decorator introspection metadata onto your wrapper and return it.
Example:
def print_hello_before_calling(target):
def wrapper(*args, **kwargs):
print('hello')
return target(*args, **kwargs)
return tf_decorator.make_decorator(target, wrapper)
2. Derive from TFDecorator. If your decorator needs to be stateful, you can
implement it in terms of a TFDecorator. Store whatever state you need in your
derived class, and implement the `__call__` method to do your work before
calling into your target. You can retrieve the target via
`super(MyDecoratorClass, self).decorated_target`, and call it with whatever
parameters it needs.
Example:
class CallCounter(tf_decorator.TFDecorator):
def __init__(self, target):
super(CallCounter, self).__init__('count_calls', target)
self.call_count = 0
def __call__(self, *args, **kwargs):
self.call_count += 1
return super(CallCounter, self).decorated_target(*args, **kwargs)
def count_calls(target):
return CallCounter(target)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from tensorflow.python.util import tf_stack
def make_decorator(target,
decorator_func,
decorator_name=None,
decorator_doc='',
decorator_argspec=None):
"""Make a decorator from a wrapper and a target.
Args:
target: The final callable to be wrapped.
decorator_func: The wrapper function.
    decorator_name: The name of the decorator. If `None`, the name of the
      function calling make_decorator is used.
decorator_doc: Documentation specific to this application of
`decorator_func` to `target`.
decorator_argspec: The new callable signature of this decorator.
Returns:
The `decorator_func` argument with new metadata attached.
"""
if decorator_name is None:
frame = tf_stack.extract_stack(limit=2)[0]
decorator_name = frame[2] # Caller's name
decorator = TFDecorator(decorator_name, target, decorator_doc,
decorator_argspec)
setattr(decorator_func, '_tf_decorator', decorator)
# Objects that are callables (e.g., a functools.partial object) may not have
# the following attributes.
if hasattr(target, '__name__'):
decorator_func.__name__ = target.__name__
if hasattr(target, '__module__'):
decorator_func.__module__ = target.__module__
if hasattr(target, '__dict__'):
# Copy dict entries from target which are not overridden by decorator_func.
for name in target.__dict__:
if name not in decorator_func.__dict__:
decorator_func.__dict__[name] = target.__dict__[name]
if hasattr(target, '__doc__'):
decorator_func.__doc__ = decorator.__doc__
decorator_func.__wrapped__ = target
# Keeping a second handle to `target` allows callers to detect whether the
# decorator was modified using `rewrap`.
decorator_func.__original_wrapped__ = target
return decorator_func
def _has_tf_decorator_attr(obj):
"""Checks if object has _tf_decorator attribute.
  This check also works for mocked objects, since it verifies that the
  returned attribute has the right type.
Args:
obj: Python object.
"""
return (
hasattr(obj, '_tf_decorator') and
isinstance(getattr(obj, '_tf_decorator'), TFDecorator))
def rewrap(decorator_func, previous_target, new_target):
"""Injects a new target into a function built by make_decorator.
This function allows replacing a function wrapped by `decorator_func`,
assuming the decorator that wraps the function is written as described below.
The decorator function must use `<decorator name>.__wrapped__` instead of the
wrapped function that is normally used:
Example:
# Instead of this:
def simple_parametrized_wrapper(*args, **kwds):
return wrapped_fn(*args, **kwds)
tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn)
# Write this:
def simple_parametrized_wrapper(*args, **kwds):
return simple_parametrized_wrapper.__wrapped__(*args, **kwds)
tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn)
Note that this process modifies decorator_func.
Args:
decorator_func: Callable returned by `wrap`.
previous_target: Callable that needs to be replaced.
new_target: Callable to replace previous_target with.
Returns:
The updated decorator. If decorator_func is not a tf_decorator, new_target
is returned.
"""
# Because the process mutates the decorator, we only need to alter the
# innermost function that wraps previous_target.
cur = decorator_func
innermost_decorator = None
target = None
while _has_tf_decorator_attr(cur):
innermost_decorator = cur
target = getattr(cur, '_tf_decorator')
if target.decorated_target is previous_target:
break
cur = target.decorated_target
assert cur is not None
# If decorator_func is not a decorator, new_target replaces it directly.
if innermost_decorator is None:
# Consistency check. The caller should always pass the result of
# tf_decorator.unwrap as previous_target. If decorator_func is not a
# decorator, that will have returned decorator_func itself.
assert decorator_func is previous_target
return new_target
target.decorated_target = new_target
if inspect.ismethod(innermost_decorator):
# Bound methods can't be assigned attributes. Thankfully, they seem to
# be just proxies for their unbound counterpart, and we can modify that.
if hasattr(innermost_decorator, '__func__'):
innermost_decorator.__func__.__wrapped__ = new_target
elif hasattr(innermost_decorator, 'im_func'):
innermost_decorator.im_func.__wrapped__ = new_target
else:
innermost_decorator.__wrapped__ = new_target
else:
innermost_decorator.__wrapped__ = new_target
return decorator_func
def unwrap(maybe_tf_decorator):
"""Unwraps an object into a list of TFDecorators and a final target.
Args:
maybe_tf_decorator: Any callable object.
Returns:
    A tuple whose first element is a list of TFDecorator-derived objects that
were applied to the final callable target, and whose second element is the
final undecorated callable target. If the `maybe_tf_decorator` parameter is
not decorated by any TFDecorators, the first tuple element will be an empty
list. The `TFDecorator` list is ordered from outermost to innermost
decorators.
"""
decorators = []
cur = maybe_tf_decorator
while True:
if isinstance(cur, TFDecorator):
decorators.append(cur)
elif _has_tf_decorator_attr(cur):
decorators.append(getattr(cur, '_tf_decorator'))
else:
break
if not hasattr(decorators[-1], 'decorated_target'):
break
cur = decorators[-1].decorated_target
return decorators, cur
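# Editor's sketch (illustrative, not part of the original module): unwrap()
# applied to a function wrapped via make_decorator. The names are made up.
#
#   def log_calls(target):
#     def wrapper(*args, **kwargs):
#       print('called')
#       return target(*args, **kwargs)
#     return make_decorator(target, wrapper)
#
#   @log_calls
#   def plus_one(x):
#     return x + 1
#
#   decorators, target = unwrap(plus_one)
#   # decorators -> [TFDecorator with decorator_name 'log_calls']
#   # target     -> the original, undecorated plus_one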
class TFDecorator(object):
"""Base class for all TensorFlow decorators.
TFDecorator captures and exposes the wrapped target, and provides details
about the current decorator.
"""
def __init__(self,
decorator_name,
target,
decorator_doc='',
decorator_argspec=None):
self._decorated_target = target
self._decorator_name = decorator_name
self._decorator_doc = decorator_doc
self._decorator_argspec = decorator_argspec
if hasattr(target, '__name__'):
self.__name__ = target.__name__
if self._decorator_doc:
self.__doc__ = self._decorator_doc
elif hasattr(target, '__doc__') and target.__doc__:
self.__doc__ = target.__doc__
else:
self.__doc__ = ''
def __get__(self, instance, owner):
return self._decorated_target.__get__(instance, owner)
def __call__(self, *args, **kwargs):
return self._decorated_target(*args, **kwargs)
@property
def decorated_target(self):
return self._decorated_target
@decorated_target.setter
def decorated_target(self, decorated_target):
self._decorated_target = decorated_target
@property
def decorator_name(self):
return self._decorator_name
@property
def decorator_doc(self):
return self._decorator_doc
@property
def decorator_argspec(self):
return self._decorator_argspec
|
tensorflow-master
|
tensorflow/python/util/tf_decorator.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.util.deprecation_wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation_wrapper
from tensorflow.python.util import tf_inspect
from tensorflow.tools.compatibility import all_renames_v2
class MockModule(types.ModuleType):
__file__ = 'test.py'
class DeprecationWrapperTest(test.TestCase):
def testWrapperIsAModule(self):
module = MockModule('test')
wrapped_module = deprecation_wrapper.DeprecationWrapper(
module, 'test')
self.assertTrue(tf_inspect.ismodule(wrapped_module))
@test.mock.patch.object(logging, 'warning', autospec=True)
def testDeprecationWarnings(self, mock_warning):
module = MockModule('test')
module.foo = 1
module.bar = 2
module.baz = 3
all_renames_v2.symbol_renames['tf.test.bar'] = 'tf.bar2'
all_renames_v2.symbol_renames['tf.test.baz'] = 'tf.compat.v1.baz'
wrapped_module = deprecation_wrapper.DeprecationWrapper(
module, 'test')
self.assertTrue(tf_inspect.ismodule(wrapped_module))
self.assertEqual(0, mock_warning.call_count)
bar = wrapped_module.bar
self.assertEqual(1, mock_warning.call_count)
foo = wrapped_module.foo
self.assertEqual(1, mock_warning.call_count)
baz = wrapped_module.baz
self.assertEqual(2, mock_warning.call_count)
baz = wrapped_module.baz
self.assertEqual(2, mock_warning.call_count)
# Check that values stayed the same
self.assertEqual(module.foo, foo)
self.assertEqual(module.bar, bar)
self.assertEqual(module.baz, baz)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/util/deprecation_wrapper_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFDecorator-aware replacements for the inspect module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect as _inspect
import six
from tensorflow.python.util import tf_decorator
ArgSpec = _inspect.ArgSpec
if hasattr(_inspect, 'FullArgSpec'):
FullArgSpec = _inspect.FullArgSpec # pylint: disable=invalid-name
else:
FullArgSpec = collections.namedtuple('FullArgSpec', [
'args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults',
'annotations'
])
def _convert_maybe_argspec_to_fullargspec(argspec):
if isinstance(argspec, FullArgSpec):
return argspec
return FullArgSpec(
args=argspec.args,
varargs=argspec.varargs,
varkw=argspec.keywords,
defaults=argspec.defaults,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
if hasattr(_inspect, 'getfullargspec'):
_getfullargspec = _inspect.getfullargspec # pylint: disable=invalid-name
def _getargspec(target):
"""A python3 version of getargspec.
Calls `getfullargspec` and assigns args, varargs,
varkw, and defaults to a python 2/3 compatible `ArgSpec`.
The parameter name 'varkw' is changed to 'keywords' to fit the
`ArgSpec` struct.
Args:
target: the target object to inspect.
Returns:
An ArgSpec with args, varargs, keywords, and defaults parameters
from FullArgSpec.
"""
fullargspecs = getfullargspec(target)
argspecs = ArgSpec(
args=fullargspecs.args,
varargs=fullargspecs.varargs,
keywords=fullargspecs.varkw,
defaults=fullargspecs.defaults)
return argspecs
else:
_getargspec = _inspect.getargspec
def _getfullargspec(target):
"""A python2 version of getfullargspec.
Args:
target: the target object to inspect.
Returns:
A FullArgSpec with empty kwonlyargs, kwonlydefaults and annotations.
"""
return _convert_maybe_argspec_to_fullargspec(getargspec(target))
def currentframe():
"""TFDecorator-aware replacement for inspect.currentframe."""
return _inspect.stack()[1][0]
def getargspec(obj):
"""TFDecorator-aware replacement for `inspect.getargspec`.
Note: `getfullargspec` is recommended as the python 2/3 compatible
replacement for this function.
Args:
obj: A function, partial function, or callable object, possibly decorated.
Returns:
The `ArgSpec` that describes the signature of the outermost decorator that
changes the callable's signature, or the `ArgSpec` that describes
the object if not decorated.
Raises:
    ValueError: When the callable's signature cannot be expressed with
      ArgSpec.
TypeError: For objects of unsupported types.
"""
if isinstance(obj, functools.partial):
return _get_argspec_for_partial(obj)
decorators, target = tf_decorator.unwrap(obj)
spec = next((d.decorator_argspec
for d in decorators
if d.decorator_argspec is not None), None)
if spec:
return spec
try:
# Python3 will handle most callables here (not partial).
return _getargspec(target)
except TypeError:
pass
if isinstance(target, type):
try:
return _getargspec(target.__init__)
except TypeError:
pass
try:
return _getargspec(target.__new__)
except TypeError:
pass
# The `type(target)` ensures that if a class is received we don't return
# the signature of its __call__ method.
return _getargspec(type(target).__call__)
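# Illustrative sketch (hypothetical names): `getargspec` sees through
# TFDecorator wrappers that do not change the signature, reporting the spec of
# the underlying target:
#
#   def forward(fn):
#     def wrapper(*args, **kwargs):
#       return fn(*args, **kwargs)
#     return tf_decorator.make_decorator(fn, wrapper)
#
#   @forward
#   def add(a, b=1):
#     return a + b
#
#   getargspec(add)  # -> ArgSpec(args=['a', 'b'], varargs=None,
#                    #            keywords=None, defaults=(1,))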
def _get_argspec_for_partial(obj):
"""Implements `getargspec` for `functools.partial` objects.
Args:
    obj: The `functools.partial` object.
Returns:
An `inspect.ArgSpec`
Raises:
    ValueError: When the callable's signature cannot be expressed with
      ArgSpec.
"""
  # When the callable is a functools.partial object, we construct its ArgSpec
  # with the following strategy:
  # - If the partial binds values to positional arguments (i.e. obj.args), the
  #   final ArgSpec does not contain those positional arguments.
  # - If the partial supplies default values for keyword arguments (i.e.
  #   obj.keywords), we merge them with those of the wrapped target. Default
  #   values from the partial take precedence over those from the wrapped
  #   target.
#
# However, there is a case where it is impossible to construct a valid
# ArgSpec. Python requires arguments that have no default values must be
# defined before those with default values. ArgSpec structure is only valid
# when this presumption holds true because default values are expressed as a
# tuple of values without keywords and they are always assumed to belong to
# last K arguments where K is number of default values present.
#
# Since functools.partial can give default value to any argument, this
# presumption may no longer hold in some cases. For example:
#
# def func(m, n):
# return 2 * m + n
# partialed = functools.partial(func, m=1)
#
  # This example results in m having a default value while n does not. This is
  # usually not allowed in Python and cannot be expressed in ArgSpec correctly.
  #
  # Thus, we must detect cases like this by finding the first argument with a
  # default value and ensuring that all following arguments also have default
  # values. When this does not hold, a ValueError is raised.
n_prune_args = len(obj.args)
partial_keywords = obj.keywords or {}
args, varargs, keywords, defaults = getargspec(obj.func)
# Pruning first n_prune_args arguments.
args = args[n_prune_args:]
# Partial function may give default value to any argument, therefore length
# of default value list must be len(args) to allow each argument to
# potentially be given a default value.
no_default = object()
all_defaults = [no_default] * len(args)
if defaults:
all_defaults[-len(defaults):] = defaults
# Fill in default values provided by partial function in all_defaults.
for kw, default in six.iteritems(partial_keywords):
if kw in args:
idx = args.index(kw)
all_defaults[idx] = default
elif not keywords:
raise ValueError('Function does not have **kwargs parameter, but '
'contains an unknown partial keyword.')
# Find first argument with default value set.
first_default = next(
(idx for idx, x in enumerate(all_defaults) if x is not no_default), None)
# If no default values are found, return ArgSpec with defaults=None.
if first_default is None:
return ArgSpec(args, varargs, keywords, None)
# Checks if all arguments have default value set after first one.
invalid_default_values = [
args[i] for i, j in enumerate(all_defaults)
if j is no_default and i > first_default
]
if invalid_default_values:
raise ValueError('Some arguments %s do not have default value, but they '
'are positioned after those with default values. This can '
'not be expressed with ArgSpec.' % invalid_default_values)
return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))
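# Illustrative sketch (hypothetical names): a keyword default supplied by the
# partial is merged into the wrapped function's ArgSpec:
#
#   def func(m, n):
#     return 2 * m + n
#
#   partialed = functools.partial(func, n=3)
#   getargspec(partialed)  # -> ArgSpec(args=['m', 'n'], varargs=None,
#                          #            keywords=None, defaults=(3,))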
def getfullargspec(obj):
"""TFDecorator-aware replacement for `inspect.getfullargspec`.
  This wrapper emulates `inspect.getfullargspec` in Python 2.
Args:
obj: A callable, possibly decorated.
Returns:
The `FullArgSpec` that describes the signature of
the outermost decorator that changes the callable's signature. If the
callable is not decorated, `inspect.getfullargspec()` will be called
directly on the callable.
"""
decorators, target = tf_decorator.unwrap(obj)
for d in decorators:
if d.decorator_argspec is not None:
return _convert_maybe_argspec_to_fullargspec(d.decorator_argspec)
return _getfullargspec(target)
def getcallargs(*func_and_positional, **named):
"""TFDecorator-aware replacement for inspect.getcallargs.
Args:
*func_and_positional: A callable, possibly decorated, followed by any
positional arguments that would be passed to `func`.
**named: The named argument dictionary that would be passed to `func`.
Returns:
A dictionary mapping `func`'s named arguments to the values they would
receive if `func(*positional, **named)` were called.
`getcallargs` will use the argspec from the outermost decorator that provides
it. If no attached decorators modify argspec, the final unwrapped target's
argspec will be used.
"""
func = func_and_positional[0]
positional = func_and_positional[1:]
argspec = getfullargspec(func)
call_args = named.copy()
this = getattr(func, 'im_self', None) or getattr(func, '__self__', None)
if ismethod(func) and this:
positional = (this,) + positional
remaining_positionals = [arg for arg in argspec.args if arg not in call_args]
call_args.update(dict(zip(remaining_positionals, positional)))
default_count = 0 if not argspec.defaults else len(argspec.defaults)
if default_count:
for arg, value in zip(argspec.args[-default_count:], argspec.defaults):
if arg not in call_args:
call_args[arg] = value
if argspec.kwonlydefaults is not None:
for k, v in argspec.kwonlydefaults.items():
if k not in call_args:
call_args[k] = v
return call_args
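# Illustrative sketch (hypothetical names): positional arguments and declared
# defaults are combined into a single mapping:
#
#   def f(a, b=2):
#     return a + b
#
#   getcallargs(f, 1)       # -> {'a': 1, 'b': 2}
#   getcallargs(f, 1, b=5)  # -> {'a': 1, 'b': 5}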
def getframeinfo(*args, **kwargs):
return _inspect.getframeinfo(*args, **kwargs)
def getdoc(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getdoc.
Args:
object: An object, possibly decorated.
Returns:
The docstring associated with the object.
The outermost-decorated object is intended to have the most complete
documentation, so the decorated parameter is not unwrapped.
"""
return _inspect.getdoc(object)
def getfile(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getfile."""
unwrapped_object = tf_decorator.unwrap(object)[1]
  # Workaround for the case when `object` is a stack frame
  # and only .pyc files are used. In this case, getfile
  # might return an incorrect path. So, we get the path from f_globals
  # instead.
if (hasattr(unwrapped_object, 'f_globals') and
'__file__' in unwrapped_object.f_globals):
return unwrapped_object.f_globals['__file__']
return _inspect.getfile(unwrapped_object)
def getmembers(object, predicate=None): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getmembers."""
return _inspect.getmembers(object, predicate)
def getmodule(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getmodule."""
return _inspect.getmodule(object)
def getmro(cls):
"""TFDecorator-aware replacement for inspect.getmro."""
return _inspect.getmro(cls)
def getsource(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getsource."""
return _inspect.getsource(tf_decorator.unwrap(object)[1])
def getsourcefile(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getsourcefile."""
return _inspect.getsourcefile(tf_decorator.unwrap(object)[1])
def getsourcelines(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getsourcelines."""
return _inspect.getsourcelines(tf_decorator.unwrap(object)[1])
def isbuiltin(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isbuiltin."""
return _inspect.isbuiltin(tf_decorator.unwrap(object)[1])
def isclass(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isclass."""
return _inspect.isclass(tf_decorator.unwrap(object)[1])
def isfunction(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isfunction."""
return _inspect.isfunction(tf_decorator.unwrap(object)[1])
def isframe(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.ismodule."""
return _inspect.isframe(tf_decorator.unwrap(object)[1])
def isgenerator(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isgenerator."""
return _inspect.isgenerator(tf_decorator.unwrap(object)[1])
def isgeneratorfunction(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isgeneratorfunction."""
return _inspect.isgeneratorfunction(tf_decorator.unwrap(object)[1])
def ismethod(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.ismethod."""
return _inspect.ismethod(tf_decorator.unwrap(object)[1])
def ismodule(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.ismodule."""
return _inspect.ismodule(tf_decorator.unwrap(object)[1])
def isroutine(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isroutine."""
return _inspect.isroutine(tf_decorator.unwrap(object)[1])
def stack(context=1):
"""TFDecorator-aware replacement for inspect.stack."""
return _inspect.stack(context)[1:]
|
tensorflow-master
|
tensorflow/python/util/tf_inspect.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tf_should_use."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import sys
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import tf_should_use
@contextlib.contextmanager
def reroute_error():
"""Temporarily reroute errors written to tf_logging.error into `captured`."""
with test.mock.patch.object(tf_should_use.tf_logging, 'error') as error:
with test.mock.patch.object(tf_should_use.tf_logging, 'fatal') as fatal:
yield error, fatal
class TfShouldUseTest(test.TestCase):
@test_util.run_deprecated_v1
def testAddShouldUseWarningWhenNotUsed(self):
c = constant_op.constant(0, name='blah0')
def in_this_function():
h = tf_should_use._add_should_use_warning(c)
del h
with reroute_error() as (error, _):
in_this_function()
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah0:0', msg)
self.assertIn('in_this_function', msg)
self.assertFalse(gc.garbage)
@test_util.run_deprecated_v1
def testAddShouldUseFatalWhenNotUsed(self):
c = constant_op.constant(0, name='blah0')
def in_this_function():
h = tf_should_use._add_should_use_warning(c, fatal_error=True)
del h
with reroute_error() as (_, fatal):
in_this_function()
msg = '\n'.join(fatal.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah0:0', msg)
self.assertIn('in_this_function', msg)
self.assertFalse(gc.garbage)
def _testAddShouldUseWarningWhenUsed(self, fn, name):
c = constant_op.constant(0, name=name)
with reroute_error() as (error, fatal):
h = tf_should_use._add_should_use_warning(c)
fn(h)
del h
error.assert_not_called()
fatal.assert_not_called()
@test_util.run_deprecated_v1
def testAddShouldUseWarningWhenUsedWithAdd(self):
def add(h):
_ = h + 1
self._testAddShouldUseWarningWhenUsed(add, name='blah_add')
gc.collect()
self.assertFalse(gc.garbage)
@test_util.run_deprecated_v1
def testAddShouldUseWarningWhenUsedWithGetName(self):
def get_name(h):
_ = h.name
self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name')
gc.collect()
self.assertFalse(gc.garbage)
@test_util.run_deprecated_v1
def testShouldUseResult(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah2')
with reroute_error() as (error, _):
return_const(0.0)
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah2:0', msg)
self.assertIn('return_const', msg)
gc.collect()
self.assertFalse(gc.garbage)
@test_util.run_deprecated_v1
def testShouldUseResultWhenNotReallyUsed(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah3')
with reroute_error() as (error, _):
with self.cached_session():
return_const(0.0)
# Creating another op and executing it does not mark the
# unused op as being "used".
v = constant_op.constant(1.0, name='meh')
self.evaluate(v)
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah3:0', msg)
self.assertIn('return_const', msg)
gc.collect()
self.assertFalse(gc.garbage)
# Tests that mark_used is available in the API.
def testMarkUsed(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah3')
with self.cached_session():
return_const(0.0).mark_used()
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/util/tf_should_use_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for exporting TensorFlow symbols to the API.
Exporting a function or a class:
To export a function or a class, use the tf_export decorator. For example:
```python
@tf_export('foo', 'bar.foo')
def foo(...):
...
```
If a function is assigned to a variable, you can export it by calling
tf_export explicitly. For example:
```python
foo = get_foo(...)
tf_export('foo', 'bar.foo')(foo)
```
Exporting a constant
```python
foo = 1
tf_export('consts.foo').export_constant(__name__, 'foo')
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import sys
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
ESTIMATOR_API_NAME = 'estimator'
KERAS_API_NAME = 'keras'
TENSORFLOW_API_NAME = 'tensorflow'
# List of subpackage names used by TensorFlow components. Have to check that
# TensorFlow core repo does not export any symbols under these names.
SUBPACKAGE_NAMESPACES = [ESTIMATOR_API_NAME]
_Attributes = collections.namedtuple(
'ExportedApiAttributes', ['names', 'constants'])
# Attribute values must be unique to each API.
API_ATTRS = {
TENSORFLOW_API_NAME: _Attributes(
'_tf_api_names',
'_tf_api_constants'),
ESTIMATOR_API_NAME: _Attributes(
'_estimator_api_names',
'_estimator_api_constants'),
KERAS_API_NAME: _Attributes(
'_keras_api_names',
'_keras_api_constants')
}
API_ATTRS_V1 = {
TENSORFLOW_API_NAME: _Attributes(
'_tf_api_names_v1',
'_tf_api_constants_v1'),
ESTIMATOR_API_NAME: _Attributes(
'_estimator_api_names_v1',
'_estimator_api_constants_v1'),
KERAS_API_NAME: _Attributes(
'_keras_api_names_v1',
'_keras_api_constants_v1')
}
class SymbolAlreadyExposedError(Exception):
"""Raised when adding API names to symbol that already has API names."""
pass
class InvalidSymbolNameError(Exception):
"""Raised when trying to export symbol as an invalid or unallowed name."""
pass
def get_canonical_name_for_symbol(
symbol, api_name=TENSORFLOW_API_NAME,
add_prefix_to_v1_names=False):
"""Get canonical name for the API symbol.
Args:
symbol: API function or class.
api_name: API name (tensorflow or estimator).
add_prefix_to_v1_names: Specifies whether a name available only in V1
should be prefixed with compat.v1.
Returns:
    Canonical name for the API symbol (e.g. initializers.zeros) if a
    canonical name could be determined. Otherwise, returns None.
"""
if not hasattr(symbol, '__dict__'):
return None
api_names_attr = API_ATTRS[api_name].names
_, undecorated_symbol = tf_decorator.unwrap(symbol)
if api_names_attr not in undecorated_symbol.__dict__:
return None
api_names = getattr(undecorated_symbol, api_names_attr)
deprecated_api_names = undecorated_symbol.__dict__.get(
'_tf_deprecated_api_names', [])
canonical_name = get_canonical_name(api_names, deprecated_api_names)
if canonical_name:
return canonical_name
# If there is no V2 canonical name, get V1 canonical name.
api_names_attr = API_ATTRS_V1[api_name].names
api_names = getattr(undecorated_symbol, api_names_attr)
v1_canonical_name = get_canonical_name(api_names, deprecated_api_names)
if add_prefix_to_v1_names:
return 'compat.v1.%s' % v1_canonical_name
return v1_canonical_name
def get_canonical_name(api_names, deprecated_api_names):
"""Get preferred endpoint name.
Args:
api_names: API names iterable.
deprecated_api_names: Deprecated API names iterable.
Returns:
Returns one of the following in decreasing preference:
- first non-deprecated endpoint
- first endpoint
- None
"""
non_deprecated_name = next(
(name for name in api_names if name not in deprecated_api_names),
None)
if non_deprecated_name:
return non_deprecated_name
if api_names:
return api_names[0]
return None
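# Illustrative sketch (hypothetical names): the first non-deprecated endpoint
# wins, falling back to the first endpoint when all names are deprecated:
#
#   get_canonical_name(['io.read', 'io.read_v2'], ['io.read'])  # -> 'io.read_v2'
#   get_canonical_name(['io.read'], ['io.read'])                # -> 'io.read'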
def get_v1_names(symbol):
"""Get a list of TF 1.* names for this symbol.
Args:
symbol: symbol to get API names for.
Returns:
List of all API names for this symbol including TensorFlow and
Estimator names.
"""
names_v1 = []
tensorflow_api_attr_v1 = API_ATTRS_V1[TENSORFLOW_API_NAME].names
estimator_api_attr_v1 = API_ATTRS_V1[ESTIMATOR_API_NAME].names
keras_api_attr_v1 = API_ATTRS_V1[KERAS_API_NAME].names
if not hasattr(symbol, '__dict__'):
return names_v1
if tensorflow_api_attr_v1 in symbol.__dict__:
names_v1.extend(getattr(symbol, tensorflow_api_attr_v1))
if estimator_api_attr_v1 in symbol.__dict__:
names_v1.extend(getattr(symbol, estimator_api_attr_v1))
if keras_api_attr_v1 in symbol.__dict__:
names_v1.extend(getattr(symbol, keras_api_attr_v1))
return names_v1
def get_v2_names(symbol):
"""Get a list of TF 2.0 names for this symbol.
Args:
symbol: symbol to get API names for.
Returns:
List of all API names for this symbol including TensorFlow and
Estimator names.
"""
names_v2 = []
tensorflow_api_attr = API_ATTRS[TENSORFLOW_API_NAME].names
estimator_api_attr = API_ATTRS[ESTIMATOR_API_NAME].names
keras_api_attr = API_ATTRS[KERAS_API_NAME].names
if not hasattr(symbol, '__dict__'):
return names_v2
if tensorflow_api_attr in symbol.__dict__:
names_v2.extend(getattr(symbol, tensorflow_api_attr))
if estimator_api_attr in symbol.__dict__:
names_v2.extend(getattr(symbol, estimator_api_attr))
if keras_api_attr in symbol.__dict__:
names_v2.extend(getattr(symbol, keras_api_attr))
return names_v2
def get_v1_constants(module):
"""Get a list of TF 1.* constants in this module.
Args:
module: TensorFlow module.
Returns:
List of all API constants under the given module including TensorFlow and
Estimator constants.
"""
constants_v1 = []
tensorflow_constants_attr_v1 = API_ATTRS_V1[TENSORFLOW_API_NAME].constants
estimator_constants_attr_v1 = API_ATTRS_V1[ESTIMATOR_API_NAME].constants
if hasattr(module, tensorflow_constants_attr_v1):
constants_v1.extend(getattr(module, tensorflow_constants_attr_v1))
if hasattr(module, estimator_constants_attr_v1):
constants_v1.extend(getattr(module, estimator_constants_attr_v1))
return constants_v1
def get_v2_constants(module):
"""Get a list of TF 2.0 constants in this module.
Args:
module: TensorFlow module.
Returns:
List of all API constants under the given module including TensorFlow and
Estimator constants.
"""
constants_v2 = []
tensorflow_constants_attr = API_ATTRS[TENSORFLOW_API_NAME].constants
estimator_constants_attr = API_ATTRS[ESTIMATOR_API_NAME].constants
if hasattr(module, tensorflow_constants_attr):
constants_v2.extend(getattr(module, tensorflow_constants_attr))
if hasattr(module, estimator_constants_attr):
constants_v2.extend(getattr(module, estimator_constants_attr))
return constants_v2
class api_export(object): # pylint: disable=invalid-name
"""Provides ways to export symbols to the TensorFlow API."""
def __init__(self, *args, **kwargs): # pylint: disable=g-doc-args
"""Export under the names *args (first one is considered canonical).
Args:
*args: API names in dot delimited format.
**kwargs: Optional keyed arguments.
v1: Names for the TensorFlow V1 API. If not set, we will use V2 API
names both for TensorFlow V1 and V2 APIs.
overrides: List of symbols that this is overriding
        (those overridden api exports will be removed). Note: passing overrides
has no effect on exporting a constant.
api_name: Name of the API you want to generate (e.g. `tensorflow` or
`estimator`). Default is `tensorflow`.
      allow_multiple_exports: Allow the symbol to be exported multiple times
        under different names.
"""
self._names = args
self._names_v1 = kwargs.get('v1', args)
if 'v2' in kwargs:
raise ValueError('You passed a "v2" argument to tf_export. This is not '
'what you want. Pass v2 names directly as positional '
'arguments instead.')
self._api_name = kwargs.get('api_name', TENSORFLOW_API_NAME)
self._overrides = kwargs.get('overrides', [])
self._allow_multiple_exports = kwargs.get('allow_multiple_exports', False)
self._validate_symbol_names()
def _validate_symbol_names(self):
"""Validate you are exporting symbols under an allowed package.
We need to ensure things exported by tf_export, estimator_export, etc.
export symbols under disjoint top-level package names.
For TensorFlow, we check that it does not export anything under subpackage
names used by components (estimator, keras, etc.).
For each component, we check that it exports everything under its own
subpackage.
Raises:
InvalidSymbolNameError: If you try to export symbol under disallowed name.
"""
all_symbol_names = set(self._names) | set(self._names_v1)
if self._api_name == TENSORFLOW_API_NAME:
for subpackage in SUBPACKAGE_NAMESPACES:
if any(n.startswith(subpackage) for n in all_symbol_names):
raise InvalidSymbolNameError(
'@tf_export is not allowed to export symbols under %s.*' % (
subpackage))
else:
if not all(n.startswith(self._api_name) for n in all_symbol_names):
raise InvalidSymbolNameError(
'Can only export symbols under package name of component. '
'e.g. tensorflow_estimator must export all symbols under '
'tf.estimator')
def __call__(self, func):
"""Calls this decorator.
Args:
func: decorated symbol (function or class).
Returns:
The input function with _tf_api_names attribute set.
Raises:
SymbolAlreadyExposedError: Raised when a symbol already has API names
and kwarg `allow_multiple_exports` not set.
"""
api_names_attr = API_ATTRS[self._api_name].names
api_names_attr_v1 = API_ATTRS_V1[self._api_name].names
# Undecorate overridden names
for f in self._overrides:
_, undecorated_f = tf_decorator.unwrap(f)
delattr(undecorated_f, api_names_attr)
delattr(undecorated_f, api_names_attr_v1)
_, undecorated_func = tf_decorator.unwrap(func)
self.set_attr(undecorated_func, api_names_attr, self._names)
self.set_attr(undecorated_func, api_names_attr_v1, self._names_v1)
return func
def set_attr(self, func, api_names_attr, names):
# Check for an existing api. We check if attribute name is in
# __dict__ instead of using hasattr to verify that subclasses have
# their own _tf_api_names as opposed to just inheriting it.
if api_names_attr in func.__dict__:
if not self._allow_multiple_exports:
raise SymbolAlreadyExposedError(
'Symbol %s is already exposed as %s.' %
(func.__name__, getattr(func, api_names_attr))) # pylint: disable=protected-access
setattr(func, api_names_attr, names)
def export_constant(self, module_name, name):
"""Store export information for constants/string literals.
Export information is stored in the module where constants/string literals
are defined.
e.g.
```python
foo = 1
bar = 2
tf_export("consts.foo").export_constant(__name__, 'foo')
tf_export("consts.bar").export_constant(__name__, 'bar')
```
Args:
      module_name: (string) Name of the module in which to store the constant.
name: (string) Current constant name.
"""
module = sys.modules[module_name]
api_constants_attr = API_ATTRS[self._api_name].constants
api_constants_attr_v1 = API_ATTRS_V1[self._api_name].constants
if not hasattr(module, api_constants_attr):
setattr(module, api_constants_attr, [])
# pylint: disable=protected-access
getattr(module, api_constants_attr).append(
(self._names, name))
if not hasattr(module, api_constants_attr_v1):
setattr(module, api_constants_attr_v1, [])
getattr(module, api_constants_attr_v1).append(
(self._names_v1, name))
def kwarg_only(f):
"""A wrapper that throws away all non-kwarg arguments."""
f_argspec = tf_inspect.getargspec(f)
def wrapper(*args, **kwargs):
if args:
raise TypeError(
'{f} only takes keyword args (possible keys: {kwargs}). '
'Please pass these args as kwargs instead.'
.format(f=f.__name__, kwargs=f_argspec.args))
return f(**kwargs)
return tf_decorator.make_decorator(f, wrapper, decorator_argspec=f_argspec)
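# Illustrative sketch (hypothetical names): `kwarg_only` rejects positional
# calls while keyword calls pass through unchanged:
#
#   @kwarg_only
#   def scale(x, factor):
#     return x * factor
#
#   scale(x=2, factor=3)  # -> 6
#   scale(2, 3)           # -> TypeError asking for keyword arguments.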
tf_export = functools.partial(api_export, api_name=TENSORFLOW_API_NAME)
estimator_export = functools.partial(api_export, api_name=ESTIMATOR_API_NAME)
keras_export = functools.partial(api_export, api_name=KERAS_API_NAME)
|
tensorflow-master
|
tensorflow/python/util/tf_export.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility to retrieve function args."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _is_bound_method(fn):
_, fn = tf_decorator.unwrap(fn)
return tf_inspect.ismethod(fn) and (fn.__self__ is not None)
def _is_callable_object(obj):
return hasattr(obj, '__call__') and tf_inspect.ismethod(obj.__call__)
def fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
if isinstance(fn, functools.partial):
args = fn_args(fn.func)
args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])]
else:
if _is_callable_object(fn):
fn = fn.__call__
args = tf_inspect.getfullargspec(fn).args
if _is_bound_method(fn) and args:
# If it's a bound method, it may or may not have a self/cls first
# argument; for example, self could be captured in *args.
# If it does have a positional argument, it is self/cls.
args.pop(0)
return tuple(args)
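# Illustrative sketch (hypothetical names): keyword-bound partial arguments are
# dropped from the reported argument names:
#
#   def model_fn(features, labels, mode):
#     ...
#
#   fn_args(model_fn)                                  # -> ('features', 'labels', 'mode')
#   fn_args(functools.partial(model_fn, mode='eval'))  # -> ('features', 'labels')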
def has_kwargs(fn):
"""Returns whether the passed callable has **kwargs in its signature.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`bool`: if `fn` has **kwargs in its signature.
Raises:
`TypeError`: If fn is not a Function, or function-like object.
"""
if isinstance(fn, functools.partial):
fn = fn.func
elif _is_callable_object(fn):
fn = fn.__call__
elif not callable(fn):
raise TypeError(
'fn should be a function-like object, but is of type {}.'.format(
type(fn)))
return tf_inspect.getfullargspec(fn).varkw is not None
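# Illustrative sketch (hypothetical names):
#
#   def f(a, **extra):
#     return a
#
#   has_kwargs(f)             # -> True
#   has_kwargs(lambda a: a)   # -> False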
def get_func_name(func):
"""Returns name of passed callable."""
_, func = tf_decorator.unwrap(func)
if callable(func):
if tf_inspect.isfunction(func):
return func.__name__
elif tf_inspect.ismethod(func):
return '%s.%s' % (six.get_method_self(func).__class__.__name__,
six.get_method_function(func).__name__)
else: # Probably a class instance with __call__
return str(type(func))
else:
raise ValueError('Argument must be callable')
def get_func_code(func):
"""Returns func_code of passed callable, or None if not available."""
_, func = tf_decorator.unwrap(func)
if callable(func):
if tf_inspect.isfunction(func) or tf_inspect.ismethod(func):
return six.get_function_code(func)
# Since the object is not a function or method, but is a callable, we will
    # try to access the __call__ method as a function. This works with callable
    # classes but fails with functools.partial objects despite their __call__
# attribute.
try:
return six.get_function_code(func.__call__)
except AttributeError:
return None
else:
raise ValueError('Argument must be callable')
_rewriter_config_optimizer_disabled = None
def get_disabled_rewriter_config():
global _rewriter_config_optimizer_disabled
if _rewriter_config_optimizer_disabled is None:
config = config_pb2.ConfigProto()
rewriter_config = config.graph_options.rewrite_options
rewriter_config.disable_meta_optimizer = True
_rewriter_config_optimizer_disabled = config.SerializeToString()
return _rewriter_config_optimizer_disabled
|
tensorflow-master
|
tensorflow/python/util/function_utils.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keyword args tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.util import keyword_args
class KeywordArgsTest(test.TestCase):
def test_keyword_args_only(self):
def func_without_decorator(a, b):
return a + b
@keyword_args.keyword_args_only
def func_with_decorator(a, b):
return func_without_decorator(a, b)
self.assertEqual(3, func_without_decorator(1, 2))
self.assertEqual(3, func_without_decorator(a=1, b=2))
self.assertEqual(3, func_with_decorator(a=1, b=2))
# Providing non-keyword args should fail.
with self.assertRaisesRegexp(
ValueError, "Must use keyword args to call func_with_decorator."):
self.assertEqual(3, func_with_decorator(1, 2))
# Partially providing keyword args should fail.
with self.assertRaisesRegexp(
ValueError, "Must use keyword args to call func_with_decorator."):
self.assertEqual(3, func_with_decorator(1, b=2))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/util/keyword_args_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Decorator that provides a warning if the wrapped object is never used."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import sys
import traceback
import six # pylint: disable=unused-import
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import tf_decorator
# pylint: enable=g-bad-import-order,g-import-not-at-top
class _TFShouldUseHelper(object):
"""Object stored in TFShouldUse-wrapped objects.
  When it is deleted, it will emit a warning or error if its `sate` method
  has not been called by the time of deletion, and TensorFlow is not executing
eagerly outside of functions.
"""
def __init__(self, type_, repr_, stack_frame, fatal_error_if_unsated):
self._type = type_
self._repr = repr_
self._stack_frame = stack_frame
self._fatal_error_if_unsated = fatal_error_if_unsated
self._sated = False
def sate(self):
self._sated = True
self._type = None
self._repr = None
self._stack_frame = None
self._logging_module = None
def __del__(self):
if ops.executing_eagerly_outside_functions():
return
if self._sated:
return
if self._fatal_error_if_unsated:
logger = tf_logging.fatal
else:
logger = tf_logging.error
creation_stack = ''.join(
[line.rstrip() for line in traceback.format_stack(self._stack_frame)])
logger(
'==================================\n'
'Object was never used (type %s):\n%s\nIf you want to mark it as '
'used call its "mark_used()" method.\nIt was originally created '
'here:\n%s\n'
'==================================' %
(self._type, self._repr, creation_stack))
def _new__init__(self, true_value, tf_should_use_helper):
# pylint: disable=protected-access
self._tf_should_use_helper = tf_should_use_helper
self._true_value = true_value
def _new__setattr__(self, key, value):
if key in ('_tf_should_use_helper', '_true_value'):
return object.__setattr__(self, key, value)
return setattr(
object.__getattribute__(self, '_true_value'),
key, value)
def _new__getattribute__(self, key):
if key not in ('_tf_should_use_helper', '_true_value'):
object.__getattribute__(self, '_tf_should_use_helper').sate()
  if key in ('_tf_should_use_helper', 'mark_used', '__setattr__'):
return object.__getattribute__(self, key)
return getattr(object.__getattribute__(self, '_true_value'), key)
def _new_mark_used(self, *args, **kwargs):
object.__getattribute__(self, '_tf_should_use_helper').sate()
try:
mu = object.__getattribute__(
object.__getattribute__(self, '_true_value'),
'mark_used')
return mu(*args, **kwargs)
except AttributeError:
pass
_WRAPPERS = {}
def _get_wrapper(x, tf_should_use_helper):
"""Create a wrapper for object x, whose class subclasses type(x).
The wrapper will emit a warning if it is deleted without any of its
properties being accessed or methods being called.
Args:
x: The instance to wrap.
tf_should_use_helper: The object that tracks usage.
Returns:
An object wrapping `x`, of type `type(x)`.
"""
type_x = type(x)
memoized = _WRAPPERS.get(type_x, None)
if memoized:
return memoized(x, tf_should_use_helper)
tx = copy.deepcopy(type_x)
copy_tx = type(tx.__name__, tx.__bases__, dict(tx.__dict__))
copy_tx.__init__ = _new__init__
copy_tx.__getattribute__ = _new__getattribute__
copy_tx.mark_used = _new_mark_used
copy_tx.__setattr__ = _new__setattr__
_WRAPPERS[type_x] = copy_tx
return copy_tx(x, tf_should_use_helper)
def _add_should_use_warning(x, fatal_error=False):
"""Wraps object x so that if it is never used, a warning is logged.
Args:
x: Python object.
fatal_error: Python bool. If `True`, tf.compat.v1.logging.fatal is raised
if the returned value is never used.
Returns:
An instance of `TFShouldUseWarningWrapper` which subclasses `type(x)`
and is a very shallow wrapper for `x` which logs access into `x`.
"""
if x is None or x == []: # pylint: disable=g-explicit-bool-comparison
return x
# Extract the current frame for later use by traceback printing.
try:
raise ValueError()
except ValueError:
stack_frame = sys.exc_info()[2].tb_frame.f_back
tf_should_use_helper = _TFShouldUseHelper(
type_=type(x),
repr_=repr(x),
stack_frame=stack_frame,
fatal_error_if_unsated=fatal_error)
return _get_wrapper(x, tf_should_use_helper)
def should_use_result(fn):
"""Function wrapper that ensures the function's output is used.
If the output is not used, a `tf.compat.v1.logging.error` is logged.
An output is marked as used if any of its attributes are read, modified, or
updated. Examples when the output is a `Tensor` include:
- Using it in any capacity (e.g. `y = t + 0`, `sess.run(t)`)
- Accessing a property (e.g. getting `t.name` or `t.op`).
Note, certain behaviors cannot be tracked - for these the object may not
be marked as used. Examples include:
- `t != 0`. In this case, comparison is done on types / ids.
- `isinstance(t, tf.Tensor)`. Similar to above.
Args:
fn: The function to wrap.
Returns:
The wrapped function.
"""
def wrapped(*args, **kwargs):
return _add_should_use_warning(fn(*args, **kwargs))
return tf_decorator.make_decorator(
fn, wrapped, 'should_use_result',
((fn.__doc__ or '') +
('\n\n '
'**NOTE** The output of this function should be used. If it is not, '
'a warning will be logged. To mark the output as used, '
'call its .mark_used() method.')))
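# Illustrative sketch (hypothetical names): any attribute access on the result,
# or an explicit mark_used() call, prevents the "never used" log:
#
#   @should_use_result
#   def make_const():
#     return constant_op.constant(1.0)
#
#   t = make_const()
#   t.mark_used()  # Silences the warning for this particular value.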
def must_use_result_or_fatal(fn):
"""Function wrapper that ensures the function's output is used.
If the output is not used, a `tf.compat.v1.logging.fatal` error is raised.
An output is marked as used if any of its attributes are read, modified, or
updated. Examples when the output is a `Tensor` include:
- Using it in any capacity (e.g. `y = t + 0`, `sess.run(t)`)
- Accessing a property (e.g. getting `t.name` or `t.op`).
Note, certain behaviors cannot be tracked - for these the object may not
be marked as used. Examples include:
- `t != 0`. In this case, comparison is done on types / ids.
- `isinstance(t, tf.Tensor)`. Similar to above.
Args:
fn: The function to wrap.
Returns:
The wrapped function.
"""
def wrapped(*args, **kwargs):
return _add_should_use_warning(fn(*args, **kwargs), fatal_error=True)
return tf_decorator.make_decorator(
fn, wrapped, 'must_use_result_or_fatal',
((fn.__doc__ or '') +
('\n\n '
'**NOTE** The output of this function must be used. If it is not, '
'a fatal error will be raised. To mark the output as used, '
'call its .mark_used() method.')))
|
tensorflow-master
|
tensorflow/python/util/tf_should_use.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions used to extract and analyze stacks. Faster than Python libs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import inspect
import linecache
import sys
import threading
# Names for indices into TF traceback tuples.
TB_FILENAME = 0
TB_LINENO = 1
TB_FUNCNAME = 2
TB_CODEDICT = 3 # Dictionary of Python interpreter state.
stacks = threading.local()
def _source_mappers():
if not hasattr(stacks, 'source_mapper'):
stacks.source_mapper = []
return stacks.source_mapper
def _source_filters():
if not hasattr(stacks, 'source_filter'):
stacks.source_filter = []
return stacks.source_filter
class StackTraceMapper(object):
"""Allows remapping traceback information to different source code."""
def __enter__(self):
_source_mappers().append(self)
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
assert _source_mappers()[-1] is self, 'Concurrent access?'
_source_mappers().pop()
def map(self, filename, lineno, name):
raise NotImplementedError('subclasses need to override this')
class StackTraceFilter(object):
"""Allows filtering traceback information by removing superfluous frames."""
def __enter__(self):
_source_filters().append(self)
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
assert _source_filters()[-1] is self, 'Concurrent access?'
_source_filters().pop()
def filter(self, filename, lineno, name):
raise NotImplementedError('subclasses need to override this')
class CurrentModuleFilter(StackTraceFilter):
"""Filters stack frames from the module where this is used (best effort)."""
def __init__(self):
filter_filename = None
outer_f = None
f = inspect.currentframe()
try:
if f is not None:
# The current frame is __init__. The first outer frame should be the
# caller.
outer_f = f.f_back
if outer_f is not None:
filter_filename = inspect.getsourcefile(outer_f)
self._filename = filter_filename
finally:
# Avoid reference cycles, see:
# https://docs.python.org/3.7/library/inspect.html#the-interpreter-stack
del f
del outer_f
def should_remove(self, filename, lineno, name):
del lineno, name
return filename == self._filename
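# Illustrative sketch: installing the filter as a context manager removes
# frames that originate from the module where the filter was created (the
# innermost frame is always kept by extract_stack below):
#
#   with CurrentModuleFilter():
#     frames = extract_stack()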
def extract_stack(limit=None):
"""A lightweight, extensible re-implementation of traceback.extract_stack.
NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
each stack frame using linecache, which results in an abundance of stat()
calls. This implementation does not retrieve the code, and any consumer
should apply _convert_stack to the result to obtain a traceback that can
be formatted etc. using traceback methods.
Args:
limit: A limit on the number of frames to return.
Returns:
A list of 5-tuples
(filename, lineno, name, frame_globals, func_start_lineno)
corresponding to the call stack of the current thread. The returned tuples
have the innermost stack frame at the end, unlike the Python inspect
module's stack() function.
"""
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
ret = []
length = 0
while f is not None and (limit is None or length < limit):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
frame_globals = f.f_globals
func_start_lineno = co.co_firstlineno
for mapper in _source_mappers():
# TODO(mdan): Show some indication that the frame was translated.
filename, lineno, name = mapper.map(filename, lineno, name)
keep = True
if ret: # Never filter the innermost frame.
keep = not any(
f.should_remove(filename, lineno, name) for f in _source_filters())
if keep:
ret.append((filename, lineno, name, frame_globals, func_start_lineno))
length += 1
f = f.f_back
# TODO(mdan): Also add a truncation mechanism.
ret.reverse()
return ret
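# Illustrative sketch: the raw 5-tuples can be turned into a printable
# traceback by passing them through convert_stack() defined below:
#
#   raw = extract_stack(limit=10)
#   for filename, lineno, name, line in convert_stack(raw):
#     print('%s:%d in %s: %s' % (filename, lineno, name, line))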
FileAndLine = collections.namedtuple('FileAndLine', ['file', 'line'])
def extract_stack_file_and_line(max_length=1000):
"""A version of extract_stack that only returns filenames and line numbers.
Callers often only require filenames and line numbers, and do not need the
additional information gathered by extract_stack, as they never call
convert_stack.
As a further optimisation, we allow users to specify a limit on the number of
frames examined.
Args:
max_length: The maximum length of stack to extract.
Returns:
A list of FileAndLine objects corresponding to the call stack of the current
thread.
"""
try:
raise ZeroDivisionError
except ZeroDivisionError:
frame = sys.exc_info()[2].tb_frame.f_back
ret = []
length = 0
while frame is not None and length < max_length:
ret.append(FileAndLine(frame.f_code.co_filename, frame.f_lineno))
length += 1
frame = frame.f_back
ret.reverse()
return ret
def convert_stack(stack, include_func_start_lineno=False):
"""Converts a stack extracted using extract_stack() to a traceback stack.
Args:
stack: A list of n 5-tuples,
(filename, lineno, name, frame_globals, func_start_lineno).
include_func_start_lineno: True if function start line number should be
included as the 5th entry in return tuples.
Returns:
A list of n 4-tuples or 5-tuples
(filename, lineno, name, code, [optional: func_start_lineno]), where the
code tuple element is calculated from the corresponding elements of the
input tuple.
"""
ret = []
for (filename, lineno, name, frame_globals, func_start_lineno) in stack:
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame_globals)
if line:
line = line.strip()
else:
line = None
if include_func_start_lineno:
ret.append((filename, lineno, name, line, func_start_lineno))
else:
ret.append((filename, lineno, name, line))
return ret
|
tensorflow-master
|
tensorflow/python/util/tf_stack.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
class _CustomMapping(collections.Mapping):
def __init__(self, *args, **kwargs):
self._wrapped = dict(*args, **kwargs)
def __getitem__(self, key):
return self._wrapped[key]
def __iter__(self):
return iter(self._wrapped)
def __len__(self):
return len(self._wrapped)
class NestTest(parameterized.TestCase, test.TestCase):
PointXY = collections.namedtuple("Point", ["x", "y"]) # pylint: disable=invalid-name
if attr:
class BadAttr(object):
"""Class that has a non-iterable __attrs_attrs__."""
__attrs_attrs__ = None
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
@attr.s
class UnsortedSampleAttr(object):
field3 = attr.ib()
field1 = attr.ib()
field2 = attr.ib()
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsFlattenAndPack(self):
if attr is None:
self.skipTest("attr module is unavailable.")
field_values = [1, 2]
sample_attr = NestTest.SampleAttr(*field_values)
self.assertFalse(nest._is_attrs(field_values))
self.assertTrue(nest._is_attrs(sample_attr))
flat = nest.flatten(sample_attr)
self.assertEqual(field_values, flat)
restructured_from_flat = nest.pack_sequence_as(sample_attr, flat)
self.assertIsInstance(restructured_from_flat, NestTest.SampleAttr)
self.assertEqual(restructured_from_flat, sample_attr)
# Check that flatten fails if attributes are not iterable
with self.assertRaisesRegexp(TypeError, "object is not iterable"):
flat = nest.flatten(NestTest.BadAttr())
@parameterized.parameters(
{"values": [1, 2, 3]},
{"values": [{"B": 10, "A": 20}, [1, 2], 3]},
{"values": [(1, 2), [3, 4], 5]},
{"values": [PointXY(1, 2), 3, 4]},
)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsMapStructure(self, values):
if attr is None:
self.skipTest("attr module is unavailable.")
structure = NestTest.UnsortedSampleAttr(*values)
new_structure = nest.map_structure(lambda x: x, structure)
self.assertEqual(structure, new_structure)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
structure = (NestTest.PointXY(x=4, y=2),
((NestTest.PointXY(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, "flat_sequence"):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenDictOrder(self, mapping_type):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
def testPackDictOrder(self, mapping_type):
"""Packing orders dicts by key, including OrderedDicts."""
custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
custom_reconstruction = nest.pack_sequence_as(custom, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertIsInstance(custom_reconstruction, mapping_type)
self.assertIsInstance(plain_reconstruction, dict)
self.assertEqual(
mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
custom_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack_withDicts(self):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
mess = [
"z",
NestTest.Abc(3, 4), {
"d": _CustomMapping({
41: 4
}),
"c": [
1,
collections.OrderedDict([
("b", 3),
("a", 2),
]),
],
"b": 5
}, 17
]
flattened = nest.flatten(mess)
self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17])
structure_of_mess = [
14,
NestTest.Abc("a", True),
{
"d": _CustomMapping({
41: 42
}),
"c": [
0,
collections.OrderedDict([
("b", 9),
("a", 8),
]),
],
"b": 3
},
"hi everybody",
]
unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
self.assertEqual(unflattened, mess)
# Check also that the OrderedDict was created, with the correct key order.
unflattened_ordered_dict = unflattened[2]["c"][1]
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
unflattened_custom_mapping = unflattened[2]["d"]
self.assertIsInstance(unflattened_custom_mapping, _CustomMapping)
self.assertEqual(list(unflattened_custom_mapping.keys()), [41])
def testFlatten_numpyIsNotFlattened(self):
structure = np.array([1, 2, 3])
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
def testFlatten_stringIsNotFlattened(self):
structure = "lots of letters"
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
unflattened = nest.pack_sequence_as("goodbye", flattened)
self.assertEqual(structure, unflattened)
def testPackSequenceAs_notIterableError(self):
with self.assertRaisesRegexp(TypeError,
"flat_sequence must be a sequence"):
nest.pack_sequence_as("hi", "bye")
def testPackSequenceAs_wrongLengthsError(self):
with self.assertRaisesRegexp(
ValueError,
"Structure had 2 elements, but flat_sequence had 3 elements."):
nest.pack_sequence_as(["hello", "world"],
["and", "goodbye", "again"])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testIsNested(self):
self.assertFalse(nest.is_nested("1234"))
self.assertTrue(nest.is_nested([1, 3, [4, 5]]))
self.assertTrue(nest.is_nested(((7, 8), (5, 6))))
self.assertTrue(nest.is_nested([]))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}))
self.assertFalse(nest.is_nested(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_nested(ones))
self.assertFalse(nest.is_nested(math_ops.tanh(ones)))
self.assertFalse(nest.is_nested(np.ones((4, 5))))
@parameterized.parameters({"mapping_type": _CustomMapping},
{"mapping_type": dict})
def testFlattenDictItems(self, mapping_type):
dictionary = mapping_type({(4, 5, (6, 8)): ("a", "b", ("c", "d"))})
flat = {4: "a", 5: "b", 6: "c", 8: "d"}
self.assertEqual(nest.flatten_dict_items(dictionary), flat)
with self.assertRaises(TypeError):
nest.flatten_dict_items(4)
bad_dictionary = mapping_type({(4, 5, (4, 8)): ("a", "b", ("c", "d"))})
with self.assertRaisesRegexp(ValueError, "not unique"):
nest.flatten_dict_items(bad_dictionary)
another_bad_dictionary = mapping_type({
(4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))
})
with self.assertRaisesRegexp(
ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
nest.flatten_dict_items(another_bad_dictionary)
# pylint does not correctly recognize these as class names and
# suggests to use variable style under_score naming.
# pylint: disable=invalid-name
Named0ab = collections.namedtuple("named_0", ("a", "b"))
Named1ab = collections.namedtuple("named_1", ("a", "b"))
SameNameab = collections.namedtuple("same_name", ("a", "b"))
SameNameab2 = collections.namedtuple("same_name", ("a", "b"))
SameNamexy = collections.namedtuple("same_name", ("x", "y"))
SameName1xy = collections.namedtuple("same_name_1", ("x", "y"))
SameName1xy2 = collections.namedtuple("same_name_1", ("x", "y"))
NotSameName = collections.namedtuple("not_same_name", ("a", "b"))
# pylint: enable=invalid-name
class SameNamedType1(SameNameab):
pass
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
"More specifically: Substructure "
r'"type=tuple str=\(\(1, 2\), 3\)" is a sequence, while '
'substructure "type=str str=spam" is not\n'
"Entire first structure:\n"
r"\(\(\(\., \.\), \.\), \., \(\., \.\)\)\n"
"Entire second structure:\n"
r"\(\., \.\)")):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
r'is a sequence, while substructure "type=ndarray str=\[0 1\]" '
"is not")):
nest.assert_same_structure([0, 1], np.array([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
'is a sequence, while substructure "type=int str=0" '
"is not")):
nest.assert_same_structure(0, [0, 1])
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1])
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(structure1, structure_different_nesting)
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
NestTest.Named0ab("a", "b"))
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab([3], 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure([[3], 4], [3, [4]])
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
with self.assertRaisesRegexp(ValueError,
"don't have the same set of keys"):
nest.assert_same_structure({"a": 1}, {"b": 1})
nest.assert_same_structure(NestTest.SameNameab(0, 1),
NestTest.SameNameab2(2, 3))
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
nest.assert_same_structure(
NestTest.SameNameab(NestTest.SameName1xy(0, 1), 2),
NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4))
expected_message = "The two structures don't have the same.*"
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_same_structure(
NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)),
NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.NotSameName(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamexy(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamedType1(2, 3))
EmptyNT = collections.namedtuple("empty_nt", "") # pylint: disable=invalid-name
def testHeterogeneousComparison(self):
nest.assert_same_structure({"a": 4}, _CustomMapping(a=3))
nest.assert_same_structure(_CustomMapping(b=3), {"b": 4})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
# Empty structures
self.assertEqual((), nest.map_structure(lambda x: x + 1, ()))
self.assertEqual([], nest.map_structure(lambda x: x + 1, []))
self.assertEqual({}, nest.map_structure(lambda x: x + 1, {}))
self.assertEqual(NestTest.EmptyNT(), nest.map_structure(lambda x: x + 1,
NestTest.EmptyNT()))
# This is checking actual equality of types, empty list != empty tuple
self.assertNotEqual((), nest.map_structure(lambda x: x + 1, []))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "at least one structure"):
nest.map_structure(lambda x: x)
with self.assertRaisesRegexp(ValueError, "same number of elements"):
nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5))
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5])
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, structure1, structure1_list)
nest.map_structure(lambda x, y: None, structure1, structure1_list,
check_types=False)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
ABTuple = collections.namedtuple("ab_tuple", "a, b") # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructureWithStrings(self):
inp_a = NestTest.ABTuple(a="foo", b=("bar", "baz"))
inp_b = NestTest.ABTuple(a=2, b=(1, 3))
out = nest.map_structure(lambda string, repeats: string * repeats,
inp_a,
inp_b)
self.assertEqual("foofoo", out.a)
self.assertEqual("bar", out.b[0])
self.assertEqual("bazbazbaz", out.b[1])
nt = NestTest.ABTuple(a=("something", "something_else"),
b="yet another thing")
rev_nt = nest.map_structure(lambda x: x[::-1], nt)
# Check the output is the correct structure, and all strings are reversed.
nest.assert_same_structure(nt, rev_nt)
self.assertEqual(nt.a[0][::-1], rev_nt.a[0])
self.assertEqual(nt.a[1][::-1], rev_nt.a[1])
self.assertEqual(nt.b[::-1], rev_nt.b)
@test_util.run_deprecated_v1
def testMapStructureOverPlaceholders(self):
inp_a = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
inp_b = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
output = nest.map_structure(lambda x1, x2: x1 + x2, inp_a, inp_b)
nest.assert_same_structure(output, inp_a)
self.assertShapeEqual(np.zeros((3, 4)), output[0])
self.assertShapeEqual(np.zeros((3, 7)), output[1])
feed_dict = {
inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)),
inp_b: (np.random.randn(3, 4), np.random.randn(3, 7))
}
with self.cached_session() as sess:
output_np = sess.run(output, feed_dict=feed_dict)
self.assertAllClose(output_np[0],
feed_dict[inp_a][0] + feed_dict[inp_b][0])
self.assertAllClose(output_np[1],
feed_dict[inp_a][1] + feed_dict[inp_b][1])
def testAssertShallowStructure(self):
inp_ab = ["a", "b"]
inp_abc = ["a", "b", "c"]
with self.assertRaisesWithLiteralMatch( # pylint: disable=g-error-prone-assert-raises
ValueError,
nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(inp_ab),
shallow_length=len(inp_abc))):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = [(1, 1), (2, 2)]
inp_ab2 = [[1, 1], [2, 2]]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._STRUCTURES_HAVE_MISMATCHING_TYPES.format(
shallow_type=type(inp_ab2[0]),
input_type=type(inp_ab1[0]))):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["d"])):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
nest.assert_shallow_structure(inp_ab, inp_ba)
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
inp_shallow = NestTest.SameNameab(1, 2)
inp_deep = NestTest.SameNameab2(1, [1, 2, 3])
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False)
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True)
def testFlattenUpTo(self):
# Shallow tree ends at scalar.
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
# Shallow tree ends at string.
input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
# Make sure dicts are correctly flattened, yielding values, not keys.
input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[1, {"c": 2}, 3, (4, 5)])
# Namedtuples.
ab_tuple = NestTest.ABTuple
input_tree = ab_tuple(a=[0, 1], b=2)
shallow_tree = ab_tuple(a=0, b=1)
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[[0, 1], 2])
# Nested dicts, OrderedDicts and namedtuples.
input_tree = collections.OrderedDict(
[("a", ab_tuple(a=[0, {"b": 1}], b=2)),
("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
shallow_tree = input_tree
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
3,
collections.OrderedDict([("f", 4)])])
shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
{"d": 3, "e": collections.OrderedDict([("f", 4)])}])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ["input_tree_0", "input_tree_1"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = [0]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = [0, 1]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ["shallow_tree"]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = "input_tree"
shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
# Using non-iterable elements.
input_tree = 0
shallow_tree = [9]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = 0
shallow_tree = [9, 8]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = [(1,), (2,), 3]
shallow_tree = [(1,), (2,)]
expected_message = nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree), shallow_length=len(shallow_tree))
with self.assertRaisesRegexp(ValueError, expected_message): # pylint: disable=g-error-prone-assert-raises
nest.assert_shallow_structure(shallow_tree, input_tree)
def testFlattenWithTuplePathsUpTo(self):
def get_paths_and_values(shallow_tree, input_tree,
check_subtrees_length=True):
path_value_pairs = nest.flatten_with_tuple_paths_up_to(
shallow_tree, input_tree, check_subtrees_length=check_subtrees_length)
paths = [p for p, _ in path_value_pairs]
values = [v for _, v in path_value_pairs]
return paths, values
# Shallow tree ends at scalar.
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths,
[(0, 0), (0, 1), (1, 0), (1, 1)])
self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
self.assertEqual(flattened_shallow_tree_paths,
[(0, 0), (0, 1), (1, 0), (1, 1)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
# Shallow tree ends at string.
input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
input_tree_flattened_paths = [p for p, _ in
nest.flatten_with_tuple_paths(input_tree)]
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[(0, 0), (0, 1, 0), (0, 1, 1, 0), (0, 1, 1, 1, 0)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened_paths,
[(0, 0, 0), (0, 0, 1),
(0, 1, 0, 0), (0, 1, 0, 1),
(0, 1, 1, 0, 0), (0, 1, 1, 0, 1),
(0, 1, 1, 1, 0, 0), (0, 1, 1, 1, 0, 1)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
# Make sure dicts are correctly flattened, yielding values, not keys.
input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("b",), ("d", 0), ("d", 1)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[1, {"c": 2}, 3, (4, 5)])
# Namedtuples.
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
input_tree = ab_tuple(a=[0, 1], b=2)
shallow_tree = ab_tuple(a=0, b=1)
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("b",)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[[0, 1], 2])
# Nested dicts, OrderedDicts and namedtuples.
input_tree = collections.OrderedDict(
[("a", ab_tuple(a=[0, {"b": 1}], b=2)),
("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
shallow_tree = input_tree
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a", "a", 0),
("a", "a", 1, "b"),
("a", "b"),
("c", "d"),
("c", "e", "f")])
self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",),
("c", "d"),
("c", "e")])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
3,
collections.OrderedDict([("f", 4)])])
shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("c",)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
{"d": 3, "e": collections.OrderedDict([("f", 4)])}])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ["input_tree_0", "input_tree_1"]
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Test case where len(shallow_tree) < len(input_tree)
input_tree = {"a": "A", "b": "B", "c": "C"}
shallow_tree = {"a": 1, "c": 2}
with self.assertRaisesWithLiteralMatch( # pylint: disable=g-error-prone-assert-raises
ValueError,
nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree),
shallow_length=len(shallow_tree))):
get_paths_and_values(shallow_tree, input_tree)
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree,
check_subtrees_length=False)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [("a",), ("c",)])
self.assertEqual(flattened_input_tree, ["A", "C"])
self.assertEqual(flattened_shallow_tree_paths, [("a",), ("c",)])
self.assertEqual(flattened_shallow_tree, [1, 2])
# Using non-iterable elements.
input_tree = [0]
shallow_tree = 9
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = [0, 1]
shallow_tree = 9
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ["shallow_tree"]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = "input_tree"
shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
# Using non-iterable elements.
input_tree = 0
shallow_tree = [9]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = 0
shallow_tree = [9, 8]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
def testMapStructureUpTo(self):
# Named tuples.
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
# Lists.
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ["evens", ["odds", "primes"]]
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]])
# Dicts.
inp_val = dict(a=2, b=3)
inp_ops = dict(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
self.assertEqual(out["a"], 6)
self.assertEqual(out["b"], 15)
# Non-equal dicts.
inp_val = dict(a=2, b=3)
inp_ops = dict(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
# Dict+custom mapping.
inp_val = dict(a=2, b=3)
inp_ops = _CustomMapping(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
self.assertEqual(out["a"], 6)
self.assertEqual(out["b"], 15)
# Non-equal dict/mapping.
inp_val = dict(a=2, b=3)
inp_ops = _CustomMapping(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
def testGetTraverseShallowStructure(self):
scalar_traverse_input = [3, 4, (1, 2, [0]), [5, 6], {"a": (7,)}, []]
scalar_traverse_r = nest.get_traverse_shallow_structure(
lambda s: not isinstance(s, tuple),
scalar_traverse_input)
self.assertEqual(scalar_traverse_r,
[True, True, False, [True, True], {"a": False}, []])
nest.assert_shallow_structure(scalar_traverse_r,
scalar_traverse_input)
structure_traverse_input = [(1, [2]), ([1], 2)]
structure_traverse_r = nest.get_traverse_shallow_structure(
lambda s: (True, False) if isinstance(s, tuple) else True,
structure_traverse_input)
self.assertEqual(structure_traverse_r,
[(True, False), ([True], False)])
nest.assert_shallow_structure(structure_traverse_r,
structure_traverse_input)
with self.assertRaisesRegexp(TypeError, "returned structure"):
nest.get_traverse_shallow_structure(lambda _: [True], 0)
with self.assertRaisesRegexp(TypeError, "returned a non-bool scalar"):
nest.get_traverse_shallow_structure(lambda _: 1, [1])
with self.assertRaisesRegexp(
TypeError, "didn't return a depth=1 structure of bools"):
nest.get_traverse_shallow_structure(lambda _: [1], [1])
def testYieldFlatStringPaths(self):
for inputs_expected in ({"inputs": [], "expected": []},
{"inputs": 3, "expected": [()]},
{"inputs": [3], "expected": [(0,)]},
{"inputs": {"a": 3}, "expected": [("a",)]},
{"inputs": {"a": {"b": 4}},
"expected": [("a", "b")]},
{"inputs": [{"a": 2}], "expected": [(0, "a")]},
{"inputs": [{"a": [2]}], "expected": [(0, "a", 0)]},
{"inputs": [{"a": [(23, 42)]}],
"expected": [(0, "a", 0, 0), (0, "a", 0, 1)]},
{"inputs": [{"a": ([23], 42)}],
"expected": [(0, "a", 0, 0), (0, "a", 1)]},
{"inputs": {"a": {"a": 2}, "c": [[[4]]]},
"expected": [("a", "a"), ("c", 0, 0, 0)]},
{"inputs": {"0": [{"1": 23}]},
"expected": [("0", 0, "1")]}):
inputs = inputs_expected["inputs"]
expected = inputs_expected["expected"]
self.assertEqual(list(nest.yield_flat_paths(inputs)), expected)
# We cannot define namedtuples within @parameterized argument lists.
# pylint: disable=invalid-name
Foo = collections.namedtuple("Foo", ["a", "b"])
Bar = collections.namedtuple("Bar", ["c", "d"])
# pylint: enable=invalid-name
@parameterized.parameters([
dict(inputs=[], expected=[]),
dict(inputs=[23, "42"], expected=[("0", 23), ("1", "42")]),
dict(inputs=[[[[108]]]], expected=[("0/0/0/0", 108)]),
dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
expected=[("a", 3), ("b/c", 23), ("b/d", 42)]),
dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
expected=[("a/c", 23), ("a/d", 42), ("b/c", 0), ("b/d", "thing")]),
dict(inputs=Bar(c=42, d=43),
expected=[("c", 42), ("d", 43)]),
dict(inputs=Bar(c=[42], d=43),
expected=[("c/0", 42), ("d", 43)]),
])
def testFlattenWithStringPaths(self, inputs, expected):
self.assertEqual(
nest.flatten_with_joined_string_paths(inputs, separator="/"),
expected)
@parameterized.parameters([
dict(inputs=[], expected=[]),
dict(inputs=[23, "42"], expected=[((0,), 23), ((1,), "42")]),
dict(inputs=[[[[108]]]], expected=[((0, 0, 0, 0), 108)]),
dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
expected=[(("a",), 3), (("b", "c"), 23), (("b", "d"), 42)]),
dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
expected=[(("a", "c"), 23), (("a", "d"), 42), (("b", "c"), 0),
(("b", "d"), "thing")]),
dict(inputs=Bar(c=42, d=43),
expected=[(("c",), 42), (("d",), 43)]),
dict(inputs=Bar(c=[42], d=43),
expected=[(("c", 0), 42), (("d",), 43)]),
])
def testFlattenWithTuplePaths(self, inputs, expected):
self.assertEqual(nest.flatten_with_tuple_paths(inputs), expected)
@parameterized.named_parameters(
("tuples", (1, 2), (3, 4), True, (("0", 4), ("1", 6))),
("dicts", {"a": 1, "b": 2}, {"b": 4, "a": 3}, True,
{"a": ("a", 4), "b": ("b", 6)}),
("mixed", (1, 2), [3, 4], False, (("0", 4), ("1", 6))),
("nested",
{"a": [2, 3], "b": [1, 2, 3]}, {"b": [5, 6, 7], "a": [8, 9]}, True,
{"a": [("a/0", 10), ("a/1", 12)],
"b": [("b/0", 6), ("b/1", 8), ("b/2", 10)]}))
def testMapWithPathsCompatibleStructures(self, s1, s2, check_types, expected):
def format_sum(path, *values):
return (path, sum(values))
result = nest.map_structure_with_paths(format_sum, s1, s2,
check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters(
("tuples", (1, 2, 3), (4, 5), ValueError),
("dicts", {"a": 1}, {"b": 2}, ValueError),
("mixed", (1, 2), [3, 4], TypeError),
("nested",
{"a": [2, 3, 4], "b": [1, 3]},
{"b": [5, 6], "a": [8, 9]},
ValueError
))
def testMapWithPathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_paths(lambda path, *s: 0, s1, s2)
@parameterized.named_parameters([
dict(testcase_name="Tuples", s1=(1, 2), s2=(3, 4),
check_types=True, expected=(((0,), 4), ((1,), 6))),
dict(testcase_name="Dicts", s1={"a": 1, "b": 2}, s2={"b": 4, "a": 3},
check_types=True, expected={"a": (("a",), 4), "b": (("b",), 6)}),
dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4],
check_types=False, expected=(((0,), 4), ((1,), 6))),
dict(testcase_name="Nested",
s1={"a": [2, 3], "b": [1, 2, 3]},
s2={"b": [5, 6, 7], "a": [8, 9]},
check_types=True,
expected={"a": [(("a", 0), 10), (("a", 1), 12)],
"b": [(("b", 0), 6), (("b", 1), 8), (("b", 2), 10)]}),
])
def testMapWithTuplePathsCompatibleStructures(
self, s1, s2, check_types, expected):
def path_and_sum(path, *values):
return path, sum(values)
result = nest.map_structure_with_tuple_paths(
path_and_sum, s1, s2, check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters([
dict(testcase_name="Tuples", s1=(1, 2, 3), s2=(4, 5),
error_type=ValueError),
dict(testcase_name="Dicts", s1={"a": 1}, s2={"b": 2},
error_type=ValueError),
dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4], error_type=TypeError),
dict(testcase_name="Nested",
s1={"a": [2, 3, 4], "b": [1, 3]},
s2={"b": [5, 6], "a": [8, 9]},
error_type=ValueError)
])
def testMapWithTuplePathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_tuple_paths(lambda path, *s: 0, s1, s2)
class NestBenchmark(test.Benchmark):
def run_and_report(self, s1, s2, name):
burn_iter, test_iter = 100, 30000
for _ in xrange(burn_iter):
nest.assert_same_structure(s1, s2)
t0 = time.time()
for _ in xrange(test_iter):
nest.assert_same_structure(s1, s2)
t1 = time.time()
self.report_benchmark(iters=test_iter, wall_time=(t1 - t0) / test_iter,
name=name)
def benchmark_assert_structure(self):
s1 = (((1, 2), 3), 4, (5, 6))
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
self.run_and_report(s1, s2, "assert_same_structure_6_elem")
s1 = (((1, 2), 3), 4, (5, 6)) * 10
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) * 10
self.run_and_report(s1, s2, "assert_same_structure_60_elem")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/util/nest_test.py
|
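# --- Hedged usage sketch (editor's addition, not part of nest_test.py) ---
# A minimal, self-contained illustration of the flatten/pack round-trip,
# key-ordering, and flatten_up_to behavior that the tests above assert.
# Assumes a TensorFlow installation exposing tensorflow.python.util.nest.
from tensorflow.python.util import nest

structure = {"b": (1, 2), "a": [3]}
flat = nest.flatten(structure)                    # dict leaves come out in sorted key order
assert flat == [3, 1, 2]
rebuilt = nest.pack_sequence_as(structure, ["x", "y", "z"])
assert rebuilt == {"a": ["x"], "b": ("y", "z")}   # packs back into the same structure
doubled = nest.map_structure(lambda x: x * 2, structure)
assert doubled == {"a": [6], "b": (2, 4)}         # structure preserved, leaves transformed
shallow = {"a": 0, "b": 0}
deep = {"a": [1, 2], "b": {"c": 3}}
partial = nest.flatten_up_to(shallow, deep)       # stops descending at the shallow tree's leaves
assert partial == [[1, 2], {"c": 3}]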
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for serialization functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.platform import test
from tensorflow.python.util import serialization
class SerializationTests(test.TestCase):
def test_serialize_dense(self):
dense = core.Dense(3)
dense(constant_op.constant([[4.]]))
round_trip = json.loads(json.dumps(
dense, default=serialization.get_json_type))
self.assertEqual(3, round_trip["config"]["units"])
def test_serialize_shape(self):
round_trip = json.loads(json.dumps(
tensor_shape.TensorShape([None, 2, 3]),
default=serialization.get_json_type))
self.assertIs(round_trip[0], None)
self.assertEqual(round_trip[1], 2)
@test_util.run_in_graph_and_eager_modes
def test_serialize_sequential(self):
model = sequential.Sequential()
model.add(core.Dense(4))
model.add(core.Dense(5))
model(constant_op.constant([[1.]]))
sequential_round_trip = json.loads(
json.dumps(model, default=serialization.get_json_type))
self.assertEqual(
5, sequential_round_trip["config"]["layers"][1]["config"]["units"])
@test_util.run_in_graph_and_eager_modes
def test_serialize_model(self):
x = input_layer.Input(shape=[3])
y = core.Dense(10)(x)
model = training.Model(x, y)
model(constant_op.constant([[1., 1., 1.]]))
model_round_trip = json.loads(
json.dumps(model, default=serialization.get_json_type))
self.assertEqual(
10, model_round_trip["config"]["layers"][1]["config"]["units"])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/util/serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for serializing Python objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.framework import tensor_shape
def get_json_type(obj):
"""Serializes any object to a JSON-serializable structure.
Arguments:
obj: the object to serialize
Returns:
JSON-serializable structure representing `obj`.
Raises:
TypeError: if `obj` cannot be serialized.
"""
# if obj is a serializable Keras class instance
# e.g. optimizer, layer
if hasattr(obj, 'get_config'):
return {'class_name': obj.__class__.__name__, 'config': obj.get_config()}
# if obj is any numpy type
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
# misc functions (e.g. loss function)
if callable(obj):
return obj.__name__
# if obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
if isinstance(obj, tensor_shape.Dimension):
return obj.value
if isinstance(obj, tensor_shape.TensorShape):
return obj.as_list()
if isinstance(obj, collections.Mapping):
return dict(obj)
raise TypeError('Not JSON Serializable:', obj)
|
tensorflow-master
|
tensorflow/python/util/serialization.py
|
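# --- Hedged usage sketch (editor's addition, not part of serialization.py) ---
# get_json_type is intended to be passed as json.dumps(default=...) so that
# objects the json module cannot encode natively fall back to the conversions
# defined above. Assumes numpy and the TensorFlow modules below are importable.
import json

import numpy as np

from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import serialization

payload = {
    "weights": np.arange(3, dtype=np.float32),     # ndarray -> nested list
    "shape": tensor_shape.TensorShape([None, 2]),  # TensorShape -> as_list()
    "loss": len,                                   # callable -> __name__
}
encoded = json.dumps(payload, default=serialization.get_json_type)
assert json.loads(encoded) == {
    "weights": [0.0, 1.0, 2.0],
    "shape": [None, 2],
    "loss": "len",
}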
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf_export tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.platform import test
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
def _test_function(unused_arg=0):
pass
def _test_function2(unused_arg=0):
pass
class TestClassA(object):
pass
class TestClassB(TestClassA):
pass
class ValidateExportTest(test.TestCase):
"""Tests for tf_export class."""
class MockModule(object):
def __init__(self, name):
self.__name__ = name
def setUp(self):
self._modules = []
def tearDown(self):
for name in self._modules:
del sys.modules[name]
self._modules = []
    for symbol in [_test_function, _test_function2, TestClassA, TestClassB]:
if hasattr(symbol, '_tf_api_names'):
del symbol._tf_api_names
if hasattr(symbol, '_tf_api_names_v1'):
del symbol._tf_api_names_v1
if hasattr(symbol, '_estimator_api_names'):
del symbol._estimator_api_names
if hasattr(symbol, '_estimator_api_names_v1'):
del symbol._estimator_api_names_v1
def _CreateMockModule(self, name):
mock_module = self.MockModule(name)
sys.modules[name] = mock_module
self._modules.append(name)
return mock_module
def testExportSingleFunction(self):
export_decorator = tf_export.tf_export('nameA', 'nameB')
decorated_function = export_decorator(_test_function)
self.assertEquals(decorated_function, _test_function)
self.assertEquals(('nameA', 'nameB'), decorated_function._tf_api_names)
self.assertEquals(['nameA', 'nameB'],
tf_export.get_v1_names(decorated_function))
self.assertEquals(['nameA', 'nameB'],
tf_export.get_v2_names(decorated_function))
def testExportMultipleFunctions(self):
export_decorator1 = tf_export.tf_export('nameA', 'nameB')
export_decorator2 = tf_export.tf_export('nameC', 'nameD')
decorated_function1 = export_decorator1(_test_function)
decorated_function2 = export_decorator2(_test_function2)
self.assertEquals(decorated_function1, _test_function)
self.assertEquals(decorated_function2, _test_function2)
self.assertEquals(('nameA', 'nameB'), decorated_function1._tf_api_names)
self.assertEquals(('nameC', 'nameD'), decorated_function2._tf_api_names)
def testExportClasses(self):
export_decorator_a = tf_export.tf_export('TestClassA1')
export_decorator_a(TestClassA)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
self.assertTrue('_tf_api_names' not in TestClassB.__dict__)
export_decorator_b = tf_export.tf_export('TestClassB1')
export_decorator_b(TestClassB)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
self.assertEquals(('TestClassB1',), TestClassB._tf_api_names)
self.assertEquals(['TestClassA1'], tf_export.get_v1_names(TestClassA))
self.assertEquals(['TestClassB1'], tf_export.get_v1_names(TestClassB))
def testExportClassInEstimator(self):
export_decorator_a = tf_export.tf_export('TestClassA1')
export_decorator_a(TestClassA)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
export_decorator_b = tf_export.estimator_export(
'estimator.TestClassB1')
export_decorator_b(TestClassB)
self.assertTrue('_tf_api_names' not in TestClassB.__dict__)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
self.assertEquals(['TestClassA1'], tf_export.get_v1_names(TestClassA))
self.assertEquals(['estimator.TestClassB1'],
tf_export.get_v1_names(TestClassB))
def testExportSingleConstant(self):
module1 = self._CreateMockModule('module1')
export_decorator = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator.export_constant('module1', 'test_constant')
self.assertEquals([(('NAME_A', 'NAME_B'), 'test_constant')],
module1._tf_api_constants)
self.assertEquals([(('NAME_A', 'NAME_B'), 'test_constant')],
tf_export.get_v1_constants(module1))
self.assertEquals([(('NAME_A', 'NAME_B'), 'test_constant')],
tf_export.get_v2_constants(module1))
def testExportMultipleConstants(self):
module1 = self._CreateMockModule('module1')
module2 = self._CreateMockModule('module2')
test_constant1 = 123
test_constant2 = 'abc'
test_constant3 = 0.5
export_decorator1 = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator2 = tf_export.tf_export('NAME_C', 'NAME_D')
export_decorator3 = tf_export.tf_export('NAME_E', 'NAME_F')
export_decorator1.export_constant('module1', test_constant1)
export_decorator2.export_constant('module2', test_constant2)
export_decorator3.export_constant('module2', test_constant3)
self.assertEquals([(('NAME_A', 'NAME_B'), 123)],
module1._tf_api_constants)
self.assertEquals([(('NAME_C', 'NAME_D'), 'abc'),
(('NAME_E', 'NAME_F'), 0.5)],
module2._tf_api_constants)
def testRaisesExceptionIfAlreadyHasAPINames(self):
_test_function._tf_api_names = ['abc']
export_decorator = tf_export.tf_export('nameA', 'nameB')
with self.assertRaises(tf_export.SymbolAlreadyExposedError):
export_decorator(_test_function)
def testRaisesExceptionIfInvalidSymbolName(self):
# TensorFlow code is not allowed to export symbols under package
# tf.estimator
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.tf_export('estimator.invalid')
# All symbols exported by Estimator must be under tf.estimator package.
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('invalid')
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('Estimator.invalid')
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('invalid.estimator')
def testRaisesExceptionIfInvalidV1SymbolName(self):
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.tf_export('valid', v1=['estimator.invalid'])
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('estimator.valid', v1=['invalid'])
def testOverridesFunction(self):
_test_function2._tf_api_names = ['abc']
export_decorator = tf_export.tf_export(
'nameA', 'nameB', overrides=[_test_function2])
export_decorator(_test_function)
# _test_function overrides _test_function2. So, _tf_api_names
# should be removed from _test_function2.
self.assertFalse(hasattr(_test_function2, '_tf_api_names'))
def testMultipleDecorators(self):
def get_wrapper(func):
def wrapper(*unused_args, **unused_kwargs):
pass
return tf_decorator.make_decorator(func, wrapper)
decorated_function = get_wrapper(_test_function)
export_decorator = tf_export.tf_export('nameA', 'nameB')
exported_function = export_decorator(decorated_function)
self.assertEquals(decorated_function, exported_function)
self.assertEquals(('nameA', 'nameB'), _test_function._tf_api_names)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/util/tf_export_test.py
|
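# --- Hedged usage sketch (editor's addition, not part of tf_export_test.py) ---
# tf_export records the public API names under which a symbol is exported; the
# tests above check this via the private _tf_api_names attribute. The exported
# name and function below are hypothetical and used only for illustration.
from tensorflow.python.util import tf_export


@tf_export.tf_export("example.my_fn")
def my_fn():
  return 42


assert my_fn._tf_api_names == ("example.my_fn",)
assert tf_export.get_v1_names(my_fn) == ["example.my_fn"]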
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for Python 2 vs. 3 compatibility that are private to TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.compat import as_str_any
def path_to_str(path):
"""Returns the file system path representation of a `PathLike` object,
else as it is.
Args:
path: An object that can be converted to path representation.
Returns:
A `str` object.
"""
if hasattr(path, "__fspath__"):
path = as_str_any(path.__fspath__())
return path
|
tensorflow-master
|
tensorflow/python/util/compat_internal.py
|
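# --- Hedged usage sketch (editor's addition, not part of compat_internal.py) ---
# path_to_str unwraps os.PathLike objects via __fspath__ and passes other
# values (such as plain strings) through unchanged.
import pathlib

from tensorflow.python.util.compat_internal import path_to_str

assert path_to_str(pathlib.PurePosixPath("/tmp/model")) == "/tmp/model"
assert path_to_str("/tmp/model") == "/tmp/model"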
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""decorator_utils tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
def _test_function(unused_arg=0):
pass
class GetQualifiedNameTest(test.TestCase):
def test_method(self):
self.assertEqual(
"GetQualifiedNameTest.test_method",
decorator_utils.get_qualified_name(GetQualifiedNameTest.test_method))
def test_function(self):
self.assertEqual("_test_function",
decorator_utils.get_qualified_name(_test_function))
class AddNoticeToDocstringTest(test.TestCase):
def _check(self, doc, expected):
self.assertEqual(
decorator_utils.add_notice_to_docstring(
doc=doc,
instructions="Instructions",
no_doc_str="Nothing here",
suffix_str="(suffix)",
notice=["Go away"]),
expected)
def test_regular(self):
expected = (
"Brief (suffix)\n\nWarning: Go away\nInstructions\n\nDocstring\n\n"
"Args:\n arg1: desc")
# No indent for main docstring
self._check("Brief\n\nDocstring\n\nArgs:\n arg1: desc", expected)
# 2 space indent for main docstring, blank lines not indented
self._check("Brief\n\n Docstring\n\n Args:\n arg1: desc", expected)
# 2 space indent for main docstring, blank lines indented as well.
self._check("Brief\n \n Docstring\n \n Args:\n arg1: desc", expected)
# No indent for main docstring, first line blank.
self._check("\n Brief\n \n Docstring\n \n Args:\n arg1: desc",
expected)
# 2 space indent, first line blank.
self._check("\n Brief\n \n Docstring\n \n Args:\n arg1: desc",
expected)
def test_brief_only(self):
expected = "Brief (suffix)\n\nWarning: Go away\nInstructions"
self._check("Brief", expected)
self._check("Brief\n", expected)
self._check("Brief\n ", expected)
self._check("\nBrief\n ", expected)
self._check("\n Brief\n ", expected)
def test_no_docstring(self):
expected = "Nothing here\n\nWarning: Go away\nInstructions"
self._check(None, expected)
self._check("", expected)
def test_no_empty_line(self):
expected = "Brief (suffix)\n\nWarning: Go away\nInstructions\n\nDocstring"
# No second line indent
self._check("Brief\nDocstring", expected)
# 2 space second line indent
self._check("Brief\n Docstring", expected)
# No second line indent, first line blank
self._check("\nBrief\nDocstring", expected)
# 2 space second line indent, first line blank
self._check("\n Brief\n Docstring", expected)
class ValidateCallableTest(test.TestCase):
def test_function(self):
decorator_utils.validate_callable(_test_function, "test")
def test_method(self):
decorator_utils.validate_callable(self.test_method, "test")
def test_callable(self):
class TestClass(object):
def __call__(self):
pass
decorator_utils.validate_callable(TestClass(), "test")
def test_partial(self):
partial = functools.partial(_test_function, unused_arg=7)
decorator_utils.validate_callable(partial, "test")
def test_fail_non_callable(self):
x = 0
self.assertRaises(ValueError, decorator_utils.validate_callable, x, "test")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/util/decorator_utils_test.py
|
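# --- Hedged usage sketch (editor's addition, not part of decorator_utils_test.py) ---
# add_notice_to_docstring prepends a warning block to an existing docstring;
# the tests above exercise it for several indentation styles. The deprecation
# wording below is illustrative only.
from tensorflow.python.util import decorator_utils

doc = decorator_utils.add_notice_to_docstring(
    doc="Brief\n\nDetails.",
    instructions="Use something_else instead.",
    no_doc_str="Nothing here",
    suffix_str="(deprecated)",
    notice=["THIS FUNCTION IS DEPRECATED"])
assert doc.startswith("Brief (deprecated)")
assert "THIS FUNCTION IS DEPRECATED" in doc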
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python.util.protobuf.compare."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import textwrap
import six
from google.protobuf import text_format
from tensorflow.python.platform import googletest
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.protobuf import compare_test_pb2
def LargePbs(*args):
"""Converts ASCII string Large PBs to messages."""
pbs = []
for arg in args:
pb = compare_test_pb2.Large()
text_format.Merge(arg, pb)
pbs.append(pb)
return pbs
class ProtoEqTest(googletest.TestCase):
def assertNotEquals(self, a, b):
"""Asserts that ProtoEq says a != b."""
a, b = LargePbs(a, b)
googletest.TestCase.assertEquals(self, compare.ProtoEq(a, b), False)
def assertEquals(self, a, b):
"""Asserts that ProtoEq says a == b."""
a, b = LargePbs(a, b)
googletest.TestCase.assertEquals(self, compare.ProtoEq(a, b), True)
def testPrimitives(self):
googletest.TestCase.assertEqual(self, True, compare.ProtoEq('a', 'a'))
googletest.TestCase.assertEqual(self, False, compare.ProtoEq('b', 'a'))
def testEmpty(self):
self.assertEquals('', '')
def testPrimitiveFields(self):
self.assertNotEquals('string_: "a"', '')
self.assertEquals('string_: "a"', 'string_: "a"')
self.assertNotEquals('string_: "b"', 'string_: "a"')
self.assertNotEquals('string_: "ab"', 'string_: "aa"')
self.assertNotEquals('int64_: 0', '')
self.assertEquals('int64_: 0', 'int64_: 0')
self.assertNotEquals('int64_: -1', '')
self.assertNotEquals('int64_: 1', 'int64_: 0')
self.assertNotEquals('int64_: 0', 'int64_: -1')
self.assertNotEquals('float_: 0.0', '')
self.assertEquals('float_: 0.0', 'float_: 0.0')
self.assertNotEquals('float_: -0.1', '')
self.assertNotEquals('float_: 3.14', 'float_: 0')
self.assertNotEquals('float_: 0', 'float_: -0.1')
self.assertEquals('float_: -0.1', 'float_: -0.1')
self.assertNotEquals('bool_: true', '')
self.assertNotEquals('bool_: false', '')
self.assertNotEquals('bool_: true', 'bool_: false')
self.assertEquals('bool_: false', 'bool_: false')
self.assertEquals('bool_: true', 'bool_: true')
self.assertNotEquals('enum_: A', '')
self.assertNotEquals('enum_: B', 'enum_: A')
self.assertNotEquals('enum_: C', 'enum_: B')
self.assertEquals('enum_: C', 'enum_: C')
def testRepeatedPrimitives(self):
self.assertNotEquals('int64s: 0', '')
self.assertEquals('int64s: 0', 'int64s: 0')
self.assertNotEquals('int64s: 1', 'int64s: 0')
self.assertNotEquals('int64s: 0 int64s: 0', '')
self.assertNotEquals('int64s: 0 int64s: 0', 'int64s: 0')
self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0')
self.assertNotEquals('int64s: 0 int64s: 1', 'int64s: 0')
self.assertNotEquals('int64s: 1', 'int64s: 0 int64s: 2')
self.assertNotEquals('int64s: 2 int64s: 0', 'int64s: 1')
self.assertEquals('int64s: 0 int64s: 0', 'int64s: 0 int64s: 0')
self.assertEquals('int64s: 0 int64s: 1', 'int64s: 0 int64s: 1')
self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 0')
self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 1')
self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 2')
self.assertNotEquals('int64s: 1 int64s: 1', 'int64s: 1 int64s: 0')
self.assertNotEquals('int64s: 1 int64s: 1', 'int64s: 1 int64s: 0 int64s: 2')
def testMessage(self):
self.assertNotEquals('small <>', '')
self.assertEquals('small <>', 'small <>')
self.assertNotEquals('small < strings: "a" >', '')
self.assertNotEquals('small < strings: "a" >', 'small <>')
self.assertEquals('small < strings: "a" >', 'small < strings: "a" >')
self.assertNotEquals('small < strings: "b" >', 'small < strings: "a" >')
self.assertNotEquals('small < strings: "a" strings: "b" >',
'small < strings: "a" >')
self.assertNotEquals('string_: "a"', 'small <>')
self.assertNotEquals('string_: "a"', 'small < strings: "b" >')
self.assertNotEquals('string_: "a"', 'small < strings: "b" strings: "c" >')
self.assertNotEquals('string_: "a" small <>', 'small <>')
self.assertNotEquals('string_: "a" small <>', 'small < strings: "b" >')
self.assertEquals('string_: "a" small <>', 'string_: "a" small <>')
self.assertNotEquals('string_: "a" small < strings: "a" >',
'string_: "a" small <>')
self.assertEquals('string_: "a" small < strings: "a" >',
'string_: "a" small < strings: "a" >')
self.assertNotEquals('string_: "a" small < strings: "a" >',
'int64_: 1 small < strings: "a" >')
self.assertNotEquals('string_: "a" small < strings: "a" >', 'int64_: 1')
self.assertNotEquals('string_: "a"', 'int64_: 1 small < strings: "a" >')
self.assertNotEquals('string_: "a" int64_: 0 small < strings: "a" >',
'int64_: 1 small < strings: "a" >')
self.assertNotEquals('string_: "a" int64_: 1 small < strings: "a" >',
'string_: "a" int64_: 0 small < strings: "a" >')
self.assertEquals('string_: "a" int64_: 0 small < strings: "a" >',
'string_: "a" int64_: 0 small < strings: "a" >')
def testNestedMessage(self):
self.assertNotEquals('medium <>', '')
self.assertEquals('medium <>', 'medium <>')
self.assertNotEquals('medium < smalls <> >', 'medium <>')
self.assertEquals('medium < smalls <> >', 'medium < smalls <> >')
self.assertNotEquals('medium < smalls <> smalls <> >',
'medium < smalls <> >')
self.assertEquals('medium < smalls <> smalls <> >',
'medium < smalls <> smalls <> >')
self.assertNotEquals('medium < int32s: 0 >', 'medium < smalls <> >')
self.assertNotEquals('medium < smalls < strings: "a"> >',
'medium < smalls <> >')
def testTagOrder(self):
"""Tests that different fields are ordered by tag number.
For reference, here are the relevant tag numbers from compare_test.proto:
optional string string_ = 1;
optional int64 int64_ = 2;
optional float float_ = 3;
optional Small small = 8;
optional Medium medium = 7;
"""
self.assertNotEquals('string_: "a" ',
' int64_: 1 ')
self.assertNotEquals('string_: "a" int64_: 2 ',
' int64_: 1 ')
self.assertNotEquals('string_: "b" int64_: 1 ',
'string_: "a" int64_: 2 ')
self.assertEquals('string_: "a" int64_: 1 ',
'string_: "a" int64_: 1 ')
self.assertNotEquals('string_: "a" int64_: 1 float_: 0.0',
'string_: "a" int64_: 1 ')
self.assertEquals('string_: "a" int64_: 1 float_: 0.0',
'string_: "a" int64_: 1 float_: 0.0')
self.assertNotEquals('string_: "a" int64_: 1 float_: 0.1',
'string_: "a" int64_: 1 float_: 0.0')
self.assertNotEquals('string_: "a" int64_: 2 float_: 0.0',
'string_: "a" int64_: 1 float_: 0.1')
self.assertNotEquals('string_: "a" ',
' int64_: 1 float_: 0.1')
self.assertNotEquals('string_: "a" float_: 0.0',
' int64_: 1 ')
self.assertNotEquals('string_: "b" float_: 0.0',
'string_: "a" int64_: 1 ')
self.assertNotEquals('string_: "a"', 'small < strings: "a" >')
self.assertNotEquals('string_: "a" small < strings: "a" >',
'small < strings: "b" >')
self.assertNotEquals('string_: "a" small < strings: "b" >',
'string_: "a" small < strings: "a" >')
self.assertEquals('string_: "a" small < strings: "a" >',
'string_: "a" small < strings: "a" >')
self.assertNotEquals('string_: "a" medium <>',
'string_: "a" small < strings: "a" >')
self.assertNotEquals('string_: "a" medium < smalls <> >',
'string_: "a" small < strings: "a" >')
self.assertNotEquals('medium <>', 'small < strings: "a" >')
self.assertNotEquals('medium <> small <>', 'small < strings: "a" >')
self.assertNotEquals('medium < smalls <> >', 'small < strings: "a" >')
self.assertNotEquals('medium < smalls < strings: "a" > >',
'small < strings: "b" >')
class NormalizeNumbersTest(googletest.TestCase):
"""Tests for NormalizeNumberFields()."""
def testNormalizesInts(self):
pb = compare_test_pb2.Large()
pb.int64_ = 4
compare.NormalizeNumberFields(pb)
self.assertTrue(isinstance(pb.int64_, six.integer_types))
pb.int64_ = 4
compare.NormalizeNumberFields(pb)
self.assertTrue(isinstance(pb.int64_, six.integer_types))
pb.int64_ = 9999999999999999
compare.NormalizeNumberFields(pb)
self.assertTrue(isinstance(pb.int64_, six.integer_types))
def testNormalizesRepeatedInts(self):
pb = compare_test_pb2.Large()
pb.int64s.extend([1, 400, 999999999999999])
compare.NormalizeNumberFields(pb)
self.assertTrue(isinstance(pb.int64s[0], six.integer_types))
self.assertTrue(isinstance(pb.int64s[1], six.integer_types))
self.assertTrue(isinstance(pb.int64s[2], six.integer_types))
def testNormalizesFloats(self):
pb1 = compare_test_pb2.Large()
pb1.float_ = 1.2314352351231
pb2 = compare_test_pb2.Large()
pb2.float_ = 1.231435
self.assertNotEqual(pb1.float_, pb2.float_)
compare.NormalizeNumberFields(pb1)
compare.NormalizeNumberFields(pb2)
self.assertEqual(pb1.float_, pb2.float_)
def testNormalizesRepeatedFloats(self):
pb = compare_test_pb2.Large()
pb.medium.floats.extend([0.111111111, 0.111111])
compare.NormalizeNumberFields(pb)
for value in pb.medium.floats:
self.assertAlmostEqual(0.111111, value)
def testNormalizesDoubles(self):
pb1 = compare_test_pb2.Large()
pb1.double_ = 1.2314352351231
pb2 = compare_test_pb2.Large()
pb2.double_ = 1.2314352
self.assertNotEqual(pb1.double_, pb2.double_)
compare.NormalizeNumberFields(pb1)
compare.NormalizeNumberFields(pb2)
self.assertEqual(pb1.double_, pb2.double_)
def testNormalizesMaps(self):
pb = compare_test_pb2.WithMap()
pb.value_message[4].strings.extend(['a', 'b', 'c'])
pb.value_string['d'] = 'e'
compare.NormalizeNumberFields(pb)
class AssertTest(googletest.TestCase):
"""Tests assertProtoEqual()."""
def assertProtoEqual(self, a, b, **kwargs):
if isinstance(a, six.string_types) and isinstance(b, six.string_types):
a, b = LargePbs(a, b)
compare.assertProtoEqual(self, a, b, **kwargs)
def assertAll(self, a, **kwargs):
"""Checks that all possible asserts pass."""
self.assertProtoEqual(a, a, **kwargs)
def assertSameNotEqual(self, a, b):
"""Checks that assertProtoEqual() fails."""
self.assertRaises(AssertionError, self.assertProtoEqual, a, b)
def assertNone(self, a, b, message, **kwargs):
"""Checks that all possible asserts fail with the given message."""
message = re.escape(textwrap.dedent(message))
self.assertRaisesRegexp(AssertionError, message, self.assertProtoEqual, a,
b, **kwargs)
def testCheckInitialized(self):
# neither is initialized
a = compare_test_pb2.Labeled()
a.optional = 1
self.assertNone(a, a, 'Initialization errors: ', check_initialized=True)
self.assertAll(a, check_initialized=False)
# a is initialized, b isn't
b = copy.deepcopy(a)
a.required = 2
self.assertNone(a, b, 'Initialization errors: ', check_initialized=True)
self.assertNone(
a,
b,
"""
- required: 2
optional: 1
""",
check_initialized=False)
# both are initialized
a = compare_test_pb2.Labeled()
a.required = 2
self.assertAll(a, check_initialized=True)
self.assertAll(a, check_initialized=False)
b = copy.deepcopy(a)
b.required = 3
message = """
- required: 2
? ^
+ required: 3
? ^
"""
self.assertNone(a, b, message, check_initialized=True)
self.assertNone(a, b, message, check_initialized=False)
def testAssertEqualWithStringArg(self):
pb = compare_test_pb2.Large()
pb.string_ = 'abc'
pb.float_ = 1.234
compare.assertProtoEqual(self, """
string_: 'abc'
float_: 1.234
""", pb)
def testNormalizesNumbers(self):
pb1 = compare_test_pb2.Large()
pb1.int64_ = 4
pb2 = compare_test_pb2.Large()
pb2.int64_ = 4
compare.assertProtoEqual(self, pb1, pb2)
def testNormalizesFloat(self):
pb1 = compare_test_pb2.Large()
pb1.double_ = 4.0
pb2 = compare_test_pb2.Large()
pb2.double_ = 4
compare.assertProtoEqual(self, pb1, pb2, normalize_numbers=True)
def testPrimitives(self):
self.assertAll('string_: "x"')
self.assertNone('string_: "x"', 'string_: "y"', """
- string_: "x"
? ^
+ string_: "y"
? ^
""")
def testRepeatedPrimitives(self):
self.assertAll('int64s: 0 int64s: 1')
self.assertSameNotEqual('int64s: 0 int64s: 1', 'int64s: 1 int64s: 0')
self.assertSameNotEqual('int64s: 0 int64s: 1 int64s: 2',
'int64s: 2 int64s: 1 int64s: 0')
self.assertSameNotEqual('int64s: 0', 'int64s: 0 int64s: 0')
self.assertSameNotEqual('int64s: 0 int64s: 1',
'int64s: 1 int64s: 0 int64s: 1')
self.assertNone('int64s: 0', 'int64s: 0 int64s: 2', """
int64s: 0
+ int64s: 2
""")
self.assertNone('int64s: 0 int64s: 1', 'int64s: 0 int64s: 2', """
int64s: 0
- int64s: 1
? ^
+ int64s: 2
? ^
""")
def testMessage(self):
self.assertAll('medium: {}')
self.assertAll('medium: { smalls: {} }')
self.assertAll('medium: { int32s: 1 smalls: {} }')
self.assertAll('medium: { smalls: { strings: "x" } }')
self.assertAll(
'medium: { smalls: { strings: "x" } } small: { strings: "y" }')
self.assertSameNotEqual('medium: { smalls: { strings: "x" strings: "y" } }',
'medium: { smalls: { strings: "y" strings: "x" } }')
self.assertSameNotEqual(
'medium: { smalls: { strings: "x" } smalls: { strings: "y" } }',
'medium: { smalls: { strings: "y" } smalls: { strings: "x" } }')
self.assertSameNotEqual(
'medium: { smalls: { strings: "x" strings: "y" strings: "x" } }',
'medium: { smalls: { strings: "y" strings: "x" } }')
self.assertSameNotEqual(
'medium: { smalls: { strings: "x" } int32s: 0 }',
'medium: { int32s: 0 smalls: { strings: "x" } int32s: 0 }')
self.assertNone('medium: {}', 'medium: { smalls: { strings: "x" } }', """
medium {
+ smalls {
+ strings: "x"
+ }
}
""")
self.assertNone('medium: { smalls: { strings: "x" } }',
'medium: { smalls: {} }', """
medium {
smalls {
- strings: "x"
}
}
""")
self.assertNone('medium: { int32s: 0 }', 'medium: { int32s: 1 }', """
medium {
- int32s: 0
? ^
+ int32s: 1
? ^
}
""")
def testMsgPassdown(self):
self.assertRaisesRegexp(
AssertionError,
'test message passed down',
self.assertProtoEqual,
'medium: {}',
'medium: { smalls: { strings: "x" } }',
msg='test message passed down')
def testRepeatedMessage(self):
self.assertAll('medium: { smalls: {} smalls: {} }')
self.assertAll('medium: { smalls: { strings: "x" } } medium: {}')
self.assertAll('medium: { smalls: { strings: "x" } } medium: { int32s: 0 }')
self.assertAll('medium: { smalls: {} smalls: { strings: "x" } } small: {}')
self.assertSameNotEqual('medium: { smalls: { strings: "x" } smalls: {} }',
'medium: { smalls: {} smalls: { strings: "x" } }')
self.assertSameNotEqual('medium: { smalls: {} }',
'medium: { smalls: {} smalls: {} }')
self.assertSameNotEqual('medium: { smalls: {} smalls: {} } medium: {}',
'medium: {} medium: {} medium: { smalls: {} }')
self.assertSameNotEqual(
'medium: { smalls: { strings: "x" } smalls: {} }',
'medium: { smalls: {} smalls: { strings: "x" } smalls: {} }')
self.assertNone('medium: {}', 'medium: {} medium { smalls: {} }', """
medium {
+ smalls {
+ }
}
""")
self.assertNone('medium: { smalls: {} smalls: { strings: "x" } }',
'medium: { smalls: {} smalls: { strings: "y" } }', """
medium {
smalls {
}
smalls {
- strings: "x"
? ^
+ strings: "y"
? ^
}
}
""")
class MixinTests(compare.ProtoAssertions, googletest.TestCase):
def testAssertEqualWithStringArg(self):
pb = compare_test_pb2.Large()
pb.string_ = 'abc'
pb.float_ = 1.234
self.assertProtoEqual("""
string_: 'abc'
float_: 1.234
""", pb)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/util/protobuf/compare_test.py
|
tensorflow-master
|
tensorflow/python/util/protobuf/__init__.py
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for comparing proto2 messages in Python.
ProtoEq() compares two proto2 messages for equality.
ClearDefaultValuedFields() recursively clears the fields that are set to their
default values. This is useful for comparing protocol buffers where the
semantics of unset fields and default valued fields are the same.
assertProtoEqual() is useful for unit tests. It produces much more helpful
output than assertEqual() for proto2 messages, e.g. this:
outer {
inner {
- strings: "x"
? ^
+ strings: "y"
? ^
}
}
...compared to the default output from assertEqual() that looks like this:
AssertionError: <my.Msg object at 0x9fb353c> != <my.Msg object at 0x9fb35cc>
Call it inside your unit test's googletest.TestCase subclasses like this:
from tensorflow.python.util.protobuf import compare
class MyTest(googletest.TestCase):
...
def testXXX(self):
...
compare.assertProtoEqual(self, a, b)
Alternatively:
from tensorflow.python.util.protobuf import compare
class MyTest(compare.ProtoAssertions, googletest.TestCase):
...
def testXXX(self):
...
self.assertProtoEqual(a, b)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import difflib
import six
from google.protobuf import descriptor
from google.protobuf import descriptor_pool
from google.protobuf import message
from google.protobuf import text_format
def assertProtoEqual(self, a, b, check_initialized=True, # pylint: disable=invalid-name
normalize_numbers=False, msg=None):
"""Fails with a useful error if a and b aren't equal.
Comparison of repeated fields matches the semantics of
unittest.TestCase.assertEqual(), i.e. order and extra duplicate fields matter.
Args:
self: googletest.TestCase
a: proto2 PB instance, or text string representing one.
b: proto2 PB instance -- message.Message or subclass thereof.
check_initialized: boolean, whether to fail if either a or b isn't
initialized.
normalize_numbers: boolean, whether to normalize types and precision of
numbers before comparison.
msg: if specified, is used as the error message on failure.
"""
pool = descriptor_pool.Default()
if isinstance(a, six.string_types):
a = text_format.Merge(a, b.__class__(), descriptor_pool=pool)
for pb in a, b:
if check_initialized:
errors = pb.FindInitializationErrors()
if errors:
self.fail('Initialization errors: %s\n%s' % (errors, pb))
if normalize_numbers:
NormalizeNumberFields(pb)
a_str = text_format.MessageToString(a, descriptor_pool=pool)
b_str = text_format.MessageToString(b, descriptor_pool=pool)
# Some Python versions would perform regular diff instead of multi-line
# diff if string is longer than 2**16. We substitute this behavior
# with a call to unified_diff instead to have easier-to-read diffs.
# For context, see: https://bugs.python.org/issue11763.
if len(a_str) < 2**16 and len(b_str) < 2**16:
self.assertMultiLineEqual(a_str, b_str, msg=msg)
else:
diff = '\n' + ''.join(difflib.unified_diff(a_str.splitlines(True),
b_str.splitlines(True)))
self.fail('%s : %s' % (msg, diff))
def NormalizeNumberFields(pb):
"""Normalizes types and precisions of number fields in a protocol buffer.
Due to subtleties in the Python protocol buffer implementation, it is possible
for values to have different types and precision depending on whether they
were set and retrieved directly or deserialized from a protobuf. This function
normalizes integer values to ints and longs based on width, rounds 32-bit
floats to six digits of precision to account for Python always storing them as
64-bit, and ensures doubles are floating point even when they are set to integers.
Modifies pb in place. Recurses into nested objects.
Args:
pb: proto2 message.
Returns:
the given pb, modified in place.
"""
for desc, values in pb.ListFields():
is_repeated = True
if desc.label is not descriptor.FieldDescriptor.LABEL_REPEATED:
is_repeated = False
values = [values]
normalized_values = None
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
if desc.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_SINT64):
normalized_values = [int(x) for x in values]
elif desc.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_ENUM):
normalized_values = [int(x) for x in values]
elif desc.type == descriptor.FieldDescriptor.TYPE_FLOAT:
normalized_values = [round(x, 6) for x in values]
elif desc.type == descriptor.FieldDescriptor.TYPE_DOUBLE:
normalized_values = [round(float(x), 7) for x in values]
if normalized_values is not None:
if is_repeated:
pb.ClearField(desc.name)
getattr(pb, desc.name).extend(normalized_values)
else:
setattr(pb, desc.name, normalized_values[0])
if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE or
desc.type == descriptor.FieldDescriptor.TYPE_GROUP):
if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
desc.message_type.has_options and
desc.message_type.GetOptions().map_entry):
# This is a map, only recurse if the values have a message type.
if (desc.message_type.fields_by_number[2].type ==
descriptor.FieldDescriptor.TYPE_MESSAGE):
for v in six.itervalues(values):
NormalizeNumberFields(v)
else:
for v in values:
# recursive step
NormalizeNumberFields(v)
return pb
def _IsMap(value):
return isinstance(value, collections.Mapping)
def _IsRepeatedContainer(value):
if isinstance(value, six.string_types):
return False
try:
iter(value)
return True
except TypeError:
return False
def ProtoEq(a, b):
"""Compares two proto2 objects for equality.
Recurses into nested messages. Uses list (not set) semantics for comparing
repeated fields, ie duplicates and order matter.
Args:
a: A proto2 message or a primitive.
b: A proto2 message or a primitive.
Returns:
`True` if the messages are equal.
"""
def Format(pb):
"""Returns a dictionary or unchanged pb bases on its type.
Specifically, this function returns a dictionary that maps tag
number (for messages) or element index (for repeated fields) to
value, or just pb unchanged if it's neither.
Args:
pb: A proto2 message or a primitive.
Returns:
A dict or unchanged pb.
"""
if isinstance(pb, message.Message):
return dict((desc.number, value) for desc, value in pb.ListFields())
elif _IsMap(pb):
return dict(pb.items())
elif _IsRepeatedContainer(pb):
return dict(enumerate(list(pb)))
else:
return pb
a, b = Format(a), Format(b)
# Base case
if not isinstance(a, dict) or not isinstance(b, dict):
return a == b
# This list performs double duty: it compares two messages by tag value *or*
# two repeated fields by element, in order. The magic is in the Format()
# function, which converts them both to the same easily comparable format.
for tag in sorted(set(a.keys()) | set(b.keys())):
if tag not in a or tag not in b:
return False
else:
# Recursive step
if not ProtoEq(a[tag], b[tag]):
return False
# Didn't find any values that differed, so they're equal!
return True
class ProtoAssertions(object):
"""Mix this into a googletest.TestCase class to get proto2 assertions.
Usage:
class SomeTestCase(compare.ProtoAssertions, googletest.TestCase):
...
def testSomething(self):
...
self.assertProtoEqual(a, b)
See module-level definitions for method documentation.
"""
# pylint: disable=invalid-name
def assertProtoEqual(self, *args, **kwargs):
return assertProtoEqual(self, *args, **kwargs)
|
tensorflow-master
|
tensorflow/python/util/protobuf/compare.py
|
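As a quick illustration of the list semantics documented above for ProtoEq(), the helper also treats plain Python maps and repeated containers the same way it treats message fields, so a minimal sketch can avoid protos entirely. This assumes the Python 2/3 vintage this module targets (its `collections.Mapping` check predates Python 3.10).
from tensorflow.python.util.protobuf import compare

# Repeated values are compared with list (not set) semantics, so order and
# duplicates matter; missing keys make maps unequal.
print(compare.ProtoEq([1, 2, 3], [1, 2, 3]))        # True
print(compare.ProtoEq([1, 2, 3], [3, 2, 1]))        # False: order matters
print(compare.ProtoEq([1, 2], [1, 2, 2]))           # False: extra element
print(compare.ProtoEq({'a': 1}, {'a': 1, 'b': 2}))  # False: missing key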
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convert_to_constants.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python import keras
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import simple_save
from tensorflow.python.saved_model.load import load
from tensorflow.python.saved_model.save import save
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
# TODO(nupurgarg): Simplify the test cases to use the ConcreteFunction.
class VariablesToConstantsTest(test.TestCase):
def _hasStatefulPartitionedCallOp(self, graph_def):
"""Determines if a StatefulPartitionedCall op exists in the graph."""
for node in graph_def.node:
if node.op == "StatefulPartitionedCall":
return True
return False
def _getNumVariables(self, graph_def):
"""Returns the number of ReadVariableOp in the graph."""
return sum(node.op == "ReadVariableOp" for node in graph_def.node)
def _testConvertedFunction(self, obj, func, converted_concrete_func,
input_data):
# Check that the converted ConcreteFunction produces the same result as the
# original Function.
expected_value = nest.flatten(func(input_data))
actual_value = nest.flatten(converted_concrete_func(input_data))
self.assertEqual(expected_value[0].numpy(), actual_value)
# Ensure the shape is retained.
self.assertEqual(converted_concrete_func.inputs[0].shape, input_data.shape)
# Save the converted ConcreteFunction as a signature.
save_dir = os.path.join(self.get_temp_dir(), "frozen_saved_model")
save(obj, save_dir, {"mykey": converted_concrete_func})
# Load it back and make sure it works.
loaded_obj = load(save_dir)
actual_value = nest.flatten(loaded_obj.signatures["mykey"](input_data))
self.assertEqual(expected_value[0].numpy(), actual_value)
@test_util.run_v2_only
def testConstSavedModel(self):
"""Test a basic model with functions to make sure functions are inlined."""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save(root, save_dir, to_save)
saved_model = load(save_dir)
input_func = saved_model.signatures["serving_default"]
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(variable_graph_def))
self.assertTrue(variable_graph_def.library.function)
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(constant_graph_def.library.function)
self._testConvertedFunction(root, root.f, output_func, input_data)
@test_util.run_v2_only
def testVariableModel(self):
"""Test a basic model with Variables."""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
input_func = root.f.get_concrete_function(input_data)
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(2, self._getNumVariables(variable_graph_def))
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
self._testConvertedFunction(root, root.f, output_func, input_data)
@test_util.run_v2_only
def testScalarModel(self):
"""Test a basic model with Variables."""
input_data = constant_op.constant(1., shape=[])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
input_func = root.f.get_concrete_function(input_data)
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(2, self._getNumVariables(variable_graph_def))
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
self._testConvertedFunction(root, root.f, output_func, input_data)
@test_util.run_v2_only
def testVariableSavedModel(self):
"""Test a basic model with Variables with saving/loading the SavedModel."""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save(root, save_dir, to_save)
saved_model = load(save_dir)
input_func = saved_model.signatures["serving_default"]
variable_graph_def = input_func.graph.as_graph_def()
self.assertTrue(self._hasStatefulPartitionedCallOp(variable_graph_def))
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
self._testConvertedFunction(root, root.f, output_func, input_data)
@test_util.run_v2_only
def testMultiFunctionModel(self):
"""Test a basic model with Variables."""
class BasicModel(tracking.AutoTrackable):
def __init__(self):
self.y = None
self.z = None
@def_function.function
def add(self, x):
if self.y is None:
self.y = variables.Variable(2.)
return x + self.y
@def_function.function
def sub(self, x):
if self.z is None:
self.z = variables.Variable(3.)
return x - self.z
input_data = constant_op.constant(1., shape=[1])
root = BasicModel()
input_func = root.add.get_concrete_function(input_data)
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(1, self._getNumVariables(variable_graph_def))
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
self._testConvertedFunction(root, root.add, output_func, input_data)
@test_util.run_v2_only
def testKerasModel(self):
input_data = constant_op.constant(1., shape=[1, 1])
# Create a simple Keras model.
x = [-1, 0, 1, 2, 3, 4]
y = [-3, -1, 1, 3, 5, 7]
model = keras.models.Sequential(
[keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer="sgd", loss="mean_squared_error")
model.fit(x, y, epochs=1)
# Get the concrete function from the Keras model.
@def_function.function
def to_save(x):
return model(x)
input_func = to_save.get_concrete_function(input_data)
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(2, self._getNumVariables(variable_graph_def))
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
# Check value.
expected_value = to_save(input_data)
actual_value = nest.flatten(output_func(input_data))
self.assertEqual(expected_value.numpy(), actual_value)
def _v1_single_metagraph_saved_model(self):
export_graph = ops.Graph()
with export_graph.as_default():
start = array_ops.placeholder(
shape=[1, 1], dtype=dtypes.float32, name="start")
distractor = variables.RefVariable(-1., name="distractor")
v = variables.RefVariable(3., name="v")
local_variable = variables.VariableV1(
1.,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
trainable=False,
use_resource=True)
output = array_ops.identity(start * v * local_variable, name="output")
with session_lib.Session() as session:
session.run([v.initializer, distractor.initializer,
local_variable.initializer])
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"start": start},
outputs={"output": output},
legacy_init_op=local_variable.initializer)
return path
@test_util.run_v2_only
def test_ref_variable_import(self):
saved = self._v1_single_metagraph_saved_model()
imported = load(saved)
fn = imported.signatures["serving_default"]
output_func = convert_to_constants.convert_variables_to_constants_v2(fn)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
input_data = constant_op.constant(1., shape=[1, 1])
root = tracking.AutoTrackable()
self._testConvertedFunction(root, fn, output_func, input_data)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/framework/convert_to_constants_test.py
|
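For reference, here is a minimal sketch of the conversion these tests exercise, assuming TF 2.x eager execution (the same assumption as the @test_util.run_v2_only tests above): a single variable gets folded into the frozen graph, and no ReadVariableOp nodes remain afterwards. The names `v` and `model` are illustrative only.
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.ops import variables

v = variables.Variable(3.)


@def_function.function
def model(x):
  return v * x


concrete = model.get_concrete_function(constant_op.constant(1., shape=[1]))
frozen = convert_to_constants.convert_variables_to_constants_v2(concrete)

print(frozen(constant_op.constant(2., shape=[1])))  # a tensor containing [6.]
print(any(node.op == "ReadVariableOp"
          for node in frozen.graph.as_graph_def().node))  # False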
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the TypeSpec base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.platform import googletest
class TwoTensors(object):
"""A simple value type to test TypeSpec.
Contains two tensors (x, y) and a string (color). The color value is a
stand-in for any extra type metadata we might need to store.
"""
def __init__(self, x, y, color="red"):
assert isinstance(color, str)
self.x = ops.convert_to_tensor(x)
self.y = ops.convert_to_tensor(y)
self.color = color
class TwoTensorsSpec(type_spec.TypeSpec):
"""A TypeSpec for the TwoTensors value type."""
def __init__(self, x_shape, x_dtype, y_shape, y_dtype, color="red"):
self.x_shape = tensor_shape.as_shape(x_shape)
self.x_dtype = dtypes.as_dtype(x_dtype)
self.y_shape = tensor_shape.as_shape(y_shape)
self.y_dtype = dtypes.as_dtype(y_dtype)
self.color = color
value_type = property(lambda self: TwoTensors)
@property
def _component_specs(self):
return (tensor_spec.TensorSpec(self.x_shape, self.x_dtype),
tensor_spec.TensorSpec(self.y_shape, self.y_dtype))
def _to_components(self, value):
return (value.x, value.y)
def _from_components(self, components):
return TwoTensors(*components)
def _serialize(self):
return (self.x_shape, self.x_dtype, self.y_shape, self.y_dtype, self.color)
@classmethod
def from_value(cls, value):
return cls(value.x.shape, value.x.dtype, value.y.shape, value.y.dtype,
value.color)
type_spec.register_type_spec_from_value_converter(
TwoTensors, TwoTensorsSpec.from_value)
class TypeSpecTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(
("FullySpecified",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool)),
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool)),
("Metadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue"),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue")),
("NumpyMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
np.array([[1, 2], [3, 4]])),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
np.array([[1, 2], [3, 4]]))),
)
def testEquality(self, v1, v2):
# pylint: disable=g-generic-assert
self.assertEqual(v1, v2)
self.assertEqual(v2, v1)
self.assertFalse(v1 != v2)
self.assertFalse(v2 != v1)
self.assertEqual(hash(v1), hash(v2))
@parameterized.named_parameters(
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
("IncompatibleDtype",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),
("IncompatibleRank",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool)),
("IncompatibleDimSize",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool)),
("IncompatibleMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "red"),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue")),
("SwappedValues",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([None], dtypes.bool, [5, 3], dtypes.int32)),
("DiffMetadataNumpy",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
np.array([[1, 2], [3, 4]])),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
np.array([[1, 2], [3, 8]]))),
("DiffMetadataTensorSpecName",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="a")),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="b"))),
)
def testInequality(self, v1, v2):
# pylint: disable=g-generic-assert
self.assertNotEqual(v1, v2)
self.assertNotEqual(v2, v1)
self.assertFalse(v1 == v2)
self.assertFalse(v2 == v1)
@parameterized.named_parameters(
("SameValue",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool)),
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
)
def testIsCompatibleWith(self, v1, v2):
self.assertTrue(v1.is_compatible_with(v2))
self.assertTrue(v2.is_compatible_with(v1))
@parameterized.named_parameters(
("IncompatibleDtype",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),
("IncompatibleRank",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool)),
("IncompatibleDimSize",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool)),
("IncompatibleMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "red"),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue")),
("SwappedValues",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([None], dtypes.bool, [5, 3], dtypes.int32)),
)
def testIsNotCompatibleWith(self, v1, v2):
self.assertFalse(v1.is_compatible_with(v2))
self.assertFalse(v2.is_compatible_with(v1))
@parameterized.named_parameters(
("EqualTypes",
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)),
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [8], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool),
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool)),
("DiffRank",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)),
("DiffDimSize",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),
("DiffMetadataTensorSpecName",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="a")),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="b")),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name=None))),
)
def testMostSpecificCompatibleType(self, v1, v2, expected):
self.assertEqual(v1.most_specific_compatible_type(v2), expected)
self.assertEqual(v2.most_specific_compatible_type(v1), expected)
@parameterized.named_parameters(
("IncompatibleDtype",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),
("IncompatibleMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool, "red"),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool, "blue")),
)
def testMostSpecificCompatibleTypeException(self, v1, v2):
with self.assertRaises(ValueError):
v1.most_specific_compatible_type(v2)
with self.assertRaises(ValueError):
v2.most_specific_compatible_type(v1)
def testToTensorList(self):
value = TwoTensors([1, 2, 3], [1.0, 2.0], "red")
spec = TwoTensorsSpec.from_value(value)
tensor_list = spec._to_tensor_list(value)
self.assertLen(tensor_list, 2)
self.assertIs(tensor_list[0], value.x)
self.assertIs(tensor_list[1], value.y)
def testFromTensorList(self):
x = ops.convert_to_tensor([1, 2, 3])
y = ops.convert_to_tensor([1.0, 2.0])
color = "green"
spec = TwoTensorsSpec(x.shape, x.dtype, y.shape, y.dtype, color)
value = spec._from_tensor_list([x, y])
self.assertIs(value.x, x)
self.assertIs(value.y, y)
self.assertEqual(value.color, color)
def testFromIncompatibleTensorList(self):
x = ops.convert_to_tensor([1, 2, 3])
y = ops.convert_to_tensor([1.0, 2.0])
spec1 = TwoTensorsSpec([100], x.dtype, y.shape, y.dtype, "green")
spec2 = TwoTensorsSpec(x.shape, x.dtype, y.shape, dtypes.bool, "green")
with self.assertRaises(ValueError):
spec1._from_tensor_list([x, y]) # shape mismatch
with self.assertRaises(ValueError):
spec2._from_tensor_list([x, y]) # dtype mismatch
def testFlatTensorSpecs(self):
spec = TwoTensorsSpec([5], dtypes.int32, [5, 8], dtypes.float32, "red")
self.assertEqual(spec._flat_tensor_specs,
[tensor_spec.TensorSpec([5], dtypes.int32),
tensor_spec.TensorSpec([5, 8], dtypes.float32)])
self.assertEqual(spec._flat_shapes, [[5], [5, 8]])
self.assertEqual(spec._flat_types, [dtypes.int32, dtypes.float32])
def testRepr(self):
spec = TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)
self.assertEqual(
repr(spec),
"TwoTensorsSpec(%r, %r, %r, %r, %r)" %
(tensor_shape.TensorShape([5, 3]), dtypes.int32,
tensor_shape.TensorShape(None), dtypes.bool, "red"))
def testFromValue(self):
value = TwoTensors([1, 2, 3], [1.0, 2.0], "red")
spec = type_spec.type_spec_from_value(value)
self.assertEqual(spec, TwoTensorsSpec.from_value(value))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/type_spec_test.py
|
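The compatibility rules exercised on TwoTensorsSpec above (unknown dimensions match concrete ones, dtypes must match exactly) are the same rules the built-in TensorSpec components follow. A small sketch using TensorSpec directly; the variable names are illustrative.
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec

fixed = tensor_spec.TensorSpec([5, 3], dtypes.int32)
partial = tensor_spec.TensorSpec([5, None], dtypes.int32)
other_dtype = tensor_spec.TensorSpec([5, 3], dtypes.float32)

# Unknown dimensions are compatible with concrete ones, in both directions.
print(fixed.is_compatible_with(partial))      # True
print(partial.is_compatible_with(fixed))      # True
# Dtypes must match exactly.
print(fixed.is_compatible_with(other_dtype))  # False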
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Subscribe function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import re
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
def _recursive_apply(tensors, apply_fn):
"""Helper method to recursively apply a function to structure of tensors.
The structure of the tensors should take a form similar to fetches in
`tf.compat.v1.Session` and includes a single `Tensor`, `list`, nested `list`,
`tuple`, `namedtuple`, or `dict`.
Args:
tensors: Single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or
`dict`.
apply_fn: Function to apply to each `Tensor` and should return a `Tensor`.
Returns:
Returns the modified tensors with the same structure.
Raises:
`TypeError` if an unsupported type is found in the tensors structure.
"""
tensors_type = type(tensors)
if tensors_type is ops.Tensor:
return apply_fn(tensors)
elif isinstance(tensors, variables.Variable):
return apply_fn(tensors.value())
elif isinstance(tensors, (list, tuple)):
tensors = [_recursive_apply(t, apply_fn) for t in tensors]
if tensors_type is list:
return list(tensors)
elif tensors_type is tuple:
return tuple(tensors)
return tensors_type(*tensors) # collections.namedtuple
elif tensors_type is dict:
return dict([(k, _recursive_apply(v, apply_fn)) for k, v in tensors.items()
])
else:
raise TypeError('_recursive_apply argument %r has invalid type %r' %
(tensors, tensors_type))
class _ControlOutputCache(object):
"""Helper class to manage calculating and caching control_outputs in graph."""
def __init__(self):
self.cache = {}
def calc_control_outputs(self, graph):
"""Returns the map of control_outputs for a given graph.
Args:
graph: The graph to parse.
Returns:
A map of the control outputs.
"""
control_outputs = {}
for op in graph.get_operations():
for control_input in op.control_inputs:
if control_input not in control_outputs:
control_outputs[control_input] = set()
control_outputs[control_input].add(op)
return control_outputs
def get_control_outputs(self, op):
"""Return the control outputs for a given op.
Args:
op: The op to fetch control outputs for.
Returns:
Iterable of control output ops.
"""
if op.graph not in self.cache:
control_outputs = self.calc_control_outputs(op.graph)
self.cache[op.graph] = control_outputs
else:
control_outputs = self.cache[op.graph]
return control_outputs.get(op, [])
def _subscribe_new(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
Args:
tensor: `tf.Tensor`
side_effects: List of side_effect functions see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects.
"""
update_input = []
for consumer_op in list(tensor.consumers()): # explicit copy
update_input.append((consumer_op, list(consumer_op.inputs).index(tensor)))
update_control_input = control_cache.get_control_outputs(tensor.op)
# Trailing slash on name scope to replace the scope.
name_scope = tensor.op.name + '/subscription/'
with ops.name_scope(name_scope):
outs = []
for s in side_effects:
outs += s(tensor)
with ops.control_dependencies(outs):
out = array_ops.identity(tensor)
for consumer_op, index in update_input:
consumer_op._update_input(index, out) # pylint: disable=protected-access
for consumer_op in update_control_input:
# If an op has more than one output and two or more of its output tensors
# are subscribed at the same time, we remove the control dependency from
# the original op only once and we add the dependencies to all the
# new identities.
new_control_inputs = consumer_op.control_inputs
if tensor.op in new_control_inputs:
new_control_inputs.remove(tensor.op)
new_control_inputs.append(out.op)
# pylint: disable=protected-access
consumer_op._remove_all_control_inputs()
consumer_op._add_control_inputs(new_control_inputs)
# pylint: enable=protected-access
return out
def _subscribe_extend(tensor, side_effects):
"""Helper method to extend the list of side_effects for a subscribed tensor.
Args:
tensor: A `tf.Tensor` as returned by subscribe().
side_effects: List of side_effect functions, see subscribe for details.
Returns:
The given subscribed tensor (for API consistency).
"""
assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
tensor.op.name)
source_tensor = tensor.op.inputs[0]
# Build the side effect graphs and add their outputs to the list of control
# dependencies for the subscribed tensor.
outs = []
name_scope = source_tensor.op.name + '/subscription/'
with ops.name_scope(name_scope):
for s in side_effects:
outs += s(source_tensor)
out_ops = [out.op if isinstance(out, ops.Tensor) else out for out in outs]
tensor.op._add_control_inputs(out_ops) # pylint: disable=protected-access
return tensor
def _is_subscribed_identity(tensor):
"""Checks if the given tensor is an identity op returned by `subscribe()`.
Args:
tensor: A `tf.Tensor` to check.
Returns:
True if the given tensor matches the criteria for subscription identities:
its op type is `Identity`, its name matches the name of its input and
conforms to the convention for subscribed nodes.
False otherwise.
"""
# Subscribed tensors are assumed to be identity ops.
if tensor.op.type != 'Identity':
return False
# Check that the tensor name matches the convention in place for identity ops
# created by subscribe().
match = re.match(r'(?P<prefix_name>^.*?)/subscription/Identity[^/]+',
tensor.name)
if match is None or len(match.groups()) != 1:
return False
prefix_name = match.group('prefix_name')
# Get a reference to the source tensor and check that it has a matching name.
assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
tensor.op.name)
source_tensor = tensor.op.inputs[0]
if prefix_name != source_tensor.op.name:
return False
return True
def _subscribe(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
This method will check if the given tensor has already been subscribed or if
it's a tensor returned by a previous call to `subscribe()` and, if so, will
reuse the existing identity op, appending the given side effects to the list
of existing ones.
Args:
tensor: The `tf.Tensor` to be subscribed.
side_effects: List of side_effect functions, see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects, or the given tensor itself if it has already been subscribed.
"""
# Check if the given tensor has a numpy compatible type (see dtypes.py).
# If not, we cannot subscribe it, so we just return the original tensor.
if not tensor.dtype.is_numpy_compatible:
logging.debug(('Tensor {} has an un-supported {} type and cannot be '
'subscribed.').format(tensor.name, tensor.dtype))
return tensor
if _is_subscribed_identity(tensor):
return _subscribe_extend(tensor, side_effects)
# Check if the given tensor has already been subscribed by inspecting its
# outputs.
name_scope = tensor.op.name + '/subscription/Identity'
consumers = tensor.consumers()
matching_ops = [op for op in consumers if op.name.startswith(name_scope)]
assert len(matching_ops) <= 1, ('Op {} must only have one subscription '
'op connected to it').format(tensor.op.name)
if len(matching_ops) == 1:
candidate_tensor = matching_ops[0].outputs[0]
if _is_subscribed_identity(candidate_tensor):
return _subscribe_extend(candidate_tensor, side_effects)
return _subscribe_new(tensor, side_effects, control_cache)
@contextlib.contextmanager
def _preserve_control_flow_context(tensor):
"""Preserve the control flow context for the given tensor.
Sets the graph context to the tensor's context so that side effect ops are
added under the same context.
This is needed when subscribing to tensors defined within a conditional
block or a while loop. In these cases we need that the side-effect ops
are created within the same control flow context as that of the tensor
they are attached to.
Args:
tensor: tensor whose context should be preserved.
Yields:
None
"""
# pylint: disable=protected-access
context = tensor.op._get_control_flow_context()
# pylint: enable=protected-access
if context:
context.Enter()
try:
yield
finally:
if context:
context.Exit()
def _scoped_subscribe(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
This is a thin wrapper around `_subscribe` and ensures that the side effect
ops are added within the same device and control flow context of the
subscribed tensor.
Args:
tensor: The `tf.Tensor` to be subscribed.
side_effects: List of side_effect functions, see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects, or the given tensor itself if it has already been subscribed.
"""
with ops.device(tensor.device):
with _preserve_control_flow_context(tensor):
return _subscribe(tensor, side_effects, control_cache)
def subscribe(tensors, side_effects):
"""Subscribe to a tensor.
This method will attach side effect graphs to a given set
of tensors. Set of tensors follows from session.run and supports
single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. It
returns the tensors in the same passed in structure, but as clones with
side effects applied. The supplied side effect graphs are specified
as a constructor function which takes the target tensor and
constructs a side effect graph and returns a list of ops that should
be control dependencies on fetching the tensor. It will append
'subscription' to the name scope of the tensor for every node in
the side effect graph. These control dependencies are what trigger
the side effects. Subscribe will construct the additions to your
graph and return the created identity tensor downstream of the control
dependencies. Use these tensors as you would normally in the rest of
your tensorflow code. If a given tensor has already been subscribed or a
tensor returned by a call to subscribe is passed, the previously created
identity tensor will be reused and the side effect graphs will be added to
the existing ones.
Args:
tensors: `Tensor` or set of tensors to subscribe to. Set of tensors format
follows from `Session.run` and supports single `Tensor`, `list`, nested
`list`, `tuple`, `namedtuple`, or `dict`.
side_effects: Function(s) that take a `Tensor`, construct a subgraph, and
return a nonempty list of control dependencies. This can be a single
function or a list of functions.
Returns:
Subscribed tensors, which are identity copies of the passed in tensors
in the same passed in structure, but the graph has been modified
such that these are downstream of the control dependencies for
the side effect graphs. Use these functionally equivalent tensors
instead of the passed in tensors for further construction or running.
"""
if not hasattr(side_effects, '__iter__'):
side_effects = [side_effects]
control_outputs = _ControlOutputCache()
result = _recursive_apply(
tensors, lambda t: _scoped_subscribe(t, side_effects, control_outputs))
return result
|
tensorflow-master
|
tensorflow/python/framework/subscribe.py
|
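A graph-mode sketch of the API described in subscribe()'s docstring, closely mirroring the testSideEffect case in the test file that follows: the returned identity tensor carries control dependencies on the side-effect op, so the py_func fires whenever the tensor is fetched. The `record` function and `seen` list are illustrative names only.
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import subscribe
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops

seen = []


def record(t):
  seen.append(t)
  return t


with ops.Graph().as_default():
  a = constant_op.constant(1)
  b = constant_op.constant(2)
  c = math_ops.add(a, b)
  # Replace `c` with an identity tensor that triggers the recording side effect.
  c = subscribe.subscribe(
      c, lambda t: script_ops.py_func(record, [t], [t.dtype]))
  with session_lib.Session() as sess:
    print(sess.run(c))  # 3

print(seen)  # [3]: the side effect ran when `c` was fetched.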
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for exposed tensorflow versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import versions
from tensorflow.python.platform import test
class VersionTest(test.TestCase):
def testVersion(self):
self.assertEqual(type(versions.__version__), str)
self.assertEqual(type(versions.VERSION), str)
# This pattern will need to grow as we include alpha, builds, etc.
self.assertRegexpMatches(versions.__version__,
r'^\d+\.\d+\.(\d+(\-\w+)?|head)$')
self.assertRegexpMatches(versions.VERSION,
r'^\d+\.\d+\.(\d+(\-\w+)?|head)$')
def testGraphDefVersion(self):
version = versions.GRAPH_DEF_VERSION
min_consumer = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
min_producer = versions.GRAPH_DEF_VERSION_MIN_PRODUCER
for v in version, min_consumer, min_producer:
self.assertEqual(type(v), int)
self.assertLessEqual(0, min_consumer)
self.assertLessEqual(0, min_producer)
self.assertLessEqual(min_producer, version)
def testGitAndCompilerVersion(self):
self.assertEqual(type(versions.__git_version__), str)
self.assertEqual(type(versions.__compiler_version__), str)
self.assertEqual(type(versions.GIT_VERSION), str)
self.assertEqual(type(versions.COMPILER_VERSION), str)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/framework/versions_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.subscribe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import subscribe
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SubscribeTest(test_util.TensorFlowTestCase):
def _ExpectSubscribedIdentities(self, container):
"""Convenience function to test a container of subscribed identities."""
self.assertTrue(
all(subscribe._is_subscribed_identity(x) for x in container))
@test_util.run_deprecated_v1
def testSideEffect(self):
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
with ops.control_dependencies([c]):
d = constant_op.constant(42)
n = math_ops.negative(c)
shared = []
def sub(t):
shared.append(t)
return t
c0 = c
self.assertTrue(c0.op in d.op.control_inputs)
c = subscribe.subscribe(c,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
# Verify that control dependencies are correctly moved to the subscription.
self.assertFalse(c0.op in d.op.control_inputs)
self.assertTrue(c.op in d.op.control_inputs)
with self.cached_session() as sess:
c_out = self.evaluate([c])
n_out = self.evaluate([n])
d_out = self.evaluate([d])
self.assertEqual(n_out, [-2])
self.assertEqual(c_out, [2])
self.assertEqual(d_out, [42])
self.assertEqual(shared, [2, 2, 2])
@test_util.run_deprecated_v1
def testSupportedTypes(self):
"""Confirm that supported types are correctly detected and handled."""
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
def sub(t):
return t
# Tuples.
subscribed = subscribe.subscribe(
(a, b), lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, tuple)
self._ExpectSubscribedIdentities(subscribed)
# Lists.
subscribed = subscribe.subscribe(
[a, b], lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, list)
self._ExpectSubscribedIdentities(subscribed)
# Dictionaries.
subscribed = subscribe.subscribe({
'first': a,
'second': b
}, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, dict)
self._ExpectSubscribedIdentities(subscribed.values())
# Namedtuples.
# pylint: disable=invalid-name
TensorPair = collections.namedtuple('TensorPair', ['first', 'second'])
# pylint: enable=invalid-name
pair = TensorPair(a, b)
subscribed = subscribe.subscribe(
pair, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, TensorPair)
self._ExpectSubscribedIdentities(subscribed)
# Expect an exception to be raised for unsupported types.
with self.assertRaisesRegexp(TypeError, 'has invalid type'):
subscribe.subscribe(c.name,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
@test_util.run_deprecated_v1
def testCaching(self):
"""Confirm caching of control output is recalculated between calls."""
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.control_dependencies([a]):
c = constant_op.constant(42)
shared = {}
def sub(t):
shared[t] = shared.get(t, 0) + 1
return t
a = subscribe.subscribe(a,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with ops.control_dependencies([b]):
d = constant_op.constant(11)
    # If subscribe used an outdated cache of control_outputs, then
    # evaluating `d` would not trigger the new subscription.
b = subscribe.subscribe(b,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.cached_session() as sess:
c_out = self.evaluate([c])
d_out = self.evaluate([d])
self.assertEqual(c_out, [42])
self.assertEqual(d_out, [11])
self.assertEqual(shared, {2: 1, 1: 1})
@test_util.run_deprecated_v1
def testIsSubscribedIdentity(self):
"""Confirm subscribed identity ops are correctly detected."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
idop = array_ops.identity(c)
c_sub = subscribe.subscribe(c, [])
self.assertFalse(subscribe._is_subscribed_identity(a))
self.assertFalse(subscribe._is_subscribed_identity(c))
self.assertFalse(subscribe._is_subscribed_identity(idop))
self.assertTrue(subscribe._is_subscribed_identity(c_sub))
@test_util.run_deprecated_v1
def testSubscribeExtend(self):
"""Confirm side effect are correctly added for different input types."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
shared = {}
def sub(t, name):
shared[name] = shared.get(name, 0) + 1
return t
# Subscribe with a first side effect graph, passing an unsubscribed tensor.
sub_graph1 = lambda t: sub(t, 'graph1')
c_sub = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph1, [t], [t.dtype]))
# Add a second side effect graph, passing the tensor returned by the
# previous call to subscribe().
sub_graph2 = lambda t: sub(t, 'graph2')
c_sub2 = subscribe.subscribe(
c_sub, lambda t: script_ops.py_func(sub_graph2, [t], [t.dtype]))
# Add a third side effect graph, passing the original tensor.
sub_graph3 = lambda t: sub(t, 'graph3')
c_sub3 = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph3, [t], [t.dtype]))
# Make sure there's only one identity op matching the source tensor's name.
graph_ops = ops.get_default_graph().get_operations()
name_prefix = c.op.name + '/subscription/Identity'
identity_ops = [op for op in graph_ops if op.name.startswith(name_prefix)]
self.assertEqual(1, len(identity_ops))
# Expect the objects returned by subscribe() to reference the same tensor.
self.assertIs(c_sub, c_sub2)
self.assertIs(c_sub, c_sub3)
# Expect the three side effect graphs to have been evaluated.
with self.cached_session() as sess:
self.evaluate([c_sub])
self.assertIn('graph1', shared)
self.assertIn('graph2', shared)
self.assertIn('graph3', shared)
@test_util.run_v1_only('b/120545219')
def testSubscribeVariable(self):
"""Confirm that variables can be subscribed."""
v1 = variables.VariableV1(0.0)
v2 = variables.VariableV1(4.0)
add = math_ops.add(v1, v2)
assign_v1 = v1.assign(3.0)
shared = []
def sub(t):
shared.append(t)
return t
v1_sub = subscribe.subscribe(
v1, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertTrue(subscribe._is_subscribed_identity(v1_sub))
with self.cached_session() as sess:
# Initialize the variables first.
self.evaluate([v1.initializer])
self.evaluate([v2.initializer])
# Expect the side effects to be triggered when evaluating the add op as
# it will read the value of the variable.
self.evaluate([add])
self.assertEqual(1, len(shared))
# Expect the side effect not to be triggered when evaluating the assign
# op as it will not access the 'read' output of the variable.
self.evaluate([assign_v1])
self.assertEqual(1, len(shared))
self.evaluate([add])
self.assertEqual(2, len(shared))
# Make sure the values read from the variable match the expected ones.
self.assertEqual([0.0, 3.0], shared)
@test_util.run_v1_only('b/120545219')
def testResourceType(self):
"""Confirm that subscribe correctly handles tensors with 'resource' type."""
tensor_array = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name='test',
size=3,
infer_shape=False)
writer = tensor_array.write(0, [[4.0, 5.0]])
reader = writer.read(0)
shared = []
def sub(t):
shared.append(t)
return t
# TensorArray's handle output tensor has a 'resource' type and cannot be
# subscribed as it's not 'numpy compatible' (see dtypes.py).
# Expect that the original tensor is returned when subscribing to it.
tensor_array_sub = subscribe.subscribe(
tensor_array.handle, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIs(tensor_array_sub, tensor_array.handle)
self.assertFalse(subscribe._is_subscribed_identity(tensor_array.handle))
with self.cached_session() as sess:
self.evaluate([reader])
self.assertEqual(0, len(shared))
@test_util.run_deprecated_v1
def testMultipleOutputs(self):
"""Handle subscriptions to multiple outputs from the same op."""
sparse_tensor_1 = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
sparse_tensor_2 = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[2, 3], dense_shape=[3, 4])
# This op has three outputs.
sparse_add = sparse_ops.sparse_add(sparse_tensor_1, sparse_tensor_2)
self.assertEqual(3, len(sparse_add.op.outputs))
c1 = constant_op.constant(1)
with ops.control_dependencies(sparse_add.op.outputs):
# This op depends on all the three outputs.
neg = -c1
shared = []
def sub(t):
shared.append(t)
return t
# Subscribe the three outputs at once.
subscribe.subscribe(sparse_add.op.outputs,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.cached_session() as sess:
self.evaluate([neg])
# All three ops have been processed.
self.assertEqual(3, len(shared))
@test_util.run_deprecated_v1
def test_subscribe_tensors_on_different_devices(self):
"""Side effect ops are added with the same device of the subscribed op."""
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
with ops.device('cpu:0'):
add = math_ops.add(c1, c2)
with ops.device('cpu:1'):
mul = math_ops.multiply(c1, c2)
def sub(t):
return t
add_sub = subscribe.subscribe(
add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
mul_sub = subscribe.subscribe(
mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
# Expect the identity tensors injected by subscribe to have been created
# on the same device as their original tensors.
self.assertNotEqual(add_sub.device, mul_sub.device)
self.assertEqual(add.device, add_sub.device)
self.assertEqual(mul.device, mul_sub.device)
@test_util.run_v1_only('b/120545219')
def test_subscribe_tensors_within_control_flow_context(self):
"""Side effect ops are added with the same control flow context."""
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
x1 = math_ops.add(c1, c2)
x2 = math_ops.multiply(c1, c2)
cond = control_flow_ops.cond(
x1 < x2,
lambda: math_ops.add(c1, c2, name='then'),
lambda: math_ops.subtract(c1, c2, name='else'),
name='cond')
branch = ops.get_default_graph().get_tensor_by_name('cond/then:0')
def context(tensor):
return tensor.op._get_control_flow_context()
self.assertIs(context(x1), context(x2))
self.assertIsNot(context(x1), context(branch))
results = []
def sub(tensor):
results.append(tensor)
return tensor
tensors = [x1, branch, x2]
subscriptions = subscribe.subscribe(
tensors, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
for tensor, subscription in zip(tensors, subscriptions):
self.assertIs(context(tensor), context(subscription))
# Verify that sub(x1) and sub(x2) are in the same context.
self.assertIs(context(subscriptions[0]), context(subscriptions[2]))
# Verify that sub(x1) and sub(branch) are not.
self.assertIsNot(context(subscriptions[0]), context(subscriptions[1]))
with self.cached_session() as sess:
self.evaluate(cond)
self.assertEqual(3, len(results))
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/subscribe_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.sparse_tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
class SparseTensorTest(test_util.TensorFlowTestCase):
def testPythonConstruction(self):
indices = [[1, 2], [2, 0], [3, 4]]
values = [b"a", b"b", b"c"]
shape = [4, 5]
sp_value = sparse_tensor.SparseTensorValue(indices, values, shape)
for sp in [
sparse_tensor.SparseTensor(indices, values, shape),
sparse_tensor.SparseTensor.from_value(sp_value),
sparse_tensor.SparseTensor.from_value(
sparse_tensor.SparseTensor(indices, values, shape))]:
self.assertEqual(sp.indices.dtype, dtypes.int64)
self.assertEqual(sp.values.dtype, dtypes.string)
self.assertEqual(sp.dense_shape.dtype, dtypes.int64)
self.assertEqual(sp.get_shape(), (4, 5))
with self.cached_session() as sess:
value = self.evaluate(sp)
self.assertAllEqual(indices, value.indices)
self.assertAllEqual(values, value.values)
self.assertAllEqual(shape, value.dense_shape)
sess_run_value = self.evaluate(sp)
self.assertAllEqual(sess_run_value.indices, value.indices)
self.assertAllEqual(sess_run_value.values, value.values)
self.assertAllEqual(sess_run_value.dense_shape, value.dense_shape)
def testIsSparse(self):
self.assertFalse(sparse_tensor.is_sparse(3))
self.assertFalse(sparse_tensor.is_sparse("foo"))
self.assertFalse(sparse_tensor.is_sparse(np.array(3)))
self.assertTrue(
sparse_tensor.is_sparse(sparse_tensor.SparseTensor([[0]], [0], [1])))
self.assertTrue(
sparse_tensor.is_sparse(
sparse_tensor.SparseTensorValue([[0]], [0], [1])))
def testConsumers(self):
with context.graph_mode():
sp = sparse_tensor.SparseTensor([[0, 0], [1, 2]], [1.0, 3.0], [3, 4])
w = ops.convert_to_tensor(np.ones([4, 1], np.float32))
out = sparse_ops.sparse_tensor_dense_matmul(sp, w)
self.assertEqual(len(sp.consumers()), 1)
self.assertEqual(sp.consumers()[0], out.op)
dense = sparse_ops.sparse_tensor_to_dense(sp)
self.assertEqual(len(sp.consumers()), 2)
self.assertIn(dense.op, sp.consumers())
self.assertIn(out.op, sp.consumers())
class ConvertToTensorOrSparseTensorTest(test_util.TensorFlowTestCase):
def test_convert_dense(self):
with self.cached_session():
value = [42, 43]
from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(
value)
self.assertAllEqual(value, self.evaluate(from_value))
@test_util.run_deprecated_v1
def test_convert_sparse(self):
with self.cached_session():
indices = [[0, 1], [1, 0]]
values = [42, 43]
shape = [2, 2]
sparse_tensor_value = sparse_tensor.SparseTensorValue(
indices, values, shape)
st = sparse_tensor.SparseTensor.from_value(sparse_tensor_value)
from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(
sparse_tensor_value).eval()
from_tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(st).eval()
for convertee in [from_value, from_tensor]:
self.assertAllEqual(sparse_tensor_value.indices, convertee.indices)
self.assertAllEqual(sparse_tensor_value.values, convertee.values)
self.assertAllEqual(
sparse_tensor_value.dense_shape, convertee.dense_shape)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/sparse_tensor_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Indexed slices."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import warnings
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import type_spec
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Use LazyLoader to avoid circular dependencies.
#
# Note: these can all be changed to regular imports once all code has been
# updated to refer to the symbols defined in this module directly, rather than
# using the backwards-compatible aliases in ops.py. (E.g.,
# "indexed_slices.IndexedSlices" rather than "ops.IndexedSlices".)
math_ops = LazyLoader(
"math_ops", globals(),
"tensorflow.python.ops.math_ops")
ops = LazyLoader(
"ops", globals(), "tensorflow.python.framework.ops")
tensor_spec = LazyLoader(
"tensor_spec", globals(),
"tensorflow.python.framework.tensor_spec")
tensor_util = LazyLoader(
"tensor_util", globals(),
"tensorflow.python.framework.tensor_util")
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
# pylint: enable=protected-access
@tf_export("IndexedSlices")
class IndexedSlices(_TensorLike, composite_tensor.CompositeTensor):
"""A sparse representation of a set of tensor slices at given indices.
This class is a simple wrapper for a pair of `Tensor` objects:
* `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
* `indices`: A 1-D integer `Tensor` with shape `[D0]`.
An `IndexedSlices` is typically used to represent a subset of a larger
tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
The values in `indices` are the indices in the first dimension of
the slices that have been extracted from the larger tensor.
The dense tensor `dense` represented by an `IndexedSlices` `slices` has
```python
dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
```
The `IndexedSlices` class is used principally in the definition of
gradients for operations that have sparse gradients
(e.g. `tf.gather`).
Contrast this representation with
`tf.SparseTensor`,
which uses multi-dimensional indices and scalar values.
"""
def __init__(self, values, indices, dense_shape=None):
"""Creates an `IndexedSlices`."""
ops._get_graph_from_inputs([values, indices, dense_shape]) # pylint: disable=protected-access
self._values = values
self._indices = indices
self._dense_shape = dense_shape
@property
def values(self):
"""A `Tensor` containing the values of the slices."""
return self._values
@property
def indices(self):
"""A 1-D `Tensor` containing the indices of the slices."""
return self._indices
@property
def dense_shape(self):
"""A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
return self._dense_shape
@property
def name(self):
"""The name of this `IndexedSlices`."""
return self.values.name
@property
def device(self):
"""The name of the device on which `values` will be produced, or `None`."""
return self.values.device
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self.values.dtype
@property
def graph(self):
"""The `Graph` that contains the values, indices, and shape tensors."""
return self._values.graph
def __str__(self):
return "IndexedSlices(indices=%s, values=%s%s)" % (
self._indices, self._values,
(", dense_shape=%s" %
self._dense_shape) if self._dense_shape is not None else "")
def __neg__(self):
return IndexedSlices(-self.values, self.indices, self.dense_shape)
@property
def _type_spec(self):
indices_shape = self._indices.shape.merge_with(self._values.shape[:1])
dense_shape = tensor_shape.TensorShape([None]).concatenate(
self._values.shape[1:])
if self._dense_shape is not None:
dense_shape_dtype = self._dense_shape.dtype
dense_shape = dense_shape.merge_with(
tensor_util.constant_value_as_shape(self._dense_shape))
else:
dense_shape_dtype = None
return IndexedSlicesSpec(dense_shape, self.dtype, self._indices.dtype,
dense_shape_dtype, indices_shape)
def _shape_invariant_to_type_spec(self, shape):
# From tf.while_loop docs: "If a loop variable is an IndexedSlices, the
# shape invariant must be a shape invariant of the values tensor of the
# IndexedSlices. It means the shapes of the three tensors of the
# IndexedSlices are (shape, [shape[0]], [shape.ndims])."
indices_shape = shape[:1]
dense_shape = tensor_shape.TensorShape([None]).concatenate(shape[1:])
if self._dense_shape is None:
dense_shape_dtype = None
else:
dense_shape_dtype = self._dense_shape.dtype
return IndexedSlicesSpec(dense_shape, self.dtype, self._indices.dtype,
dense_shape_dtype, indices_shape)
def consumers(self):
return self._consumers()
IndexedSlicesValue = collections.namedtuple(
"IndexedSlicesValue", ["values", "indices", "dense_shape"])
@tf_export("IndexedSlicesSpec")
class IndexedSlicesSpec(type_spec.TypeSpec):
"""Type specification for a `tf.IndexedSlices`."""
__slots__ = ["_shape", "_values_dtype", "_indices_dtype",
"_dense_shape_dtype", "_indices_shape"]
value_type = property(lambda self: IndexedSlices)
def __init__(self, shape=None, dtype=dtypes.float32,
               indices_dtype=dtypes.int64, dense_shape_dtype=None,
indices_shape=None):
"""Constructs a type specification for a `tf.IndexedSlices`.
Args:
shape: The dense shape of the `IndexedSlices`, or `None` to allow any
dense shape.
dtype: `tf.DType` of values in the `IndexedSlices`.
indices_dtype: `tf.DType` of the `indices` in the `IndexedSlices`. One
of `tf.int32` or `tf.int64`.
dense_shape_dtype: `tf.DType` of the `dense_shape` in the `IndexedSlices`.
One of `tf.int32`, `tf.int64`, or `None` (if the `IndexedSlices` has
no `dense_shape` tensor).
indices_shape: The shape of the `indices` component, which indicates
how many slices are in the `IndexedSlices`.
"""
self._shape = tensor_shape.as_shape(shape)
self._values_dtype = dtypes.as_dtype(dtype)
self._indices_dtype = dtypes.as_dtype(indices_dtype)
if dense_shape_dtype is None:
self._dense_shape_dtype = None
else:
self._dense_shape_dtype = dtypes.as_dtype(dense_shape_dtype)
self._indices_shape = tensor_shape.as_shape(indices_shape)
def _serialize(self):
return (self._shape, self._values_dtype, self._indices_dtype,
self._dense_shape_dtype, self._indices_shape)
@property
def _component_specs(self):
value_shape = self._indices_shape.concatenate(self._shape[1:])
specs = [
tensor_spec.TensorSpec(value_shape, self._values_dtype),
tensor_spec.TensorSpec(self._indices_shape, self._indices_dtype)]
if self._dense_shape_dtype is not None:
specs.append(
tensor_spec.TensorSpec([self._shape.ndims], self._dense_shape_dtype))
return specs
def _to_components(self, value):
if value.dense_shape is None:
return (value.values, value.indices)
else:
return (value.values, value.indices, value.dense_shape)
def _from_components(self, tensor_list):
return IndexedSlices(*tensor_list)
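# A hedged sketch, added for illustration only; it is not part of the original
# module. It builds an `IndexedSlicesSpec` matching the constructor docstring
# above: float32 values, int64 indices, an int64 dense_shape, and an
# unspecified number of slices. The helper name is an assumption.
def _example_indexed_slices_spec():
  spec = IndexedSlicesSpec(
      shape=[None, 3],
      dtype=dtypes.float32,
      indices_dtype=dtypes.int64,
      dense_shape_dtype=dtypes.int64)
  # `spec.value_type` is `IndexedSlices`; the spec can be used wherever a
  # `tf.TypeSpec` is accepted.
  return spec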
@tf_export(v1=["convert_to_tensor_or_indexed_slices"])
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_indexed_slices(
value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_indexed_slices(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to a `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, ops.EagerTensor) and not context.executing_eagerly():
return ops.internal_convert_to_tensor(
value, dtype=dtype, name=name, as_ref=as_ref)
elif isinstance(value, _TensorLike):
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return ops.internal_convert_to_tensor(
value, dtype=dtype, name=name, as_ref=as_ref)
def internal_convert_n_to_tensor_or_indexed_slices(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `IndexedSlices`, `SparseTensor` and/or `None` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a sequence.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_indexed_slices(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_indexed_slices(
values=values, dtype=dtype, name=name, as_ref=False)
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _indexed_slices_to_tensor(value, dtype=None, name=None, as_ref=False):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
as_ref: True if a ref is requested.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
    ValueError: If `dtype` is incompatible with the dtype of `value`, or if
      `value` has no `dense_shape`.
"""
_ = as_ref
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
(dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
if not context.executing_eagerly():
dense_shape_value = tensor_util.constant_value(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d "
"elements. This may consume a large amount of memory." %
num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0], name=name)
tensor_conversion_registry.register_tensor_conversion_function(
IndexedSlices, _indexed_slices_to_tensor)
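# A hedged sketch, added for illustration only; it is not part of the original
# module. It round-trips an `IndexedSlices` through the conversion function
# registered above, which materializes the rule documented on the class
# (dense[slices.indices[i]] = slices.values[i]) via `unsorted_segment_sum`,
# leaving unlisted rows as zeros. The helper name and the local import of
# `constant_op` are assumptions made to keep the sketch self-contained.
def _example_indexed_slices_to_dense():
  from tensorflow.python.framework import constant_op

  values = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
  indices = constant_op.constant([0, 2], dtype=dtypes.int64)
  dense_shape = constant_op.constant([4, 2], dtype=dtypes.int64)
  slices = IndexedSlices(values, indices, dense_shape)
  # `ops.convert_to_tensor` dispatches to `_indexed_slices_to_tensor` for
  # `IndexedSlices` values because of the registration above.
  return ops.convert_to_tensor(slices)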
|
tensorflow-master
|
tensorflow/python/framework/indexed_slices.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import pickle
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ErrorsTest(test.TestCase):
def _CountReferences(self, typeof):
"""Count number of references to objects of type |typeof|."""
objs = gc.get_objects()
ref_count = 0
for o in objs:
try:
if isinstance(o, typeof):
ref_count += 1
      # Certain versions of Python keep a weakref to deleted objects.
except ReferenceError:
pass
return ref_count
def testUniqueClassForEachErrorCode(self):
for error_code, exc_type in [
(errors.CANCELLED, errors_impl.CancelledError),
(errors.UNKNOWN, errors_impl.UnknownError),
(errors.INVALID_ARGUMENT, errors_impl.InvalidArgumentError),
(errors.DEADLINE_EXCEEDED, errors_impl.DeadlineExceededError),
(errors.NOT_FOUND, errors_impl.NotFoundError),
(errors.ALREADY_EXISTS, errors_impl.AlreadyExistsError),
(errors.PERMISSION_DENIED, errors_impl.PermissionDeniedError),
(errors.UNAUTHENTICATED, errors_impl.UnauthenticatedError),
(errors.RESOURCE_EXHAUSTED, errors_impl.ResourceExhaustedError),
(errors.FAILED_PRECONDITION, errors_impl.FailedPreconditionError),
(errors.ABORTED, errors_impl.AbortedError),
(errors.OUT_OF_RANGE, errors_impl.OutOfRangeError),
(errors.UNIMPLEMENTED, errors_impl.UnimplementedError),
(errors.INTERNAL, errors_impl.InternalError),
(errors.UNAVAILABLE, errors_impl.UnavailableError),
(errors.DATA_LOSS, errors_impl.DataLossError),
]:
# pylint: disable=protected-access
self.assertTrue(
isinstance(
errors_impl._make_specific_exception(None, None, None,
error_code), exc_type))
# error_code_from_exception_type and exception_type_from_error_code should
# be consistent with operation result.
self.assertEqual(error_code,
errors_impl.error_code_from_exception_type(exc_type))
# pylint: enable=protected-access
def testKnownErrorClassForEachErrorCodeInProto(self):
for error_code in error_codes_pb2.Code.values():
# pylint: disable=line-too-long
if error_code in (
error_codes_pb2.OK, error_codes_pb2.
DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
):
continue
# pylint: enable=line-too-long
with warnings.catch_warnings(record=True) as w:
# pylint: disable=protected-access
exc = errors_impl._make_specific_exception(None, None, None, error_code)
# pylint: enable=protected-access
self.assertEqual(0, len(w)) # No warning is raised.
self.assertTrue(isinstance(exc, errors_impl.OpError))
self.assertTrue(errors_impl.OpError in exc.__class__.__bases__)
def testUnknownErrorCodeCausesWarning(self):
with warnings.catch_warnings(record=True) as w:
# pylint: disable=protected-access
exc = errors_impl._make_specific_exception(None, None, None, 37)
# pylint: enable=protected-access
self.assertEqual(1, len(w))
self.assertTrue("Unknown error code: 37" in str(w[0].message))
self.assertTrue(isinstance(exc, errors_impl.OpError))
with warnings.catch_warnings(record=True) as w:
# pylint: disable=protected-access
exc = errors_impl.error_code_from_exception_type("Unknown")
# pylint: enable=protected-access
self.assertEqual(1, len(w))
self.assertTrue("Unknown class exception" in str(w[0].message))
self.assertTrue(isinstance(exc, errors_impl.OpError))
def testStatusDoesNotLeak(self):
try:
pywrap_tensorflow.DeleteFile(compat.as_bytes("/DOES_NOT_EXIST/"))
except:
pass
gc.collect()
self.assertEqual(0, self._CountReferences(c_api_util.ScopedTFStatus))
def testPickleable(self):
for error_code in [
errors.CANCELLED,
errors.UNKNOWN,
errors.INVALID_ARGUMENT,
errors.DEADLINE_EXCEEDED,
errors.NOT_FOUND,
errors.ALREADY_EXISTS,
errors.PERMISSION_DENIED,
errors.UNAUTHENTICATED,
errors.RESOURCE_EXHAUSTED,
errors.FAILED_PRECONDITION,
errors.ABORTED,
errors.OUT_OF_RANGE,
errors.UNIMPLEMENTED,
errors.INTERNAL,
errors.UNAVAILABLE,
errors.DATA_LOSS,
]:
# pylint: disable=protected-access
exc = errors_impl._make_specific_exception(None, None, None, error_code)
# pylint: enable=protected-access
unpickled = pickle.loads(pickle.dumps(exc))
self.assertEqual(exc.node_def, unpickled.node_def)
self.assertEqual(exc.op, unpickled.op)
self.assertEqual(exc.message, unpickled.message)
self.assertEqual(exc.error_code, unpickled.error_code)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/framework/errors_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import time
import numpy as np
from tensorflow.core.framework import function_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework.errors import InvalidArgumentError
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _OptimizerOptions():
for cse in [False, True]:
for inline in [False, True]:
for cfold in [False, True]:
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=cse,
do_function_inlining=inline,
do_constant_folding=cfold)))
if cse:
cfg.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
else:
cfg.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
if inline:
cfg.graph_options.rewrite_options.function_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
else:
cfg.graph_options.rewrite_options.function_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
if cfold:
cfg.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.ON)
else:
cfg.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
yield cfg
class FunctionTest(test.TestCase):
"""Test methods for verifying Function support.
These test methods are used as mix-ins in two test cases: with
and without C API support.
"""
def testIdentity(self):
@function.Defun(dtypes.float32, func_name="MyIdentity")
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], self.evaluate(call))
@test_util.run_v1_only("b/120545219")
def testIdentityImplicitDeref(self):
@function.Defun(dtypes.float32, func_name="MyIdentity")
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
var = variables.VariableV1([18.0])
call = MyIdentityFunc(var._ref()) # pylint: disable=protected-access
self.assertEqual("MyIdentity", call.op.name)
for cfg in _OptimizerOptions():
with session.Session(config=cfg) as sess:
self.evaluate(var.initializer)
self.assertAllEqual([18.0], self.evaluate(call))
def testIdentityOutputName(self):
@function.Defun(
dtypes.float32, func_name="MyIdentity", out_names=["my_result_name"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], self.evaluate(call))
def testTooManyOutputNames(self):
@function.Defun(
dtypes.float32,
func_name="MyIdentity",
out_names=["my_result1", "my_result2"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
(r"output names must be either empty or equal in size to outputs. "
"output names size = 2 outputs size = 1")):
MyIdentityFunc([18.0])
def testDefineFunction2Args(self):
@function.Defun(dtypes.float32, dtypes.float32, func_name="APlus2B")
def APlus2B(a, b):
return a + b * 2
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], self.evaluate(call))
def testFunctionWithNoOutput(self):
@function.Defun(dtypes.float32, dtypes.float32)
def APlus2B(a, b):
c = a + b * 2 # Create some ops to have nodes in the body
print(c) # Using 'print' to make lint happy
with ops.Graph().as_default():
# Call function. There should be no exceptions.
APlus2B([1.0], [2.0])
def testDefineFunction2ArgsOutputName(self):
@function.Defun(
dtypes.float32,
dtypes.float32,
func_name="APlus2B",
out_names=["my_result_name"])
def APlus2B(a, b):
return a + b * 2
# APlus2B is stateless.
self.assertEqual([], APlus2B.stateful_ops)
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], self.evaluate(call))
def testDefineFunctionDuplicateOutputs(self):
@function.Defun(dtypes.float32, func_name="Duplicate")
def Duplicate(a):
b = a + 1.0
return b, b
g = ops.Graph()
with g.as_default():
Duplicate([3.0])
func_sig = g.as_graph_def().library.function[0].signature
# The names given to both outputs should be different
# even though the same tensor is emitted to both.
out_names = [a.name for a in func_sig.output_arg]
self.assertEqual(2, len(out_names))
self.assertNotEqual(out_names[0], out_names[1])
def testGradientFunc(self):
@function.Defun(dtypes.float32, func_name="XSquarePlusOneFn")
def XSquarePlusOne(x):
return x * x + 1.0
@function.Defun(dtypes.float32, dtypes.float32)
def XSquarePlusOneGrad(x, dy):
dx = functional_ops.symbolic_gradient(
input=[x, dy], Tout=[dtypes.float32], f="XSquarePlusOneFn", name="dx")
return dx
g = ops.Graph()
with g.as_default():
call_f = XSquarePlusOne([2.0])
call_g = XSquarePlusOneGrad([2.0], [0.1])
with session.Session() as sess:
self.assertAllClose([5.0], self.evaluate(call_f))
self.assertAllClose([0.4], self.evaluate(call_g))
def testTanhSymGrad(self):
@function.Defun(dtypes.float32)
def Forward(x):
return math_ops.reduce_sum(math_ops.tanh(x))
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y = Forward(x)
dx = gradients_impl.gradients([y], [x])
inp = np.array([-1, 1, 2, -2], dtype=np.float32)
feed = {x: inp}
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True)))
with session.Session(graph=g, config=cfg) as sess:
out, = sess.run(dx, feed)
self.assertAllClose(1 - np.square(np.tanh(inp)), out)
def testCustomGradient(self):
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype)
def XentLossGrad(logits, labels, dloss):
dlogits = array_ops.reshape(dloss, [-1, 1]) * (
nn_ops.softmax(logits) - labels)
dlabels = array_ops.zeros_like(labels)
# Takes exp(dlogits) to differentiate it from the "correct" gradient.
return math_ops.exp(dlogits), dlabels
@function.Defun(dtype, dtype, grad_func=XentLossGrad)
def XentLoss(logits, labels):
return math_ops.reduce_sum(labels * math_ops.log(nn_ops.softmax(logits)),
1)
g = ops.Graph()
with g.as_default():
logits = array_ops.placeholder(dtype)
labels = array_ops.placeholder(dtype)
loss = XentLoss(logits, labels)
dlogits = gradients_impl.gradients([loss], [logits])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1)
y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
with session.Session(graph=g, config=cfg) as sess:
out, = sess.run(dlogits, {logits: x, labels: y})
self.assertAllClose(out, np.exp(prob - y))
@test_util.disable_xla("b/124286351") # No error is raised
def testCustomGradientError(self):
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype)
def Grad(x, dy, dz):
# Should have returned 1 result.
return x, dy + dz
@function.Defun(dtype, grad_func=Grad)
def Forward(x):
return x, x
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(dtype)
out = math_ops.add_n(Forward(inp))
dinp = gradients_impl.gradients(out, [inp])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
with session.Session(graph=g) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"SymGrad expects to return 1.*but get 2.*instead"):
_ = sess.run(dinp, {inp: x})
def testSymGradShape(self):
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, [25, 4])
y = array_ops.placeholder(dtypes.float32, [200, 100])
dz = array_ops.placeholder(dtypes.float32, [1])
      # We assume Foo is a function of (x, y) -> (z). Then, Foo's
# gradient function is (x, y, dz) -> (dx, dy). dx's shape
# should be the same as x's; and dy's shape should be the same
# as y's.
dx, dy = functional_ops.symbolic_gradient(
input=[x, y, dz], Tout=[dtypes.float32] * 2, f="Foo")
self.assertEqual(x.get_shape(), dx.get_shape())
self.assertEqual(y.get_shape(), dy.get_shape())
@test_util.run_deprecated_v1
def testSymGradAttr(self):
@function.Defun(noinline=True)
def Foo(x):
return x * 2
self.assertTrue(
Foo.instantiate([dtypes.float32]).definition.attr["_noinline"].b)
g = ops.Graph()
with g.as_default():
x = constant_op.constant(3.0)
y = Foo(x)
dx, = gradients_impl.gradients(y, [x])
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True)))
with self.session(graph=g, config=cfg):
self.assertAllClose(y.eval(), 6.)
self.assertAllClose(dx.eval(), 2.)
def _testZNoDepOnY(self, use_const_grad_ys):
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(x, y): # pylint: disable=unused-argument
return x * 2
with ops.Graph().as_default():
      # z = Foo(x, y). z does not depend on y.
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
z = Foo(x, y)
if use_const_grad_ys:
dx, dy = gradients_impl.gradients([z], [x, y], grad_ys=[1.0])
else:
dx, dy = gradients_impl.gradients([z], [x, y])
with session.Session() as sess:
dx_val, dy_val = self.evaluate([dx, dy])
self.assertEqual([2.0], dx_val)
self.assertEqual([0.0], dy_val)
def testZNoDepOnY(self):
self._testZNoDepOnY(False)
def testZNoDepOnYConstGradYs(self):
# Tests for constant folding of grad_ys
self._testZNoDepOnY(True)
def testDefineFunctionNoArgs(self):
@function.Defun(func_name="AConstant")
def AConstant():
return constant_op.constant([42])
with ops.Graph().as_default():
call = AConstant()
self.assertEqual("AConstant", call.op.name)
with session.Session() as sess:
self.assertAllEqual([42], self.evaluate(call))
def testDefineFunctionNames(self):
@function.Defun(dtypes.float32, func_name="Foo")
def Foo(a):
return a + 1
with ops.Graph().as_default():
call1 = Foo([1.0])
self.assertEqual("Foo", call1.op.name)
call2 = Foo([1.0])
self.assertEqual("Foo_1", call2.op.name)
# pylint: disable=unexpected-keyword-arg
call3 = Foo([1.0], name="mine")
self.assertEqual("mine", call3.op.name)
with ops.name_scope("my"):
call4 = Foo([1.0], name="precious")
self.assertEqual("my/precious", call4.op.name)
def testNoOp(self):
@function.Defun(dtypes.float32)
def Foo(x):
y = logging_ops.Print(x, [], "Hello")
with ops.control_dependencies([y]):
z = control_flow_ops.no_op()
with ops.control_dependencies([z]):
return x * 2
with ops.Graph().as_default(), self.cached_session():
z = Foo(constant_op.constant(3.0))
self.assertAllEqual(z.eval(), 6.0)
def testAssertOp(self):
@function.Defun(dtypes.float32)
def Foo(x):
check = gen_logging_ops._assert(math_ops.greater(x, 0), [x])
with ops.control_dependencies([check]):
return x * 2
# Foo contains a stateful op (Assert).
self.assertEqual([("Assert", "Assert")], Foo.stateful_ops)
g = ops.Graph()
with g.as_default(), self.cached_session():
self.assertAllEqual(Foo(constant_op.constant(3.0)).eval(), 6.0)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion failed.*-3"):
self.assertAllEqual(Foo(constant_op.constant(-3.0)).eval(), 6.0)
@test_util.run_deprecated_v1
def testAssertWrapper(self):
@function.Defun(dtypes.float32)
def MyFn(x):
with ops.control_dependencies(
[control_flow_ops.Assert(math_ops.less_equal(x, 10.0), [x])]):
return array_ops.identity(x)
with self.cached_session():
self.assertEqual(1.0, MyFn(1.0).eval())
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
_ = MyFn(100.0).eval()
@test_util.run_deprecated_v1
def testWhileLoopCallsFunc(self):
with self.session(use_gpu=True) as sess:
@function.Defun(dtypes.float32)
def Times2(x):
constant_two = constant_op.constant(2, dtypes.int32)
two_on_gpu = math_ops.cast(constant_two, dtypes.float32)
return x * two_on_gpu
def Body(x):
x2 = Times2(x)
x2.set_shape([])
return x2
loop = control_flow_ops.while_loop(lambda x: x < 1e5, Body, [1.0])
ans = self.evaluate(loop)
self.assertAllClose(ans, 131072.)
@test_util.run_deprecated_v1
def testControlFlowStrictness(self):
"""Inlined functions must not execute in a untaken control flow branch."""
@function.Defun(dtypes.int32)
def AssertFail(x):
# Assertion that always fails and does not have a data dependency on `x`.
assert_false = control_flow_ops.Assert(False, [42])
with ops.control_dependencies([assert_false]):
return array_ops.identity(x)
with ops.device("CPU"):
pred = array_ops.placeholder(dtypes.bool)
x = array_ops.placeholder(dtypes.int32)
cond = control_flow_ops.cond(pred, lambda: x + 1, lambda: AssertFail(x))
# pylint: disable=unnecessary-lambda
loop = control_flow_ops.while_loop(lambda y: pred,
lambda y: AssertFail(y), [x])
# pylint: enable=unnecessary-lambda
rewriter_config = rewriter_config_pb2.RewriterConfig(
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
# Enables inlining.
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True),
rewrite_options=rewriter_config))
with session.Session(config=config) as sess:
# Since the 'False' branch is not taken, the assertion should not fire.
self.assertEqual(4, sess.run(cond, {pred: True, x: 3}))
# The assertion should still fire if the False branch is taken.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
sess.run(cond, {pred: False, x: 3})
# Similarly for loops.
self.assertEqual(3, sess.run(loop, {pred: False, x: 3}))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
sess.run(loop, {pred: True, x: 3})
@test_util.run_deprecated_v1
def testVar(self):
@function.Defun(dtypes.float32)
def Foo(x):
return x * x + 1
g = ops.Graph()
with g.as_default():
v = variables.Variable(constant_op.constant(10.0))
z = Foo(v)
with self.session(graph=g):
variables.global_variables_initializer().run()
self.assertAllEqual(z.eval(), 101.)
@test_util.run_deprecated_v1
def testResourceVarAsImplicitInput(self):
g = ops.Graph()
with g.as_default(), ops.device("cpu:0"):
expected_type = dtypes.float32
expected_shape = tensor_shape.TensorShape((4, 4))
v = variable_scope.get_variable(
"var", expected_shape, expected_type, use_resource=True)
@function.Defun()
def Foo():
captured = array_ops.identity(v)
self.assertEqual(expected_type, captured.dtype)
self.assertEqual(expected_shape, captured.shape)
return captured, array_ops.shape(captured)
expected_val = v.value()
actual_val, actual_shape = Foo()
with self.session(graph=g):
v.initializer.run()
self.assertAllEqual(expected_val.eval(), self.evaluate(actual_val))
self.assertAllEqual(expected_shape, self.evaluate(actual_shape))
def testDefineErrors(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "can not return None"):
@function.Defun()
def TwoNone():
return None, None
_ = TwoNone.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def DefaultArg(unused_a=12):
return constant_op.constant([1])
_ = DefaultArg.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def KwArgs(**unused_kwargs):
return constant_op.constant([1])
_ = KwArgs.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(dtypes.float32)
def PlusMinusV2(a, b):
return a + b, b - a
_ = PlusMinusV2.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(dtypes.float32, dtypes.float32, dtypes.float32)
def PlusMinusV3(a, b):
return a + b, b - a
_ = PlusMinusV3.definition
def testCallErrors(self):
@function.Defun()
def Const():
return constant_op.constant(1)
@function.Defun(dtypes.int32)
def PlusOne(a):
return a + 1
@function.Defun(dtypes.int32, dtypes.int32)
def PlusMinus(a, b):
return a + b, b - a
with ops.Graph().as_default():
_ = Const()
# pylint: disable=too-many-function-args
# pylint: disable=unexpected-keyword-arg
# pylint: disable=no-value-for-parameter
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1)
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne()
_ = PlusOne(1)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus()
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus(1)
_ = PlusMinus(1, 2)
_ = PlusOne(1, name="p1")
with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"):
_ = PlusOne(1, device="/device:GPU:0")
def testFunctionDecorator(self):
@function.Defun(dtypes.float32, func_name="Minus1")
def Minus1(b):
return b - 1.0
with ops.Graph().as_default():
call1 = Minus1([2.])
self.assertTrue(isinstance(Minus1, function._DefinedFunction))
self.assertEqual(Minus1.name, "Minus1")
# pylint: disable=unexpected-keyword-arg
call2 = Minus1(call1, name="next")
# pylint: enable=unexpected-keyword-arg
self.assertEqual("next", call2.op.name)
with session.Session() as sess:
self.assertAllEqual([1], self.evaluate(call1))
self.assertAllEqual([0], self.evaluate(call2))
def testNestedFunction(self):
@function.Defun(dtypes.float32)
def Cube(x):
return x * x * x
@function.Defun(dtypes.float32, dtypes.float32)
def CubeXPlusY(x, y):
return Cube(x) + y
with ops.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.cached_session():
self.assertAllEqual(z.eval(), 25.0)
def testNestedDefinedFunction(self):
@function.Defun(dtypes.float32, dtypes.float32)
def CubeXPlusY(x, y):
@function.Defun(dtypes.float32)
def Cube(x):
return x * x * x
return Cube(x) + y
with ops.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.cached_session():
self.assertAllEqual(z.eval(), 25.0)
def testUnusedFunction(self):
invoked = False
# pylint: disable=unused-variable
@function.Defun()
def Unused():
invoked = True
return constant_op.constant(42.)
self.assertFalse(invoked)
g = ops.Graph()
with g.as_default():
@function.Defun()
def Unused2():
invoked = True
return constant_op.constant(7.)
constant_op.constant(3.)
# pylint: enable=unused-variable
self.assertFalse(invoked)
gdef = g.as_graph_def()
self.assertEqual(0, len(gdef.library.function))
@test_util.run_deprecated_v1
def testReduction(self):
g = ops.Graph()
    # BN0 computes a batch-normalized matrix along rows.
def BN0(x):
mean = math_ops.reduce_mean(x, [0])
var = math_ops.reduce_mean(math_ops.square(x - mean)) # biased var
rstd = math_ops.rsqrt(var + 1e-8)
return (x - mean) * rstd
# Wraps BatchNorm in a tf function.
@function.Defun(dtypes.float32)
def BN1(x):
return BN0(x)
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y0 = BN0(x) # A plain graph
y1 = BN1(x) # A tf function
dx0, = gradients_impl.gradients([y0], [x])
dx1, = gradients_impl.gradients([y1], [x])
# Both should produce the same result and gradient.
with self.session(graph=g) as sess:
vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))})
self.assertAllClose(vals[0], vals[1])
self.assertAllClose(vals[2], vals[3])
@test_util.run_deprecated_v1
def testCapture(self):
g = ops.Graph()
with g.as_default():
w = variables.Variable(constant_op.constant([[1.0]]))
b = variables.Variable(constant_op.constant([2.0]))
# Foo() captures w and b.
@function.Defun(dtypes.float32)
def Foo(x):
# Plus() captures b.
@function.Defun(dtypes.float32)
def Plus(y):
return y + b
return Plus(math_ops.matmul(w, x))
y = Foo(constant_op.constant([[10.]]))
@function.Defun()
def Bar():
return w
z = Bar()
with self.session(graph=g):
variables.global_variables_initializer().run()
self.assertAllEqual(y.eval(), [[12.0]])
self.assertAllEqual(z.eval(), [[1.0]])
def testCaptureControls(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant([10.0])
x = logging_ops.Print(x, [x], "outer")
@function.Defun(dtypes.float32)
def Foo(y):
with ops.control_dependencies([x]):
y = logging_ops.Print(y, [y], "inner")
return y
with self.assertRaisesRegexp(ValueError, "not an element of this graph."):
# NOTE: We still do not support capturing control deps.
_ = Foo(x)
@test_util.run_deprecated_v1
def testCaptureInWhileLoop(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
@function.Defun()
def Foo():
return control_flow_ops.while_loop(lambda i: i < 10, lambda i: i + x,
[0])
y = Foo()
with self.session(graph=g) as sess:
self.assertEqual(self.evaluate(y), 10)
@test_util.run_deprecated_v1
def testCaptureInCond(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
@function.Defun(dtypes.bool)
def Foo(pred):
return control_flow_ops.cond(pred, lambda: x, lambda: x + 1)
y = Foo(True)
z = Foo(False)
with self.session(graph=g) as sess:
self.assertEqual(self.evaluate(y), 1)
self.assertEqual(self.evaluate(z), 2)
@test_util.run_deprecated_v1
def testSignatureHash(self):
    # Foo.Inner and Bar.Inner have identical function bodies but different
    # signatures. They should be treated as two different functions.
@function.Defun()
def Foo(x):
@function.Defun()
def Inner(x):
return x + 10.
return Inner(x)
@function.Defun()
def Bar(x):
@function.Defun()
def Inner(x, unused_y, unused_z):
return x + 10.
return Inner(x, 2., 3.)
g = ops.Graph()
with g.as_default():
x = constant_op.constant(10.0)
y = Foo(x)
z = Bar(x)
with self.session(graph=g) as sess:
v0, v1 = self.evaluate([y, z])
self.assertAllEqual(v0, 20.)
self.assertAllEqual(v1, 20.)
def testShapeFunction(self):
@function.Defun(
dtypes.float32, shape_func=lambda op: [op.inputs[0].get_shape()])
def Foo(x):
return x + 1.0
@function.Defun(
shape_func=lambda op: [[1] + op.inputs[0].get_shape().as_list()])
def Bar(x):
return array_ops.stack([x])
g = ops.Graph()
with g.as_default():
x = Foo([1.0, 2.0])
self.assertEqual(x.get_shape().as_list(), [2])
y = Bar(array_ops.zeros([1, 2, 3]))
self.assertAllEqual(y.get_shape().as_list(), [1, 1, 2, 3])
@test_util.run_deprecated_v1
def testVariableReuse(self):
def LinearWithReuse(input_tensor, reuse=None):
size = input_tensor.shape.dims[1]
with variable_scope.variable_scope("linear", reuse=reuse):
w = variable_scope.get_variable(
"w", shape=[size, size], dtype=input_tensor.dtype)
return math_ops.matmul(input_tensor, w)
@function.Defun(dtypes.float32)
def Foo(inputs):
inputs = array_ops.reshape(inputs, [32, 100])
hidden = LinearWithReuse(inputs)
return LinearWithReuse(hidden, reuse=True)
input_op = array_ops.placeholder(shape=[32, 100], dtype=dtypes.float32)
output_op = Foo(input_op)
global_vars = variables.global_variables()
self.assertEqual(len(global_vars), 1)
self.assertEqual(global_vars[0].name, "linear/w:0")
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
output_val = sess.run(
output_op, feed_dict={input_op: np.random.rand(32, 100)})
self.assertEqual(output_val.shape, (32, 100))
@test_util.run_deprecated_v1
def testFunctionCallInDifferentVariableScopes(self):
@function.Defun(dtypes.float32)
def Foo(inputs):
var = variable_scope.get_variable(
"var",
shape=[10],
dtype=dtypes.float32,
initializer=init_ops.ones_initializer())
return inputs + var
input_op = array_ops.placeholder(shape=[10], dtype=dtypes.float32)
with variable_scope.variable_scope("vs1"):
out1_op = Foo(input_op)
with variable_scope.variable_scope("vs2"):
out2_op = Foo(input_op)
global_vars = variables.global_variables()
self.assertEqual(len(global_vars), 1)
self.assertEqual(global_vars[0].name, "vs1/var:0")
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
out1, out2 = sess.run(
[out1_op, out2_op], feed_dict={input_op: np.linspace(1, 10, 10)})
self.assertAllEqual(out1, np.linspace(2, 11, 10))
self.assertAllEqual(out2, np.linspace(2, 11, 10))
def testTwoInputsSameOp(self):
g = ops.Graph()
with g.as_default():
m = array_ops.placeholder(dtypes.float32)
s, u, v = linalg_ops.svd(m)
ss = math_ops.reduce_sum(s)
uu = math_ops.reduce_sum(u)
vv = math_ops.reduce_sum(v)
result = ss + uu + vv
f = graph_to_function_def.graph_to_function_def(
g,
g.get_operations()[1:], # skip the placeholder
[s, u, v],
[result])
self.assertEqual(len(f.signature.input_arg), 3)
def testGradientWithIntegerFunctionArgument(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(t, x):
return x[t]
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(dtypes.float32)
t = constant_op.constant(0, dtypes.int32)
out = Foo(t, inp)
dinp, = gradients_impl.gradients(out, [inp])
x = np.zeros((2,)).astype(np.float32)
with session.Session(graph=g) as sess:
self.assertAllClose(
np.array([1.0, 0.0]).astype(np.float32), sess.run(dinp, {inp: x}))
@test_util.run_deprecated_v1
def testFunctionMarkedStateful(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(t, x):
return x[t]
@function.Defun(dtypes.int64)
def Bar(x):
return x
# NOTE(mrry): All functions are currently considered stateless by the
# runtime, so we simulate a "stateful" function.
# TODO(b/70565970): Remove this hack when we are able to build stateful
# functions using the API.
# pylint: disable=protected-access
Foo._signature.is_stateful = True
Bar._signature.is_stateful = True
# pylint: enable=protected-access
result_1 = Foo(3, [1.0, 2.0, 3.0, 4.0])
result_2 = Bar(constant_op.constant(100, dtype=dtypes.int64))
with session.Session() as sess:
self.assertEqual(4.0, self.evaluate(result_1))
self.assertEqual(100, self.evaluate(result_2))
self.assertEqual((4.0, 100), sess.run((result_1, result_2)))
@test_util.run_deprecated_v1
def testStatefulFunction(self):
@function.Defun()
def FunctionWithStatelessOp():
return constant_op.constant(42.0)
@function.Defun()
def FunctionWithStatefulOp():
return random_ops.random_uniform([100], maxval=10, dtype=dtypes.int32)
@function.Defun()
def FunctionWithStatelessFunctionCall():
return FunctionWithStatelessOp()
@function.Defun()
def FunctionWithStatefulFunctionCall():
return FunctionWithStatefulOp()
# Test that the `is_stateful` bit is propagated.
self.assertFalse(FunctionWithStatelessOp.definition.signature.is_stateful)
self.assertTrue(FunctionWithStatefulOp.definition.signature.is_stateful)
self.assertFalse(
FunctionWithStatelessFunctionCall.definition.signature.is_stateful)
self.assertTrue(
FunctionWithStatefulFunctionCall.definition.signature.is_stateful)
# Ensure that two invocations of the same random-number-generating
# function produce different results.
result1 = FunctionWithStatefulFunctionCall()
result2 = FunctionWithStatefulFunctionCall()
# Statefulness affects how the function is treated by the various
# optimization passes, so run the test in each optimizer
# configuration.
for config in _OptimizerOptions():
with session.Session(config=config) as sess:
val1, val2 = sess.run((result1, result2))
self.assertFalse(all(val1 == val2))
val3, val4 = sess.run((result1, result2))
self.assertFalse(all(val3 == val1))
self.assertFalse(all(val4 == val2))
@test_util.run_v1_only("currently failing on v2")
def testStatefulFunctionWithWhitelisting(self):
t = random_ops.random_uniform([100], maxval=10, dtype=dtypes.int32)
@function.Defun(capture_by_value=True)
def StatefulFn():
return t + constant_op.constant(3, dtype=dtypes.int32)
# First time we try to capture a stateful RandomUniform op.
with self.assertRaisesRegexp(ValueError, "Cannot capture a stateful node"):
res = StatefulFn()
    # This time we whitelist this op, so that it is recreated.
@function.Defun(capture_by_value=True, whitelisted_stateful_ops=set([t.op]))
def StatefulFn2():
return t + constant_op.constant(3, dtype=dtypes.int32)
res = StatefulFn2()
with session.Session() as sess:
r = sess.run(res)
for i in r:
self.assertGreaterEqual(i, 3)
@test_util.run_deprecated_v1
def testSameFunctionOnTwoDevices(self):
@function.Defun(dtypes.float32)
def AddOne(x):
return x + 1.0
with ops.device("/cpu:0"):
f_0 = AddOne(41.0)
with ops.device("/cpu:1"):
f_1 = AddOne(43.0)
for config in _OptimizerOptions():
config.device_count["CPU"] = 2
with session.Session(config=config) as sess:
self.assertEqual(42.0, self.evaluate(f_0))
self.assertEqual(44.0, self.evaluate(f_1))
self.assertEqual((42.0, 44.0), sess.run((f_0, f_1)))
@test_util.run_deprecated_v1
def testGuaranteedConstsAreCaptured(self):
var = variables.Variable(1.0)
const = array_ops.guarantee_const(var)
also_const = array_ops.identity(const)
still_const = array_ops.identity(also_const)
not_const = still_const + var
also_not_const = array_ops.placeholder(dtypes.float32)
@function.Defun()
def CapturesGuaranteedConst():
output = const + also_const + still_const + not_const + also_not_const
first, second, third, fourth, fifth = function.get_extra_args()
self.assertEqual("GuaranteeConst", first.consumers()[0].node_def.op)
self.assertEqual("GuaranteeConst", second.consumers()[0].node_def.op)
self.assertEqual("GuaranteeConst", third.consumers()[0].node_def.op)
self.assertNotEqual("GuaranteeConst", fourth.consumers()[0].node_def.op)
self.assertNotEqual("GuaranteeConst", fifth.consumers()[0].node_def.op)
return output
with self.session(use_gpu=False) as sess:
self.evaluate(var.initializer)
_ = sess.run(CapturesGuaranteedConst(), {also_not_const: 1.0})
@test_util.run_deprecated_v1
def testSameFunctionDifferentGrads(self):
def PartOne(x):
# Default grad is dx = dy * 2
@function.Defun(dtypes.float32)
def Foo(x):
return x * 2
return Foo(x)
def PartTwo(x):
@function.Defun(dtypes.float32, dtypes.float32)
def Bar(x, dy):
return x + dy # crazy backprop
@function.Defun(dtypes.float32, grad_func=Bar)
def Foo(x):
return x * 2
return Foo(x)
def PartThree(x):
def Bar(op, dy):
return op.inputs[0] * dy / 2 # crazy backprop
@function.Defun(dtypes.float32, python_grad_func=Bar)
def Foo(x):
return x * 2
return Foo(x)
g = ops.Graph()
with g.as_default():
x = constant_op.constant(100.)
x0 = x
y0 = PartOne(x0)
dx0, = gradients_impl.gradients(ys=[y0], xs=[x0])
x1 = x
y1 = PartTwo(x1)
dx1, = gradients_impl.gradients(ys=[y1], xs=[x1])
x2 = x
y2 = PartThree(x2)
dx2, = gradients_impl.gradients(ys=[y2], xs=[x2])
with self.session(graph=g) as sess:
v0, v1, v2 = self.evaluate([dx0, dx1, dx2])
self.assertAllEqual(v0, 2.)
self.assertAllEqual(v1, 101.)
self.assertAllEqual(v2, 50.)
class FunctionsFromProtos(test.TestCase):
def expectFunctionsEqual(self, func, grad_func=None, new_func=None):
if new_func is None:
# Make a copy of func.definition to avoid any bugs masked by using the
# same object
serialized_fdef = func.definition.SerializeToString()
# Serialize and then deserialize `func` to create `new_func`
fdef = function_pb2.FunctionDef.FromString(serialized_fdef)
new_func = function._from_definition(fdef, grad_func=grad_func)
self.assertEqual(func.name, new_func.name)
self.assertEqual(func.definition, new_func.definition)
self.assertEqual(func.grad_func_name, new_func.grad_func_name)
self.assertEqual(func.declared_input_types, new_func.declared_input_types)
self.assertEqual(func.captured_inputs, new_func.captured_inputs)
@test_util.run_deprecated_v1
def testBasic(self):
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(x, y):
return x + y
self.expectFunctionsEqual(Foo)
def testGradFunc(self):
@function.Defun(dtypes.float32, dtypes.float32)
def G(x, dy):
return x * dy
@function.Defun(dtypes.float32, grad_func=G)
def F(x):
return math_ops.exp(x) - math_ops.exp(-x)
self.expectFunctionsEqual(F, grad_func=G)
def testCapturedInputs(self):
c = constant_op.constant(10, dtypes.int64)
@function.Defun(dtypes.int64)
def Foo(x):
return x + c
new_func = function._from_definition(Foo.definition)
self.assertEqual(Foo.name, new_func.name)
self.assertEqual(Foo.definition, new_func.definition)
self.assertEqual(Foo.grad_func_name, new_func.grad_func_name)
# Captured inputs are added as regular inputs to the function definition
self.assertEqual(new_func.declared_input_types,
Foo.declared_input_types + (dtypes.int64,))
self.assertEqual(len(new_func.captured_inputs), 0)
def testNestedFunctions(self):
@function.Defun(dtypes.float32)
def Outer(x):
@function.Defun(dtypes.float32)
def Inner(y):
return y + 1
return Inner(Inner(x))
self.expectFunctionsEqual(Outer)
def testFromLibrary(self):
    # Define some functions with different gradient functions. Note that many of
    # the functions below are identical since function bodies don't matter for
    # this test.
@function.Defun(dtypes.float32, dtypes.float32)
def G1(x, dy):
return x * dy
@function.Defun(dtypes.float32, dtypes.float32)
def G2(x, dy):
return x * dy
# F1 and F2 have the same gradient function
@function.Defun(dtypes.float32, grad_func=G1)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
@function.Defun(dtypes.float32, grad_func=G1)
def F2(x):
return math_ops.exp(x) - math_ops.exp(-x)
# F3 has a different gradient function
@function.Defun(dtypes.float32, grad_func=G2)
def F3(x):
return math_ops.exp(x) - math_ops.exp(-x)
# F4 has no gradient function
@function.Defun(dtypes.float32)
def F4(x):
return math_ops.exp(x) - math_ops.exp(-x)
# Instantiate all functions
g = ops.Graph()
with g.as_default():
c = constant_op.constant(1.0, dtypes.float32)
f1 = F1(c)
f2 = F2(c)
f3 = F3(c)
f4 = F4(c)
gradients_impl.gradients([f1, f2, f3, f4], c)
library = g.as_graph_def().library
new_funcs = function.from_library(library)
def CheckNewFunc(func):
new_func = [f for f in new_funcs if f.name == func.name]
self.assertEqual(len(new_func), 1)
self.expectFunctionsEqual(func, new_func=new_func[0])
CheckNewFunc(G1)
CheckNewFunc(G2)
CheckNewFunc(F1)
CheckNewFunc(F2)
CheckNewFunc(F3)
CheckNewFunc(F4)
def testFromLibraryEmptyLib(self):
library = function_pb2.FunctionDefLibrary()
self.assertEqual(len(function.from_library(library)), 0)
def testFromLibraryMissingFuncDef(self):
@function.Defun(dtypes.float32, dtypes.float32)
def G1(x, dy):
return x * dy
@function.Defun(dtypes.float32)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
gradient = function_pb2.GradientDef()
gradient.function_name = F1.name
gradient.gradient_func = G1.name
    # Create an invalid function def library that is missing the G1 function def
library = function_pb2.FunctionDefLibrary()
library.gradient.extend([gradient])
library.function.extend([F1.definition])
with self.assertRaisesRegexp(
ValueError,
"FunctionDefLibrary missing 'G1_[0-9a-zA-Z]{8,11}' FunctionDef"):
function.from_library(library)
    # Create an invalid function def library that is missing the F1 function def
library = function_pb2.FunctionDefLibrary()
library.gradient.extend([gradient])
library.function.extend([G1.definition])
with self.assertRaisesRegexp(
ValueError,
"FunctionDefLibrary missing 'F1_[0-9a-zA-Z]{8,11}' FunctionDef"):
function.from_library(library)
def testFromLibraryCyclicGradFuncs(self):
@function.Defun(dtypes.float32)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
@function.Defun(dtypes.float32)
def F2(x):
return math_ops.exp(x) - math_ops.exp(-x)
# Create invalid function def library where F1 has gradient function F2 and
# F2 has gradient function F1
library = function_pb2.FunctionDefLibrary()
library.function.extend([F1.definition, F2.definition])
gradient1 = function_pb2.GradientDef()
gradient1.function_name = F1.name
gradient1.gradient_func = F2.name
gradient2 = function_pb2.GradientDef()
gradient2.function_name = F2.name
gradient2.gradient_func = F1.name
library.gradient.extend([gradient1, gradient2])
with self.assertRaisesRegexp(
ValueError, "FunctionDefLibrary contains cyclic gradient functions!"):
function.from_library(library)
def testExperimentalAttrs(self):
@function.Defun(dtypes.int32, experimental_tag="tag_value")
def FunctionWithStrAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=123)
def FunctionWithIntAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=123.0)
def FunctionWithFloatAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=True)
def FunctionWithBoolAttr(i):
return array_ops.identity(i)
self.assertTrue("experimental_tag" in FunctionWithStrAttr.definition.attr)
self.assertEqual(FunctionWithStrAttr.definition.attr["experimental_tag"].s,
b"tag_value")
self.assertTrue("experimental_tag" in FunctionWithIntAttr.definition.attr)
self.assertEqual(FunctionWithIntAttr.definition.attr["experimental_tag"].i,
123)
self.assertTrue("experimental_tag" in FunctionWithFloatAttr.definition.attr)
self.assertEqual(
FunctionWithFloatAttr.definition.attr["experimental_tag"].f, 123.0)
self.assertTrue("experimental_tag" in FunctionWithBoolAttr.definition.attr)
self.assertEqual(FunctionWithBoolAttr.definition.attr["experimental_tag"].b,
True)
class FunctionOverloadTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
@function.Defun()
def Sinh(x):
return 1 / 2. * (math_ops.exp(x) - math_ops.exp(-x))
g = ops.Graph()
with g.as_default():
x = Sinh(constant_op.constant(0.25, dtypes.float32))
y = Sinh(constant_op.constant(0.25, dtypes.float64))
with self.session(graph=g):
self.assertAllClose(x.eval(), np.sinh(0.25))
self.assertAllClose(y.eval(), np.sinh(0.25))
def testGradient(self):
@function.Defun(func_name="Spec")
def G(x, dy):
return x * dy
@function.Defun(grad_func=G)
def F(x):
return math_ops.exp(x) - math_ops.exp(-x)
for dtype in [dtypes.float32, dtypes.float64]:
g = ops.Graph()
with g.as_default():
x = constant_op.constant(0.25, dtype)
y = F(x)
dx, = gradients_impl.gradients(y, x)
with self.session(graph=g):
self.assertAllClose(dx.eval(), 0.25)
def testDocString(self):
@function.Defun()
def Foo(x):
"""Successor of x."""
return x + 1
g = ops.Graph()
with g.as_default():
_ = Foo(1)
self.assertEqual(g.as_graph_def().library.function[0].signature.description,
"Successor of x.")
class FunctionCaptureByValueTest(test.TestCase):
@test_util.run_deprecated_v1
def testCaptureByValue(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant([[1.0]])
b = constant_op.constant([2.0])
# Foo() captures w and b.
@function.Defun(dtypes.float32, capture_by_value=True)
def Foo(x):
# Plus() captures b.
@function.Defun(dtypes.float32, capture_by_value=True)
def Plus(y):
return y + b
self.assertEqual(0, len(Plus.captured_inputs))
return Plus(math_ops.matmul(w, x))
y = Foo(constant_op.constant([[10.]]))
self.assertEqual(0, len(Foo.captured_inputs))
with self.session(graph=g):
self.assertAllEqual(y.eval(), [[12.0]])
class UnrollLSTMTest(test.TestCase):
BATCH_SIZE = 16
LSTM_DIMS = 32
NUM_UNROLL = 20
def _Weights(self):
dims = self.LSTM_DIMS
return random_ops.random_uniform([2 * dims, 4 * dims], -1, 1, seed=123456)
def _Input(self):
return random_ops.random_uniform(
[self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS], seed=654321)
# Helper to construct a LSTM cell graph.
@classmethod
def LSTMCell(cls, x, mprev, cprev, weights):
xm = array_ops.concat([x, mprev], 1)
i_i, i_g, f_g, o_g = array_ops.split(
value=math_ops.matmul(xm, weights), num_or_size_splits=4, axis=1)
new_c = math_ops.sigmoid(f_g) * cprev + math_ops.sigmoid(
i_g) * math_ops.tanh(i_i)
new_c = math_ops.maximum(math_ops.minimum(new_c, 50.0), -50.0)
new_m = math_ops.sigmoid(o_g) * math_ops.tanh(new_c)
return new_m, new_c
def _BuildForward(self, weights, inp, mode="cell"):
def Loop(cell, w, i):
x = array_ops.unstack(i, self.NUM_UNROLL)
m = array_ops.zeros_like(x[0])
c = array_ops.zeros_like(x[0])
for i in range(self.NUM_UNROLL):
m, c = cell(x[i], m, c, w)
return m
cell = UnrollLSTMTest.LSTMCell
if mode == "complete":
# Constructs the complete graph in python.
return Loop(cell, weights, inp)
cell = function.Defun(dtypes.float32, dtypes.float32, dtypes.float32,
dtypes.float32)(
cell)
if mode == "cell":
# Just represent the LSTM as a function.
return Loop(cell, weights, inp)
if mode == "loop":
# Wraps the whole loop as a function.
@function.Defun(dtypes.float32, dtypes.float32)
def LSTMLoop(w, i):
return Loop(cell, w, i)
return LSTMLoop(weights, inp)
if mode == "loop10":
      # Wraps 10 LSTM steps into one function, and the whole loop into another
      # function that calls the former, grouping 10 steps at a time.
@function.Defun(dtypes.float32, dtypes.float32, dtypes.float32,
*([dtypes.float32] * 10))
def Loop10(w, m, c, *args):
for x in args:
m, c = cell(x, m, c, w)
return m, c
@function.Defun(dtypes.float32, dtypes.float32)
def LSTMLoop10(weights, inp):
x = array_ops.unstack(inp, self.NUM_UNROLL)
m = array_ops.zeros_like(x[0])
c = array_ops.zeros_like(x[0])
assert self.NUM_UNROLL % 10 == 0
for i in range(0, self.NUM_UNROLL, 10):
m, c = Loop10(weights, m, c, *x[i:i + 10])
return m
return LSTMLoop10(weights, inp)
def testUnrollLSTM(self):
# Run one step of the unrolled lstm graph.
def RunForward(mode, cfg=None):
tf_logging.info("mode = %s", mode)
g = ops.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
gdef = g.as_graph_def()
finish = time.time()
tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start,
len(str(gdef)), len(gdef.SerializeToString()))
with g.as_default(), session.Session(config=cfg) as sess:
return self.evaluate(m)
mv0 = RunForward("complete")
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
mv1 = RunForward("cell", cfg)
mv2 = RunForward("loop", cfg)
mv3 = RunForward("loop10", cfg)
self.assertAllClose(mv0, mv1, rtol=1e-4)
self.assertAllClose(mv0, mv2, rtol=1e-4)
self.assertAllClose(mv0, mv3, rtol=1e-4)
def testUnrollLSTMGrad(self):
# Run one step of the unrolled lstm graph.
def RunForwardBackward(mode, cfg=None):
tf_logging.info("mode = %s", mode)
g = ops.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
loss = math_ops.reduce_sum(math_ops.square(m))
dw = gradients_impl.gradients([loss], [weights])
gdef = g.as_graph_def()
finish = time.time()
tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start,
len(str(gdef)), len(gdef.SerializeToString()))
with g.as_default(), session.Session(config=cfg) as sess:
return self.evaluate(dw)
d0 = RunForwardBackward("complete")
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
d1 = RunForwardBackward("cell", cfg)
d2 = RunForwardBackward("loop", cfg)
d3 = RunForwardBackward("loop10", cfg)
self.assertAllClose(d0, d1, rtol=1e-4, atol=1e-4)
self.assertAllClose(d0, d2, rtol=1e-4, atol=1e-4)
self.assertAllClose(d0, d3, rtol=1e-4, atol=1e-4)
class FunctionInlineControlTest(test.TestCase):
@test_util.disable_xla("XLA changes the names, breaking graph analysis")
def testFoo(self):
dtype = dtypes.float32
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True)))
cell_func_call_pattern = re.compile(r"Cell[^/]*\(")
for noinline in [False, True]:
@function.Defun(dtype, noinline=noinline)
def Cell(v):
# If v is a vector [n, 1], x is a big square matrix.
x = math_ops.tanh(v + array_ops.transpose(v, [1, 0]))
return math_ops.reduce_sum(x, 1, keepdims=True)
@function.Defun(dtype)
def Forward(x):
for _ in range(10):
# pylint: disable=cell-var-from-loop
x = Cell(x)
return math_ops.reduce_sum(x, [0, 1])
self.assertEqual(noinline, Cell.definition.attr["_noinline"].b)
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtype)
y = Forward(x)
dx, = gradients_impl.gradients([y], [x])
np.random.seed(321)
inp = np.random.uniform(-1, 1, [16, 1]).astype(np.float32)
run_metadata = config_pb2.RunMetadata()
with session.Session(graph=g, config=cfg) as sess:
ans = sess.run(
[y, dx], {x: inp},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
print(ans[0], np.sum(ans[1]))
self.assertAllClose(ans[0], 255.971, rtol=1e-3)
self.assertAllClose(np.sum(ans[1]), 13.0408, rtol=1e-3)
def MetadataHasCell(run_metadata):
for dev_stats in run_metadata.step_stats.dev_stats:
for node_stats in dev_stats.node_stats:
if cell_func_call_pattern.search(node_stats.timeline_label):
return True
return False
self.assertEqual(MetadataHasCell(run_metadata), noinline)
class ModuleFunctionTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
@function.Defun(*[dtypes.float32] * 3)
def LinearWithCApi(w, b, x):
return nn_ops.relu(math_ops.matmul(x, w) + b)
@function.Defun(*[dtypes.float32] * 5)
def Linear2WithCApi(w1, b1, w2, b2, x):
return LinearWithCApi(w2, b2, LinearWithCApi(w1, b1, x))
with ops.Graph().as_default():
a, b, c, d, e = [
constant_op.constant([[_]], dtype=dtypes.float32) for _ in range(5)
]
y = LinearWithCApi(a, b, c)
z = Linear2WithCApi(a, b, c, d, e)
with session.Session() as sess:
self.assertAllEqual([[1]], self.evaluate(y))
self.assertAllEqual([[5]], self.evaluate(z))
class VariableHoistingTest(test.TestCase):
def _testSimpleModel(self, use_forward_func, use_resource=False):
def _Model(x):
w = variable_scope.get_variable(
"w", (64, 64),
initializer=init_ops.random_uniform_initializer(seed=312),
use_resource=use_resource)
b = variable_scope.get_variable(
"b", (64),
initializer=init_ops.zeros_initializer(),
          use_resource=use_resource)
return math_ops.sigmoid(math_ops.matmul(x, w) + b)
@function.Defun()
def Model(x):
return _Model(x)
cvars = []
@function.Defun()
def Grad(x, y0):
if use_forward_func:
y = Model(x)
else:
y = _Model(x)
loss = math_ops.reduce_mean(
math_ops.reduce_sum(y0 * math_ops.log(y), 1), 0)
arg_w, arg_b = function.get_extra_args()
self.assertEqual(arg_w.get_shape(), tensor_shape.TensorShape([64, 64]))
self.assertEqual(arg_b.get_shape(), tensor_shape.TensorShape([64]))
dw, db = gradients_impl.gradients(loss, [arg_w, arg_b])
cvars.extend(function.get_extra_vars())
return loss, dw, db
g = ops.Graph()
with g.as_default():
x = random_ops.random_normal([64, 64], seed=100)
y0 = random_ops.random_normal([64, 64], seed=200)
with variable_scope.variable_scope("Foo"):
loss, dw, db = Grad(x, y0)
self.assertEqual(2, len(cvars))
w, b = cvars[:2]
self.assertEqual("Foo/w", w.op.name)
self.assertEqual("Foo/b", b.op.name)
with self.session(graph=g) as sess:
self.evaluate(variables.global_variables_initializer())
w, b, x, y0, loss, dw, db = self.evaluate([w, b, x, y0, loss, dw, db])
self.assertAllEqual(w.shape, (64, 64))
self.assertAllClose(np.sum(w), 2050.44)
self.assertAllEqual(b.shape, (64,))
self.assertAllClose(np.sum(b), 0.0)
self.assertAllClose(loss, -2.27, rtol=1e-2)
self.assertAllEqual(dw.shape, (64, 64))
self.assertAllClose(np.sum(dw), -1.04, rtol=1e-2)
self.assertAllEqual(db.shape, (64,))
self.assertAllClose(np.sum(db), 0.509, rtol=1e-2)
@test_util.run_deprecated_v1
def testBasic(self):
self._testSimpleModel(False)
self._testSimpleModel(True)
@test_util.run_deprecated_v1
def testBasicResource(self):
self._testSimpleModel(False, use_resource=True)
self._testSimpleModel(True, use_resource=True)
class TemplateTest(test.TestCase):
@test_util.run_v1_only("make_template not supported in TF2")
def testBasic(self):
self.assertTemplateVariableSharing(use_resource=True, defun_first=False)
@test_util.run_v1_only("make_template not supported in TF2")
def testBasicRef(self):
self.assertTemplateVariableSharing(use_resource=False, defun_first=False)
@test_util.run_v1_only("make_template not supported in TF2")
def testBasicDefunFirst(self):
self.assertTemplateVariableSharing(use_resource=True, defun_first=True)
@test_util.run_v1_only("make_template not supported in TF2")
def testBasicRefDefunFirst(self):
self.assertTemplateVariableSharing(use_resource=False, defun_first=True)
def assertTemplateVariableSharing(self, use_resource, defun_first):
parameters = []
def MakeModel(x):
w = variable_scope.get_variable(
"w", (64, 64),
initializer=init_ops.random_uniform_initializer(seed=312),
use_resource=use_resource)
b = variable_scope.get_variable(
"b", (64),
initializer=init_ops.zeros_initializer(),
use_resource=use_resource)
parameters.extend((w, b))
return math_ops.sigmoid(math_ops.matmul(x, w) + b)
model = template.make_template("f", MakeModel, create_scope_now_=True)
@function.Defun()
def ModelDefun(x):
return model(x)
x = array_ops.placeholder(dtypes.float32)
if defun_first:
ModelDefun(x)
model(x)
else:
model(x)
ModelDefun(x)
w1, b1, w2, b2 = parameters # pylint: disable=unbalanced-tuple-unpacking
self.assertIs(w1, w2)
self.assertIs(b1, b2)
class DevicePlacementTest(test.TestCase):
def testNoDeviceGraph(self):
with ops.Graph().as_default():
@function.Defun(*[dtypes.float32] * 2)
def Matmul(a, b):
return math_ops.matmul(a, b)
Matmul(1., 2.)
gdef = ops.get_default_graph().as_graph_def()
self.assertAllEqual(len(gdef.library.function), 1)
fdef = gdef.library.function[0]
for node in fdef.node_def:
self.assertAllEqual(node.device, "")
def testNestedDevices(self):
with ops.Graph().as_default(), ops.device("CPU:0"):
@function.Defun(*[dtypes.float32] * 2)
def Matmul(a, b):
return math_ops.matmul(a, b)
with ops.device("CPU:1"):
@function.Defun(*[dtypes.float32] * 2)
def Divide(a, b):
return math_ops.divide(a, b)
Divide(Matmul(1., 2.), 3.)
gdef = ops.get_default_graph().as_graph_def()
matmul_fdef = [
f for f in gdef.library.function if "Matmul" in f.signature.name
]
divide_fdef = [
f for f in gdef.library.function if "Divide" in f.signature.name
]
self.assertAllEqual(len(matmul_fdef), 1)
self.assertAllEqual(len(divide_fdef), 1)
for node in matmul_fdef[0].node_def:
self.assertAllEqual(node.device, "/device:CPU:0")
for node in divide_fdef[0].node_def:
self.assertAllEqual(node.device, "/device:CPU:1")
def _testNestedDeviceWithSameFunction(self, func_name):
def MatmulWrap(a, b):
@function.Defun(
func_name=func_name, *[dtypes.int32] * 2)
def Matmul(a, b):
return math_ops.matmul(a, b)
return Matmul(a, b)
with ops.Graph().as_default(), ops.device("CPU:0"):
c = MatmulWrap(1, 2)
with ops.device("CPU:1"):
MatmulWrap(c, 3)
gdef = ops.get_default_graph().as_graph_def()
devices = []
for node in gdef.library.function[0].node_def:
devices.append(node.device)
for node in gdef.library.function[1].node_def:
devices.append(node.device)
self.assertAllEqual(sorted(devices), ["/device:CPU:0", "/device:CPU:1"])
def testFunctionWithName(self):
with self.assertRaises(InvalidArgumentError) as cm:
self._testNestedDeviceWithSameFunction("MatmulTest")
self.assertEqual(
cm.exception.message,
"Cannot add function \'MatmulTest\' because a different "
"function with the same name already exists.")
def testFunctionWithoutName(self):
self._testNestedDeviceWithSameFunction(None)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/framework/function_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_ops as math_ops_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.saver import export_meta_graph
# Utility device function to use for testing
def test_device_func_pin_variable_to_cpu(op):
if op.device:
return op.device
return "/cpu:0" if op.node_def.op in ["Variable", "VariableV2"] else op.device
class DeviceFunctionsTest(test.TestCase):
def testTwoDeviceFunctions(self):
with ops.Graph().as_default() as g:
var_0 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_0",
container="",
shared_name="")
with g.device(test_device_func_pin_variable_to_cpu):
var_1 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_1",
container="",
shared_name="")
var_2 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_2",
container="",
shared_name="")
var_3 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_3",
container="",
shared_name="")
with g.device(test_device_func_pin_variable_to_cpu):
var_4 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_4",
container="",
shared_name="")
with g.device("/device:GPU:0"):
var_5 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_5",
container="",
shared_name="")
var_6 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_6",
container="",
shared_name="")
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, None)
self.assertDeviceEqual(var_3.device, None)
self.assertDeviceEqual(var_4.device, "/device:CPU:0")
self.assertDeviceEqual(var_5.device, "/device:GPU:0")
self.assertDeviceEqual(var_6.device, "/device:CPU:0")
@test_util.run_v1_only("b/120545219")
def testNestedDeviceFunctions(self):
with ops.Graph().as_default():
var_0 = variables.VariableV1(0)
with ops.device(test_device_func_pin_variable_to_cpu):
var_1 = variables.VariableV1(1)
with ops.device(lambda op: "/device:GPU:0"):
var_2 = variables.VariableV1(2)
with ops.device("/device:GPU:0"): # Implicit merging device function.
var_3 = variables.VariableV1(3)
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, "/device:GPU:0")
self.assertDeviceEqual(var_3.device, "/device:GPU:0")
def testExplicitDevice(self):
with ops.Graph().as_default() as g:
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/job:ps"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, None)
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/job:ps")
def testDefaultDevice(self):
with ops.Graph().as_default() as g, g.device(
test_device_func_pin_variable_to_cpu):
with g.device("/job:ps"):
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/replica:0"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, "/job:ps")
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/replica:0")
def testExtractSubGraph(self):
graph_def = graph_pb2.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
n1.input.extend(["n5"])
n2 = graph_def.node.add()
n2.name = "n2"
# Take the first output of the n1 node as the input.
n2.input.extend(["n1:0"])
n3 = graph_def.node.add()
n3.name = "n3"
    # Add a control input (which isn't really needed by the kernel, but is
    # used to enforce execution order between nodes).
n3.input.extend(["^n2"])
n4 = graph_def.node.add()
n4.name = "n4"
    # It is fine to have loops in the graph as well.
n5 = graph_def.node.add()
n5.name = "n5"
n5.input.extend(["n1"])
sub_graph = graph_util.extract_sub_graph(graph_def, ["n3"])
self.assertEqual("n1", sub_graph.node[0].name)
self.assertEqual("n2", sub_graph.node[1].name)
self.assertEqual("n3", sub_graph.node[2].name)
self.assertEqual("n5", sub_graph.node[3].name)
def testExtractSubGraphWithInvalidDestNodes(self):
graph_def = graph_pb2.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
with self.assertRaisesRegexp(TypeError, "must be a list"):
graph_util.extract_sub_graph(graph_def, "n1")
def _test_convert_variables_with_functions(self, inline_functions):
"""Freezes a graph with functions."""
@function.Defun(dtypes.float32)
def plus_one(x):
return x + 1.0
with ops.Graph().as_default():
variable_node = variables.Variable(1.0, name="variable_node")
_ = variables.Variable(1.0, name="unused_variable_node")
defun_node = plus_one(variable_node)
_ = math_ops_lib.multiply(defun_node, 2.0, name="output_node")
with session.Session() as sess:
self.evaluate(variables.variables_initializer([variable_node]))
variable_graph_def = sess.graph.as_graph_def()
if inline_functions:
# Run Grappler to create the VarOpHandle --> Placeholder -->
# ResourceVariable pattern.
meta_graph = export_meta_graph(graph_def=variable_graph_def)
fetch_collection = meta_graph_pb2.CollectionDef()
for name in ["variable_node", "output_node"]:
fetch_collection.node_list.value.append(name)
meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
# Initialize RewriterConfig with everything disabled except function
# inlining.
config = config_pb2.ConfigProto()
rewrite_options = config.graph_options.rewrite_options
rewrite_options.optimizers.append("function")
variable_graph_def = tf_optimizer.OptimizeGraph(config, meta_graph)
constant_graph_def = graph_util.convert_variables_to_constants(
sess, variable_graph_def, ["output_node"])
# Ensure there are no variables after freezing.
for node in constant_graph_def.node:
self.assertNotIn(
node.op, ["Variable", "VariableV2", "VarHandleOp", "ReadVariableOp"])
def testConvertVariablesToConstsWithFunctions(self):
"""Freezes a graph with functions."""
self._test_convert_variables_with_functions(inline_functions=False)
def testConvertVariableToConstsWithFunctionsInlined(self):
"""Freezes a graph with functions that have been inlined using Grappler."""
self._test_convert_variables_with_functions(inline_functions=True)
def _get_tensors(self, sess, tensor_list):
"""Returns a list of Tensor objects from the Session."""
return [
sess.graph.get_tensor_by_name(tensor.name) for tensor in tensor_list
]
def _evaluate_graph_def(self, graph_def, inputs, outputs, input_data):
"""Evaluates the GraphDef using Sessions."""
with ops.Graph().as_default() as graph:
importer.import_graph_def(graph_def, name="")
sess = session.Session(graph=graph)
input_tensors = self._get_tensors(sess, inputs)
output_tensors = self._get_tensors(sess, outputs)
return sess.run(
output_tensors, feed_dict=dict(zip(input_tensors, input_data)))
@test_util.run_v1_only("Incompatible with TF 2.0")
def testConvertVariablesToConstsWithEmbeddings(self):
"""Freezes a graph with embeddings."""
input_data = np.array(np.random.random_sample([1, 1]), dtype=np.int32)
# Make model.
state_input = keras.layers.Input(
shape=(1,), name="state_input", dtype="int32")
output = keras.layers.Embedding(
output_dim=16, input_dim=100, input_length=1, name="state")(
state_input)
model = keras.models.Model(inputs=[state_input], outputs=[output])
model.compile(
loss={"state": "sparse_categorical_crossentropy"}, optimizer="adam")
# Get associated session.
sess = keras.backend.get_session()
variable_graph_def = sess.graph_def
output_tensor = [tensor.name.split(":")[0] for tensor in model.outputs]
constant_graph_def = graph_util.convert_variables_to_constants(
sess, variable_graph_def, output_tensor)
# Ensure graph has no variables.
for node in constant_graph_def.node:
self.assertNotIn(
node.op, ["Variable", "VariableV2", "VarHandleOp", "ReadVariableOp"])
# Compare the value of the graphs.
expected_value = model.predict(input_data)
actual_value = self._evaluate_graph_def(constant_graph_def, model.inputs,
model.outputs, [input_data])
np.testing.assert_almost_equal(np.array([expected_value]), actual_value, 5)
def testConvertVariablesToConsts(self):
self._test_variable_to_const_conversion(use_resource=False)
def testConvertResourceVariablesToConsts(self):
self._test_variable_to_const_conversion(use_resource=True)
def _test_variable_to_const_conversion(self, use_resource):
with ops.Graph().as_default():
with variable_scope.variable_scope("", use_resource=use_resource):
variable_node = variable_scope.get_variable(
"variable_node", initializer=1.0)
another_variable = variable_scope.get_variable(
"unused_variable_node", initializer=1.0)
output_node = math_ops_lib.multiply(
variable_node, 2.0, name="output_node")
with session.Session() as sess:
self.evaluate(variable_node.initializer)
output = self.evaluate(output_node)
self.assertNear(2.0, output, 0.00001)
variable_graph_def = sess.graph.as_graph_def()
        # First get the constant_graph_def when variable_names_whitelist is
        # set. Note that if variable_names_whitelist is not set, an error will
        # be thrown because unused_variable_node is not initialized.
constant_graph_def = graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_whitelist=set(["variable_node"]))
# Then initialize the unused variable, and get another
# constant_graph_def when variable_names_whitelist is not set.
self.evaluate(another_variable.initializer)
constant_graph_def_without_variable_whitelist = (
graph_util.convert_variables_to_constants(
sess, variable_graph_def, ["output_node"]))
# The unused variable should be cleared so the two graphs should be
# equivalent.
self.assertEqual(
str(constant_graph_def),
str(constant_graph_def_without_variable_whitelist))
# Test variable name black list. This should result in the variable
# not being a const.
constant_graph_def_with_blacklist = (
graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_blacklist=set(["variable_node"])))
variable_node = None
for node in constant_graph_def_with_blacklist.node:
if node.name == "variable_node":
variable_node = node
self.assertIsNotNone(variable_node)
if use_resource:
self.assertEqual(variable_node.op, "VarHandleOp")
else:
self.assertEqual(variable_node.op, "VariableV2")
    # Now we make sure the variable has been converted to a constant, and that
    # the graph still produces the expected result.
with ops.Graph().as_default():
_ = importer.import_graph_def(constant_graph_def, name="")
self.assertEqual(4, len(constant_graph_def.node))
for node in constant_graph_def.node:
self.assertNotIn(
node.op,
["Variable", "VariableV2", "VarHandleOp", "ReadVariableOp"])
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = self.evaluate(output_node)
self.assertNear(2.0, output, 0.00001)
def create_node_def(self, op, name, inputs):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node_def(self, name, value, dtype,
shape=None, inputs=None):
node = self.create_node_def("Const", name, inputs or [])
self.set_attr_dtype(node, "dtype", dtype)
self.set_attr_tensor(node, "value", value, dtype, shape)
return node
def set_attr_dtype(self, node, key, value):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(type=value.as_datatype_enum))
def set_attr_tensor(self, node, key, value, dtype, shape=None):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape)))
def testRemoveTrainingNodes(self):
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = self.create_node_def("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = self.create_node_def(
"Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
graph_def.node.extend([a_identity_node])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = self.create_node_def("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = self.create_node_def(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = self.create_node_def("Add", add_name,
[a_identity_name, b_identity_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = self.create_node_def("Add", add_name,
[a_constant_name, b_constant_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
output = graph_util.remove_training_nodes(graph_def)
self.assertProtoEquals(expected_output, output)
def testRemoveIdentityChains(self):
"""Check that chains of Identity nodes are correctly pruned.
Create a chain of four nodes, A, B, C, and D where A inputs B, B inputs C,
and C inputs D. Nodes B and C are "Identity" and should be pruned, resulting
in the nodes A and D, where A inputs D.
"""
graph_def = graph_pb2.GraphDef()
graph_def.node.extend([
self.create_node_def("Aop", "A", ["B"]), self.create_node_def(
"Identity", "B", ["C"]), self.create_node_def(
"Identity", "C", ["D"]), self.create_node_def("Dop", "D", [])
])
expected_graph_def = graph_pb2.GraphDef()
expected_graph_def.node.extend([
self.create_node_def("Aop", "A", ["D"]), self.create_node_def(
"Dop", "D", [])
])
self.assertProtoEquals(expected_graph_def,
graph_util.remove_training_nodes(graph_def))
def testRemoveIdentityUsedAsControlInputInConst(self):
"""Check that Identity nodes used as control inputs are not removed."""
graph_def = graph_pb2.GraphDef()
graph_def.node.extend([
self.create_constant_node_def("C", 1, dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
self.assertProtoEquals(graph_def,
graph_util.remove_training_nodes(graph_def))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/framework/graph_util_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for configuring TensorFlow execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.util.tf_export import tf_export
@tf_export('config.threading.get_intra_op_parallelism_threads')
def get_intra_op_parallelism_threads():
"""Get number of threads used within an individual op for parallelism.
Certain operations like matrix multiplication and reductions can utilize
  parallel threads for speedups. A value of 0 means the system picks an
appropriate number.
Returns:
Number of parallel threads
"""
return context.context().intra_op_parallelism_threads
@tf_export('config.threading.set_intra_op_parallelism_threads')
def set_intra_op_parallelism_threads(num_threads):
"""Set number of threads used within an individual op for parallelism.
Certain operations like matrix multiplication and reductions can utilize
  parallel threads for speedups. A value of 0 means the system picks an
appropriate number.
Args:
num_threads: Number of parallel threads
"""
context.context().intra_op_parallelism_threads = num_threads
@tf_export('config.threading.get_inter_op_parallelism_threads')
def get_inter_op_parallelism_threads():
"""Get number of threads used for parallelism between independent operations.
Determines the number of threads used by independent non-blocking operations.
0 means the system picks an appropriate number.
Returns:
Number of parallel threads
"""
return context.context().inter_op_parallelism_threads
@tf_export('config.threading.set_inter_op_parallelism_threads')
def set_inter_op_parallelism_threads(num_threads):
"""Set number of threads used for parallelism between independent operations.
Determines the number of threads used by independent non-blocking operations.
0 means the system picks an appropriate number.
Args:
num_threads: Number of parallel threads
"""
context.context().inter_op_parallelism_threads = num_threads
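# Illustrative sketch (not part of the original module): a hypothetical helper
# showing how the two threading knobs above might be configured together before
# any ops run. Passing 0 restores the default of letting the system choose.
def _example_configure_thread_pools(intra=4, inter=2):
  """Pins both thread pools and returns the resulting (intra, inter) settings."""
  set_intra_op_parallelism_threads(intra)  # threads used within a single op
  set_inter_op_parallelism_threads(inter)  # threads used across independent ops
  return (get_intra_op_parallelism_threads(),
          get_inter_op_parallelism_threads())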
@tf_export('config.optimizer.get_jit')
def get_optimizer_jit():
"""Get if JIT compilation is enabled.
  Note that optimizations are only applied in graph mode (within tf.function).
Returns:
If JIT compilation is enabled.
"""
return context.context().optimizer_jit
@tf_export('config.optimizer.set_jit')
def set_optimizer_jit(enabled):
"""Set if JIT compilation is enabled.
Args:
enabled: Whether to enable JIT compilation.
"""
context.context().optimizer_jit = enabled
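# Illustrative sketch (hypothetical helper, not part of the public API): pairs
# get_optimizer_jit/set_optimizer_jit so a caller can toggle JIT compilation for
# graph-mode (tf.function) execution and later restore the previous setting.
def _example_toggle_jit(enabled=True):
  """Enables or disables JIT compilation, returning the previous setting."""
  previous = get_optimizer_jit()
  set_optimizer_jit(enabled)
  return previous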
@tf_export('config.optimizer.get_experimental_options')
def get_optimizer_experimental_options():
"""Get experimental optimizer options.
Refer to tf.config.optimizer.set_experimental_options for a list of current
options.
  Note that optimizations are only applied in graph mode (within tf.function).
In addition, as these are experimental options, the list is subject to change.
Returns:
Dictionary of configured experimental optimizer options
"""
return context.context().get_optimizer_experimental_options()
@tf_export('config.optimizer.set_experimental_options')
def set_optimizer_experimental_options(options):
"""Set experimental optimizer options.
  Note that optimizations are only applied in graph mode (within tf.function).
In addition, as these are experimental options, the list is subject to change.
Args:
options: Dictionary of experimental optimizer options to configure.
Valid keys:
- layout_optimizer: Optimize tensor layouts
e.g. This will try to use NCHW layout on GPU which is faster.
- constant_folding: Fold constants
Statically infer the value of tensors when possible, and materialize the
result using constants.
- shape_optimization: Simplify computations made on shapes.
- remapping: Remap subgraphs onto more efficient implementations.
- arithmetic_optimization: Simplify arithmetic ops with common
sub-expression elimination and arithmetic simplification.
- dependency_optimization: Control dependency optimizations. Remove
        redundant control dependencies, which may enable other optimizations.
This optimizer is also essential for pruning Identity and NoOp nodes.
- loop_optimization: Loop optimizations.
- function_optimization: Function optimizations and inlining.
- debug_stripper: Strips debug-related nodes from the graph.
- disable_model_pruning: Disable removal of unnecessary ops from the graph
- scoped_allocator_optimization: Try to allocate some independent Op
outputs contiguously in order to merge or eliminate downstream Ops.
- pin_to_host_optimization: Force small ops onto the CPU.
- implementation_selector: Enable the swap of kernel implementations based
on the device placement.
- auto_mixed_precision: Change certain float32 ops to float16 on Volta
GPUs and above. Without the use of loss scaling, this can cause
numerical underflow (see
`keras.mixed_precision.experimental.LossScaleOptimizer`).
- disable_meta_optimizer: Disable the entire meta optimizer.
      - min_graph_nodes: The minimum number of nodes in a graph to optimize.
For smaller graphs, optimization is skipped.
"""
context.context().set_optimizer_experimental_options(options)
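# Illustrative sketch (hypothetical helper, not part of the public API): enables
# a few of the experimental Grappler options documented above and reads back the
# resulting configuration. The particular keys chosen here are only an example.
def _example_enable_common_optimizations():
  """Turns on a handful of experimental optimizer options."""
  set_optimizer_experimental_options({
      'layout_optimizer': True,   # prefer faster tensor layouts (e.g. NCHW on GPU)
      'constant_folding': True,   # statically evaluate constant subgraphs
      'debug_stripper': True,     # strip debug-related nodes from the graph
  })
  return get_optimizer_experimental_options()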
@tf_export('config.get_soft_device_placement')
def get_soft_device_placement():
"""Get if soft device placement is enabled.
  If enabled, an op will be placed on CPU if any of the following are true:
    1. there's no GPU implementation for the op
    2. no GPU devices are known or registered
    3. the op needs to be co-located with reftype input(s) which are on the CPU
Returns:
If soft placement is enabled.
"""
return context.context().soft_device_placement
@tf_export('config.set_soft_device_placement')
def set_soft_device_placement(enabled):
"""Set if soft device placement is enabled.
  If enabled, an op will be placed on CPU if any of the following are true:
    1. there's no GPU implementation for the op
    2. no GPU devices are known or registered
    3. the op needs to be co-located with reftype input(s) which are on the CPU
Args:
enabled: Whether to enable soft placement.
"""
context.context().soft_device_placement = enabled
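# Illustrative sketch (hypothetical helper, not part of the public API): with soft
# placement enabled, an op that cannot run on its requested device falls back to
# the CPU instead of raising an error.
def _example_with_soft_placement(enabled=True):
  """Sets soft device placement and returns the previous value."""
  previous = get_soft_device_placement()
  set_soft_device_placement(enabled)
  return previous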
@tf_export('config.experimental.get_device_policy')
def get_device_policy():
"""Gets the current device policy.
The device policy controls how operations requiring inputs on a specific
device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).
This function only gets the device policy for the current thread. Any
subsequently started thread will again use the default policy.
Returns:
Current thread device policy
"""
device_policy = context.context().device_policy
if device_policy == context.DEVICE_PLACEMENT_SILENT:
return 'silent'
elif device_policy == context.DEVICE_PLACEMENT_SILENT_FOR_INT32:
return 'silent_for_int32'
elif device_policy == context.DEVICE_PLACEMENT_WARN:
return 'warn'
elif device_policy == context.DEVICE_PLACEMENT_EXPLICIT:
return 'explicit'
else:
raise ValueError('Not a valid device policy: %r' % device_policy)
@tf_export('config.experimental.set_device_policy')
def set_device_policy(device_policy):
"""Sets the current thread device policy.
The device policy controls how operations requiring inputs on a specific
device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).
When using the default, an appropriate policy will be picked automatically.
The default policy may change over time.
This function only sets the device policy for the current thread. Any
subsequently started thread will again use the default policy.
Args:
device_policy: A device policy.
Valid values:
- None: Switch to a system default.
- 'warn': Copies the tensors which are not on the right device and logs
a warning.
- 'explicit': Raises an error if the placement is not as required.
- 'silent': Silently copies the tensors. Note that this may hide
performance problems as there is no notification provided when
operations are blocked on the tensor being copied between devices.
- 'silent_for_int32': silently copies `int32` tensors, raising errors on
the other ones.
Raises:
ValueError: If an invalid `device_policy` is passed.
"""
if device_policy == 'silent':
context.context().device_policy = context.DEVICE_PLACEMENT_SILENT
elif device_policy == 'silent_for_int32':
context.context().device_policy = context.DEVICE_PLACEMENT_SILENT_FOR_INT32
elif device_policy == 'warn':
context.context().device_policy = context.DEVICE_PLACEMENT_WARN
elif device_policy == 'explicit':
context.context().device_policy = context.DEVICE_PLACEMENT_EXPLICIT
elif device_policy is None:
context.context().device_policy = None
else:
raise ValueError('Not a valid device policy: %r' % device_policy)
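# Illustrative sketch (hypothetical helper, not part of the public API): switches
# the thread-local device policy to 'warn' so implicit cross-device copies are
# logged, then restores whatever policy the caller had.
def _example_device_policy_roundtrip():
  """Temporarily warns on implicit cross-device tensor copies."""
  previous = get_device_policy()  # e.g. 'silent' under the default eager policy
  set_device_policy('warn')       # log a warning whenever a tensor is copied
  assert get_device_policy() == 'warn'
  set_device_policy(previous)     # restore the caller's policy
  return previous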
@tf_export('config.experimental.get_synchronous_execution')
def get_synchronous_execution():
"""Gets whether operations are executed synchronously or asynchronously.
TensorFlow can execute operations synchronously or asynchronously. If
asynchronous execution is enabled, operations may return "non-ready" handles.
Returns:
Current thread execution mode
"""
return context.context().execution_mode == context.SYNC
@tf_export('config.experimental.set_synchronous_execution')
def set_synchronous_execution(enable):
"""Specifies whether operations are executed synchronously or asynchronously.
TensorFlow can execute operations synchronously or asynchronously. If
asynchronous execution is enabled, operations may return "non-ready" handles.
When `enable` is set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Args:
enable: Whether operations should be dispatched synchronously.
Valid values:
- None: sets the system default.
- True: executes each operation synchronously.
- False: executes each operation asynchronously.
"""
if enable is None:
context.context().execution_mode = None
elif enable:
context.context().execution_mode = context.SYNC
else:
context.context().execution_mode = context.ASYNC
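# Editor's note: a minimal illustrative sketch, not part of the original
# module, toggling between synchronous and asynchronous dispatch with the
# functions defined above.
def _synchronous_execution_example():
  set_synchronous_execution(False)   # ops may now return "non-ready" handles
  assert not get_synchronous_execution()
  set_synchronous_execution(True)    # back to synchronous dispatch
  assert get_synchronous_execution()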
@tf_export('config.experimental.list_physical_devices')
def list_physical_devices(device_type=None):
"""Return a list of physical devices visible to the runtime.
Physical devices are hardware devices locally present on the current machine.
By default all discovered CPU and GPU devices are considered visible. Calling
`list_physical_devices` allows querying the hardware prior to runtime
initialization.
The following example ensures the machine can see at least 1 GPU.
```python
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "No GPUs found."
```
Args:
device_type: (optional) Device type to filter by such as "CPU" or "GPU"
Returns:
List of PhysicalDevice objects
"""
return context.context().list_physical_devices(device_type)
@tf_export('config.experimental.list_logical_devices')
def list_logical_devices(device_type=None):
"""Return a list of logical devices created by runtime.
Logical devices may correspond to physical devices or remote devices in the
cluster. Operations and tensors may be placed on these devices by using the
`name` of the LogicalDevice.
For example:
```python
logical_devices = tf.config.experimental.list_logical_devices('GPU')
# Allocate on GPU:0
with tf.device(logical_devices[0].name):
one = tf.constant(1)
# Allocate on GPU:1
with tf.device(logical_devices[1].name):
two = tf.constant(2)
```
Args:
device_type: (optional) Device type to filter by such as "CPU" or "GPU"
Returns:
List of LogicalDevice objects
"""
return context.context().list_logical_devices(device_type=device_type)
@tf_export('config.experimental.get_visible_devices')
def get_visible_devices(device_type=None):
"""Get the list of visible physical devices.
Returns a list of PhysicalDevice objects that are currently marked as visible to
the runtime. Any visible devices will have LogicalDevices assigned to them
once the runtime is initialized.
The following example verifies all visible GPUs have been disabled:
```python
physical_devices = config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
# Disable all GPUS
tf.config.experimental.set_visible_devices([], 'GPU')
visible_devices = tf.config.experimental.get_visible_devices()
for device in visible_devices:
assert device.device_type != 'GPU'
```
Args:
device_type: (optional) Device types to limit query to.
Returns:
List of PhysicalDevice objects
"""
return context.context().get_visible_devices(device_type)
@tf_export('config.experimental.set_visible_devices')
def set_visible_devices(devices, device_type=None):
"""Set the list of visible devices.
Sets the list of PhysicalDevices to be marked as visible to the runtime.
TensorFlow will not allocate memory on, or place any operations on, a device
that is not marked as visible, since no LogicalDevice will be created for it.
By default all discovered devices are marked as visible.
The following example demonstrates disabling the first GPU on the machine.
```python
physical_devices = config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
# Disable first GPU
tf.config.experimental.set_visible_devices(physical_devices[1:], 'GPU')
logical_devices = config.experimental.list_logical_devices('GPU')
# Logical device was not created for first GPU
assert len(logical_devices) == len(physical_devices) - 1
```
Args:
devices: (optional) List of PhysicalDevice objects to make visible
device_type: (optional) Device types to limit visibility configuration to.
Other device types will be left unaltered.
"""
context.context().set_visible_devices(devices, device_type)
@tf_export('config.experimental.get_memory_growth')
def get_memory_growth(device):
"""Get if memory growth is enabled for a PhysicalDevice.
A PhysicalDevice with memory growth set will not allocate all memory on the
device upfront.
For example:
```python
physical_devices = config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
assert tf.config.experimental.get_memory_growth(physical_devices[0]) == True
```
Args:
device: PhysicalDevice to query
Returns:
Current memory growth setting.
"""
return context.context().get_memory_growth(device)
@tf_export('config.experimental.set_memory_growth')
def set_memory_growth(device, enable):
"""Set if memory growth should be enabled for a PhysicalDevice.
A PhysicalDevice with memory growth set will not allocate all memory on the
device upfront. Memory growth cannot be configured on a PhysicalDevice with
virtual devices configured.
For example:
```python
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
```
Args:
device: PhysicalDevice to configure
enable: Whether to enable or disable memory growth
"""
context.context().set_memory_growth(device, enable)
@tf_export('config.experimental.get_virtual_device_configuration')
def get_virtual_device_configuration(device):
"""Get the virtual device configuration for a PhysicalDevice.
Returns the list of VirtualDeviceConfiguration objects previously configured
by a call to `tf.config.experimental.set_virtual_device_configuration()`.
For example:
```python
physical_devices = tf.config.experimental.list_physical_devices('CPU')
assert len(physical_devices) == 1, "No CPUs found"
configs = tf.config.experimental.get_virtual_device_configuration(
physical_devices[0])
assert configs is None
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0],
[tf.config.experimental.VirtualDeviceConfiguration(),
tf.config.experimental.VirtualDeviceConfiguration()])
configs = tf.config.experimental.get_virtual_device_configuration(
physical_devices[0])
assert len(configs) == 2
```
Args:
device: PhysicalDevice to query
Returns:
List of `tf.config.experimental.VirtualDeviceConfiguration` objects or
`None` if no virtual device configuration has been set for this physical
device.
"""
return context.context().get_virtual_device_configuration(device)
@tf_export('config.experimental.set_virtual_device_configuration')
def set_virtual_device_configuration(device, virtual_devices):
"""Set the virtual device configuration for a PhysicalDevice.
A PhysicalDevice marked as visible will by default have a single LogicalDevice
allocated to it once the runtime is configured. Specifying a list of
tf.config.experimental.VirtualDeviceConfiguration objects allows multiple
LogicalDevices to be created that share the same PhysicalDevice.
The following example splits the CPU into 2 virtual devices:
```python
physical_devices = tf.config.experimental.list_physical_devices('CPU')
assert len(physical_devices) == 1, "No CPUs found"
# Specify 2 virtual CPUs. Note that a memory limit is currently not supported.
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0],
[tf.config.experimental.VirtualDeviceConfiguration(),
tf.config.experimental.VirtualDeviceConfiguration()])
logical_devices = tf.config.experimental.list_logical_devices('CPU')
assert len(logical_devices) == 2
try:
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0],
[tf.config.experimental.VirtualDeviceConfiguration(),
tf.config.experimental.VirtualDeviceConfiguration(),
tf.config.experimental.VirtualDeviceConfiguration(),
tf.config.experimental.VirtualDeviceConfiguration()])
except:
print('Cannot modify the virtual devices once they have been initialized.')
```
The following example splits the GPU into 2 virtual devices with 100 MB each:
```python
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "No GPUs found"
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=100),
tf.config.experimental.VirtualDeviceConfiguration(memory_limit=100)])
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
print('Cannot set memory growth when virtual devices configured')
logical_devices = tf.config.experimental.list_logical_devices('GPU')
assert len(logical_devices) == len(physical_devices) + 1
try:
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=10),
tf.config.experimental.VirtualDeviceConfiguration(memory_limit=10)])
except:
print('Cannot modify the virtual devices once they have been initialized.')
```
Args:
device: The PhysicalDevice to configure.
virtual_devices: List of `tf.config.experimental.VirtualDeviceConfiguration`
objects to allocate for the specified PhysicalDevice.
"""
context.context().set_virtual_device_configuration(device, virtual_devices)
|
tensorflow-master
|
tensorflow/python/framework/config.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers for handling composite tensors and composite tensor values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
def is_composite_or_composite_value(tensor):
"""Returns true if 'tensor' is a CompositeTensor or a CT Value object."""
# TODO(b/125094323): This should be isinstance(CompositeTensor) or
# isinstance(CompositeTensorValue) once we support that.
return isinstance(
tensor,
(composite_tensor.CompositeTensor, sparse_tensor.SparseTensorValue,
ragged_tensor_value.RaggedTensorValue))
def get_shape(tensor):
"""Returns the shape of the passed composite tensor."""
if isinstance(tensor, sparse_tensor.SparseTensorValue):
# SparseTensorValues use a 'dense_shape' attribute
return tensor.dense_shape
else:
return tensor.shape
def _append_sparse_tensor_value(target, to_append):
"""Append sparse tensor value objects."""
# Make sure the sparse tensors are of the same size (except for the 0th dim).
if len(target.dense_shape) != len(to_append.dense_shape):
raise RuntimeError(
'Unable to concatenate %s and %s. The inner dense shapes do not '
'have the same number of dimensions (%s vs %s)' %
(target, to_append, target.dense_shape, to_append.dense_shape))
if target.dense_shape[1:] != to_append.dense_shape[1:]:
raise RuntimeError(
'Unable to concatenate %s and %s. The inner dense shapes do not '
'match inner dimensions (%s vs %s)' %
(target, to_append, target.dense_shape[1:], to_append.dense_shape[1:]))
# Add the to_append indices to target, updating the 0th value, and keeping
# track of the maximum so we know the final dense_shape of this tensor.
base_dim0_value = target.dense_shape[0]
max_dim0_value = target.dense_shape[0]
new_indices = target.indices
for index in to_append.indices:
# Here, we iterate through the sparse indices of the tensor to append. For
# each index, we update its zeroth value (the batch index) by adding the
# number of batch items in the tensor we are appending to (so an index
# of [0, 0, 1] for a value that is being appended to a tensor with 0th dim
# size 3 would become [3, 0, 1].)
index[0] += base_dim0_value
max_dim0_value = max(max_dim0_value, index[0])
new_indices = np.append(new_indices, [index], axis=0)
# Extend the values array to contain all of the appended values. These will
# be in the same order as the indices added above.
new_values = np.concatenate((target.values, to_append.values), axis=0)
# Create a new dense shape by replacing the value for the 0th dimension
# with the new max dim0 value.
new_dense_shape = list(target.dense_shape)
new_dense_shape[0] = max_dim0_value + 1
new_dense_shape = tuple(new_dense_shape)
return sparse_tensor.SparseTensorValue(
indices=new_indices, values=new_values, dense_shape=new_dense_shape)
def _append_ragged_tensor_value(target, to_append):
"""Append ragged tensor value objects."""
# Make sure the ragged tensors are of the same size (save for the 0th dim).
if len(target.shape) != len(to_append.shape):
raise RuntimeError('Unable to concatenate %s and %s' % (target, to_append))
if target.shape[1:] != to_append.shape[1:]:
raise RuntimeError('Unable to concatenate %s and %s' % (target, to_append))
adjusted_row_splits = to_append.row_splits[1:] + target.row_splits[-1]
new_row_splits = np.append(target.row_splits, adjusted_row_splits)
if isinstance(target.values, ragged_tensor_value.RaggedTensorValue):
new_values = _append_ragged_tensor_value(target.values, to_append.values)
else:
new_values = np.concatenate((target.values, to_append.values), axis=0)
return ragged_tensor_value.RaggedTensorValue(new_values, new_row_splits)
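# Editor's note: a minimal illustrative sketch, not part of the original
# module. The values and row splits below are arbitrary; they only illustrate
# how `to_append`'s row splits are shifted by the last split of `target`.
def _append_ragged_tensor_value_example():
  target = ragged_tensor_value.RaggedTensorValue(
      values=np.array([1, 2, 3]),
      row_splits=np.array([0, 2, 3], dtype=np.int64))
  to_append = ragged_tensor_value.RaggedTensorValue(
      values=np.array([4]),
      row_splits=np.array([0, 1], dtype=np.int64))
  merged = _append_ragged_tensor_value(target, to_append)
  assert merged.row_splits.tolist() == [0, 2, 3, 4]
  assert merged.values.tolist() == [1, 2, 3, 4]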
def append_composite_tensor(target, to_append):
"""Helper function to append composite tensors to each other in the 0 axis.
In order to support batching within a fit/evaluate/predict call, we need
to be able to aggregate within a CompositeTensor. Unfortunately, the CT
API currently does not make this easy - especially in V1 mode, where we're
working with CompositeTensor Value objects that have no connection with the
CompositeTensors that created them.
Arguments:
target: CompositeTensor or CompositeTensor value object that will be
appended to.
to_append: CompositeTensor or CompositeTensor value object to append to
`target`.
Returns:
A CompositeTensor or CompositeTensor value object.
Raises:
RuntimeError: if concatenation is not possible.
"""
if type(target) is not type(to_append):
raise RuntimeError('Unable to concatenate %s and %s' %
(type(target), type(to_append)))
# Perform type-specific concatenation.
# TODO(b/125094323): This should be replaced by a simple call to
# target.append() that should work on all of the below classes.
# If we're seeing a CompositeTensor here, we know it's because we're in
# Eager mode (or else we'd have evaluated the CT to a CT Value object
# already). Therefore, it's safe to call concat() on it without evaluating
# the result any further. If not - that is, if we're seeing a
# SparseTensorValue or a RaggedTensorValue - we need to hand-update it
# since we're outside of the graph anyways.
if isinstance(target, sparse_tensor.SparseTensor):
# We need to invoke the sparse version of concatenate here - tf.concat
# won't work.
return sparse_ops.sparse_concat(sp_inputs=[target, to_append], axis=0)
elif isinstance(target, ragged_tensor.RaggedTensor):
return ragged_concat_ops.concat([target, to_append], axis=0)
elif isinstance(target, sparse_tensor.SparseTensorValue):
return _append_sparse_tensor_value(target, to_append)
elif isinstance(target, ragged_tensor_value.RaggedTensorValue):
return _append_ragged_tensor_value(target, to_append)
else:
raise RuntimeError('Attempted to concatenate unsupported object %s.' %
type(target))
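# Editor's note: a minimal illustrative sketch, not part of the original
# module. The indices, values and shapes are arbitrary; the point is that the
# appended batch's dim-0 indices are shifted past `target`'s dim-0 extent.
def _append_composite_tensor_example():
  target = sparse_tensor.SparseTensorValue(
      indices=np.array([[0, 0]]), values=np.array([1.0]), dense_shape=(2, 3))
  to_append = sparse_tensor.SparseTensorValue(
      indices=np.array([[1, 2]]), values=np.array([2.0]), dense_shape=(2, 3))
  merged = append_composite_tensor(target, to_append)
  assert merged.indices.tolist() == [[0, 0], [3, 2]]
  assert merged.dense_shape == (4, 3)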
|
tensorflow-master
|
tensorflow/python/framework/composite_tensor_utils.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A utility function for importing TensorFlow graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.core.framework import graph_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python import tf2
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.tf_export import tf_export
def _IsControlInput(input_name):
# Expected format: '^operation_name' (control input).
return input_name.startswith('^')
def _ParseTensorName(tensor_name):
"""Parses a tensor name into an operation name and output index.
This function will canonicalize tensor names as follows:
* "foo:0" -> ("foo", 0)
* "foo:7" -> ("foo", 7)
* "foo" -> ("foo", 0)
* "foo:bar:baz" -> ValueError
Args:
tensor_name: The name of a tensor.
Returns:
A tuple containing the operation name, and the output index.
Raises:
ValueError: If `tensor_name` cannot be interpreted as the name of a tensor.
"""
components = tensor_name.split(':')
if len(components) == 2:
# Expected format: 'operation_name:output_index'.
try:
output_index = int(components[1])
except ValueError:
raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
return components[0], output_index
elif len(components) == 1:
# Expected format: 'operation_name' (implicit 0th output).
return components[0], 0
else:
raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
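# Editor's note: a minimal illustrative sketch, not part of the original
# module, restating the canonicalization rules from the docstring above.
def _parse_tensor_name_example():
  assert _ParseTensorName('foo:7') == ('foo', 7)
  assert _ParseTensorName('foo') == ('foo', 0)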
@contextlib.contextmanager
def _MaybeDevice(device):
"""Applies the given device only if device is not None or empty."""
if device:
with ops.device(device):
yield
else:
yield
def _ProcessGraphDefParam(graph_def, op_dict):
"""Type-checks and possibly canonicalizes `graph_def`."""
if not isinstance(graph_def, graph_pb2.GraphDef):
# `graph_def` could be a dynamically-created message, so try a duck-typed
# approach
try:
old_graph_def = graph_def
graph_def = graph_pb2.GraphDef()
graph_def.MergeFrom(old_graph_def)
except TypeError:
raise TypeError('graph_def must be a GraphDef proto.')
else:
# If we're using the graph_def provided by the caller, modify graph_def
# in-place to add attr defaults to the NodeDefs (this is visible to the
# caller).
# NOTE(skyewm): this is undocumented behavior that at least meta_graph.py
# depends on. It might make sense to move this to meta_graph.py and have
# import_graph_def not modify the graph_def argument (we'd have to make sure
# this doesn't break anything else.)
for node in graph_def.node:
if node.op not in op_dict:
# Assume unrecognized ops are functions for now. TF_ImportGraphDef will
# report an error if the op is actually missing.
continue
op_def = op_dict[node.op]
_SetDefaultAttrValues(node, op_def)
return graph_def
def _ProcessInputMapParam(input_map):
"""Type-checks and possibly canonicalizes `input_map`."""
if input_map is None:
input_map = {}
else:
if not (isinstance(input_map, dict) and all(
isinstance(k, compat.bytes_or_text_types) for k in input_map.keys())):
raise TypeError('input_map must be a dictionary mapping strings to '
'Tensor objects.')
return input_map
def _ProcessReturnElementsParam(return_elements):
"""Type-checks and possibly canonicalizes `return_elements`."""
if return_elements is None:
return None
if not all(
isinstance(x, compat.bytes_or_text_types) for x in return_elements):
raise TypeError('return_elements must be a list of strings.')
return tuple(compat.as_str(x) for x in return_elements)
def _FindAttrInOpDef(attr_name, op_def):
for attr_def in op_def.attr:
if attr_name == attr_def.name:
return attr_def
return None
def _RemoveDefaultAttrs(op_dict, producer_op_list, graph_def):
"""Removes unknown default attrs according to `producer_op_list`.
Removes any unknown attrs in `graph_def` (i.e. attrs that do not appear in
the OpDefs in `op_dict`) that have a default value in `producer_op_list`.
Args:
op_dict: dict mapping operation name to OpDef.
producer_op_list: OpList proto.
graph_def: GraphDef proto
"""
producer_op_dict = {op.name: op for op in producer_op_list.op}
for node in graph_def.node:
# Remove any default attr values that aren't in op_def.
if node.op in producer_op_dict:
op_def = op_dict[node.op]
producer_op_def = producer_op_dict[node.op]
# We make a copy of node.attr to iterate through since we may modify
# node.attr inside the loop.
for key in list(node.attr):
if _FindAttrInOpDef(key, op_def) is None:
# No attr_def in consumer, look in producer.
attr_def = _FindAttrInOpDef(key, producer_op_def)
if (attr_def and attr_def.HasField('default_value') and
node.attr[key] == attr_def.default_value):
# Unknown attr had default value in producer, delete it so it can be
# understood by consumer.
del node.attr[key]
def _ConvertInputMapValues(name, input_map):
"""Ensures all input map values are tensors.
This should be called from inside the import name scope.
Args:
name: the `name` argument passed to import_graph_def
input_map: the `input_map` argument passed to import_graph_def.
Returns:
A possibly-updated version of `input_map`.
Raises:
ValueError: if input map values cannot be converted due to empty name scope.
"""
if not all(isinstance(v, ops.Tensor) for v in input_map.values()):
if name == '': # pylint: disable=g-explicit-bool-comparison
raise ValueError(
'tf.import_graph_def() requires a non-empty `name` if `input_map` '
'contains non-Tensor values. Try calling tf.convert_to_tensor() on '
'`input_map` values before calling tf.import_graph_def().')
with ops.name_scope('_inputs'):
input_map = {k: ops.convert_to_tensor(v) for k, v in input_map.items()}
return input_map
def _PopulateTFImportGraphDefOptions(options, prefix, input_map,
return_elements):
"""Populates the TF_ImportGraphDefOptions `options`."""
c_api.TF_ImportGraphDefOptionsSetPrefix(options, prefix)
c_api.TF_ImportGraphDefOptionsSetUniquifyNames(options, True)
for input_src, input_dst in input_map.items():
input_src = compat.as_str(input_src)
if input_src.startswith('^'):
src_name = compat.as_str(input_src[1:])
dst_op = input_dst._as_tf_output().oper # pylint: disable=protected-access
c_api.TF_ImportGraphDefOptionsRemapControlDependency(
options, src_name, dst_op)
else:
src_name, src_idx = _ParseTensorName(input_src)
src_name = compat.as_str(src_name)
dst_output = input_dst._as_tf_output() # pylint: disable=protected-access
c_api.TF_ImportGraphDefOptionsAddInputMapping(options, src_name, src_idx,
dst_output)
for name in return_elements or []:
if ':' in name:
op_name, index = _ParseTensorName(name)
op_name = compat.as_str(op_name)
c_api.TF_ImportGraphDefOptionsAddReturnOutput(options, op_name, index)
else:
c_api.TF_ImportGraphDefOptionsAddReturnOperation(options,
compat.as_str(name))
def _ProcessNewOps(graph):
"""Processes the newly-added TF_Operations in `graph`."""
# Maps from a node to the names of the ops it's colocated with, if colocation
# is specified in the attributes.
colocation_pairs = {}
for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access
original_device = new_op.device
new_op._set_device('') # pylint: disable=protected-access
colocation_names = _GetColocationNames(new_op)
if colocation_names:
colocation_pairs[new_op] = colocation_names
# Don't set a device for this op, since colocation constraints override
# device functions and the original device. Note that this op's device may
# still be set by the loop below.
# TODO(skyewm): why does it override the original device?
else:
with _MaybeDevice(original_device):
graph._apply_device_functions(new_op) # pylint: disable=protected-access
# The following loop populates the device field of ops that are colocated
# with another op. This is implied by the colocation attribute, but we
# propagate the device field for completeness.
for op, coloc_op_list in colocation_pairs.items():
coloc_device = None
# Find any device in the list of colocated ops that have a device, if it
# exists. We assume that if multiple ops have devices, they refer to the
# same device. Otherwise, a runtime error will occur since the colocation
# property cannot be guaranteed. Note in TF2 colocations have been removed
# from the public API and will be considered a hint, so there is no runtime
# error.
#
# One possible improvement is to try to check for compatibility of all
# devices in this list at import time here, which would require
# implementing a compatibility function for device specs in python.
for coloc_op_name in coloc_op_list:
try:
coloc_op = graph._get_operation_by_name_unsafe(coloc_op_name) # pylint: disable=protected-access
except KeyError:
# Do not error in TF2 if the colocation cannot be guaranteed
if tf2.enabled() or control_flow_util.EnableControlFlowV2(graph):
continue
raise ValueError('Specified colocation to an op that '
'does not exist during import: %s in %s' %
(coloc_op_name, op.name))
if coloc_op.device:
coloc_device = pydev.DeviceSpec.from_string(coloc_op.device)
break
if coloc_device:
op._set_device(coloc_device) # pylint: disable=protected-access
def _GetColocationNames(op):
"""Returns names of the ops that `op` should be colocated with."""
colocation_names = []
try:
class_values = op.get_attr('_class')
except ValueError:
# No _class attr
return
for val in class_values:
val = compat.as_str(val)
if val.startswith('loc:@'):
colocation_node_name = val[len('loc:@'):]
if colocation_node_name != op.name:
colocation_names.append(colocation_node_name)
return colocation_names
def _GatherReturnElements(requested_return_elements, graph, results):
"""Returns the requested return elements from results.
Args:
requested_return_elements: list of strings of operation and tensor names
graph: Graph
results: wrapped TF_ImportGraphDefResults
Returns:
list of `Operation` and/or `Tensor` objects
"""
return_outputs = c_api.TF_ImportGraphDefResultsReturnOutputs(results)
return_opers = c_api.TF_ImportGraphDefResultsReturnOperations(results)
combined_return_elements = []
outputs_idx = 0
opers_idx = 0
for name in requested_return_elements:
if ':' in name:
combined_return_elements.append(
graph._get_tensor_by_tf_output(return_outputs[outputs_idx])) # pylint: disable=protected-access
outputs_idx += 1
else:
combined_return_elements.append(
graph._get_operation_by_tf_operation(return_opers[opers_idx])) # pylint: disable=protected-access
opers_idx += 1
return combined_return_elements
def _SetDefaultAttrValues(node_def, op_def):
"""Set any default attr values in `node_def` that aren't present."""
assert node_def.op == op_def.name
for attr_def in op_def.attr:
key = attr_def.name
if attr_def.HasField('default_value'):
value = node_def.attr[key]
if value is None or value.WhichOneof('value') is None:
node_def.attr[key].CopyFrom(attr_def.default_value)
@tf_export('graph_util.import_graph_def', 'import_graph_def')
@deprecated_args(None, 'Please file an issue at '
'https://github.com/tensorflow/tensorflow/issues if you depend'
' on this feature.', 'op_dict')
def import_graph_def(graph_def,
input_map=None,
return_elements=None,
name=None,
op_dict=None,
producer_op_list=None):
"""Imports the graph from `graph_def` into the current default `Graph`.
This function provides a way to import a serialized TensorFlow
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and extract individual objects in the `GraphDef` as
`tf.Tensor` and `tf.Operation` objects. Once extracted,
these objects are placed into the current default `Graph`. See
`tf.Graph.as_graph_def` for a way to create a `GraphDef`
proto.
Args:
graph_def: A `GraphDef` proto containing operations to be imported into
the default graph.
input_map: A dictionary mapping input names (as strings) in `graph_def`
to `Tensor` objects. The values of the named input tensors in the
imported graph will be re-mapped to the respective `Tensor` values.
return_elements: A list of strings containing operation names in
`graph_def` that will be returned as `Operation` objects; and/or
tensor names in `graph_def` that will be returned as `Tensor` objects.
name: (Optional.) A prefix that will be prepended to the names in
`graph_def`. Note that this does not apply to imported function names.
Defaults to `"import"`.
op_dict: (Optional.) Deprecated, do not use.
producer_op_list: (Optional.) An `OpList` proto with the (possibly stripped)
list of `OpDef`s used by the producer of the graph. If provided,
unrecognized attrs for ops in `graph_def` that have their default value
according to `producer_op_list` will be removed. This will allow some more
`GraphDef`s produced by later binaries to be accepted by earlier binaries.
Returns:
A list of `Operation` and/or `Tensor` objects from the imported graph,
corresponding to the names in `return_elements`,
and None if `return_elements` is None.
Raises:
TypeError: If `graph_def` is not a `GraphDef` proto,
`input_map` is not a dictionary mapping strings to `Tensor` objects,
or `return_elements` is not a list of strings.
ValueError: If `input_map`, or `return_elements` contains names that
do not appear in `graph_def`, or `graph_def` is not well-formed (e.g.
it refers to an unknown tensor).
"""
op_dict = op_def_registry.get_registered_ops()
graph_def = _ProcessGraphDefParam(graph_def, op_dict)
input_map = _ProcessInputMapParam(input_map)
return_elements = _ProcessReturnElementsParam(return_elements)
if producer_op_list is not None:
# TODO(skyewm): make a copy of graph_def so we're not mutating the argument?
_RemoveDefaultAttrs(op_dict, producer_op_list, graph_def)
graph = ops.get_default_graph()
with ops.name_scope(name, 'import', input_map.values()) as scope:
# Save unique prefix generated by name_scope
if scope:
assert scope.endswith('/')
prefix = scope[:-1]
else:
prefix = ''
# Generate any input map tensors inside name scope
input_map = _ConvertInputMapValues(name, input_map)
scoped_options = c_api_util.ScopedTFImportGraphDefOptions()
options = scoped_options.options
_PopulateTFImportGraphDefOptions(options, prefix, input_map,
return_elements)
# _ProcessNewOps mutates the new operations. _mutation_lock ensures a
# Session.run call cannot occur between creating the TF_Operations in the
# TF_GraphImportGraphDefWithResults call and mutating them in
# _ProcessNewOps.
with graph._mutation_lock(): # pylint: disable=protected-access
with c_api_util.tf_buffer(graph_def.SerializeToString()) as serialized:
try:
results = c_api.TF_GraphImportGraphDefWithResults(
graph._c_graph, serialized, options) # pylint: disable=protected-access
results = c_api_util.ScopedTFImportGraphDefResults(results)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
# Create _DefinedFunctions for any imported functions.
#
# We do this by creating _DefinedFunctions directly from `graph_def`, and
# adding them to `graph`. Adding an existing function to a TF_Graph is a
# no-op, so this only has the effect of updating the Python state (usually
# _DefinedFunction.add_to_graph also adds the function to the TF_Graph).
#
# TODO(skyewm): fetch the TF_Functions directly from the TF_Graph
# TODO(skyewm): avoid sending serialized FunctionDefs back to the TF_Graph
_ProcessNewOps(graph)
if graph_def.library and graph_def.library.function:
functions = function.from_library(graph_def.library)
for f in functions:
f.add_to_graph(graph)
# Treat input mappings that don't appear in the graph as an error, because
# they are likely to be due to a typo.
missing_unused_input_keys = (
c_api.TF_ImportGraphDefResultsMissingUnusedInputMappings_wrapper(
results.results))
if missing_unused_input_keys:
missing_unused_input_keys = [
compat.as_str(s) for s in missing_unused_input_keys
]
raise ValueError(
'Attempted to map inputs that were not found in graph_def: [%s]' %
', '.join(missing_unused_input_keys))
if return_elements is None:
return None
else:
return _GatherReturnElements(return_elements, graph, results.results)
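# Editor's note: a minimal illustrative sketch, not part of the original
# module. It round-trips a one-op graph through a GraphDef and fetches the
# imported tensor by name; the constant value and names are arbitrary choices.
def _import_graph_def_example():
  from tensorflow.python.framework import constant_op
  with ops.Graph().as_default() as source_graph:
    constant_op.constant(1.0, name='c')
  with ops.Graph().as_default():
    imported_c, = import_graph_def(
        source_graph.as_graph_def(), return_elements=['c:0'], name='imported')
    # The `name` prefix is prepended to every imported op name.
    assert imported_c.name == 'imported/c:0'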
|
tensorflow-master
|
tensorflow/python/framework/importer.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.random_seed."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class RandomSeedTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testRandomSeed(self):
test_cases = [
# Each test case is a tuple with input to get_seed:
# (input_graph_seed, input_op_seed)
# and output from get_seed:
# (output_graph_seed, output_op_seed)
((None, None), (None, None)),
((None, 1), (random_seed.DEFAULT_GRAPH_SEED, 1)),
((1, 1), (1, 1)),
((0, 0), (0, 2**31 - 1)), # Avoid nondeterministic (0, 0) output
((2**31 - 1, 0), (0, 2**31 - 1)), # Don't wrap to (0, 0) either
((0, 2**31 - 1), (0, 2**31 - 1)), # Wrapping for the other argument
]
if context.executing_eagerly():
# operation seed is random number generated based on global seed.
# it's not tested due to possibility of platform or version difference.
pass
else:
# 0 will be the default_graph._lastid.
test_cases.append(((1, None), (1, 0)))
for tc in test_cases:
tinput, toutput = tc[0], tc[1]
random_seed.set_random_seed(tinput[0])
g_seed, op_seed = random_seed.get_seed(tinput[1])
msg = 'test_case = {0}, got {1}, want {2}'.format(tinput,
(g_seed, op_seed),
toutput)
self.assertEqual((g_seed, op_seed), toutput, msg=msg)
random_seed.set_random_seed(None)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/framework/random_seed_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutomaticControlDependencies and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
# Op types that should not run in program order, e.g. because they need to run
# asynchronously to avoid deadlock.
ASYNC_STATEFUL_OPS = [
"CollectiveGather",
"CollectiveReduce",
"CollectiveBcastSend",
"CollectiveBcastRecv",
"NcclAllReduce",
]
LEGACY_RANDOM_OPS = [
# These may be used in variable initializers -- thus their execution should
# not be dependent on other stateful operations. This is because although
# according to program order, tf.Variables may be created in sequence,
# their initialization happens outside of the program order (specifically,
# in graph mode their initialization happens by calling a grouped
# initializer operation or in eager mode, where initialization is lifted
# out of the tf.function and executed the first time the function is
# executed).
#
# Unless there is a specific dependency between the initializers
# themselves (e.g. one initializer depends on a Variable whose value depends
# on another initializer), the initialization can happen in any order so
# long as it's before the associated Variable read operations.
#
# Note that in general the randomness of legacy random operations is only
# guaranteed by providing a graph-level and op-level seed (and ordering of
# the same op across multiple iterations of a while_loop is specifically not
# guaranteed; see the discussion below).
#
# There is a possible race condition inside while_loop where the same
# random OpKernel instantiation is reused across multiple steps
# of the loop. Since legacy Random OpKernels have an internal rng state,
# automatic dependency tracking across loop steps would likely
# fix this race; and for that case this blacklist is problematic.
# However, since automatic dependency tracking inside while loops is not
# currently supported, and there are no other examples of OpKernel reuse
# (each OpKernel is associated with a unique op in graph mode),
# this blacklist has no effect on the aforementioned behavior.
#
# TODO(ebrevdo,skyewm): Modify the check against this blacklist to
# only occur when the op is inside a "variable initialization scope"; and
# add proper autodeps inside while_loops that respects this updated check.
"RandomUniform",
"RandomUniformInt",
"RandomStandardNormal",
"ParameterizedTruncatedNormal",
"TruncatedNormal",
"RandomShuffle",
"Multinomial",
"RandomGamma",
"RandomGammaGrad",
"RandomPoisson",
"RandomPoissonV2",
]
_ALL_BLACKLISTED_OPS = set(ASYNC_STATEFUL_OPS) | set(LEGACY_RANDOM_OPS)
def op_is_stateful(op_def):
return op_def.is_stateful and op_def.name not in _ALL_BLACKLISTED_OPS
class AutomaticControlDependencies(object):
"""Context manager to automatically add control dependencies.
Code under this context manager will act as if a sensible set of control
dependencies were present. More specifically:
1. All stateful ops in the scope will execute (with the exception of ops in
ASYNC_STATEFUL_OPS and LEGACY_RANDOM_OPS)
2. Stateful ops which modify the same resource will execute in program order
Note: creating variables in an automatic control dependencies context is not
supported (the value of the variables will never change as they will keep
getting reinitialized).
NOT THREAD SAFE
"""
def __init__(self):
self._returned_tensors = set()
self.ops_which_must_run = set()
def mark_as_return(self, tensor):
"""Acts like identity but marks the `Tensor` as a return value.
This will possibly return a copy of the `Tensor`. Usage:
```
with AutomaticControlDependencies() as a:
...
t = a.mark_as_return(t)
_ = ...(t...) # i.e. it's safe to use t here
```
Args:
tensor: the `Tensor` to be marked
Returns:
a copy of the `Tensor`.
"""
if isinstance(tensor, ops.IndexedSlices):
values = array_ops.identity(tensor.values)
indices = array_ops.identity(tensor.indices)
self._returned_tensors.add(indices)
self._returned_tensors.add(values)
return ops.IndexedSlices(values, indices, dense_shape=tensor.dense_shape)
elif isinstance(tensor, sparse_tensor.SparseTensor):
values = array_ops.identity(tensor.values)
indices = array_ops.identity(tensor.indices)
self._returned_tensors.add(indices)
self._returned_tensors.add(values)
return sparse_tensor.SparseTensor(
indices, values, dense_shape=tensor.dense_shape)
elif isinstance(tensor, tensor_array_ops.TensorArray):
flow = array_ops.identity(tensor.flow)
self._returned_tensors.add(flow)
return tensor_array_ops.build_ta_with_new_flow(tensor, flow)
# We want to make the return values depend on the stateful operations, but
# we don't want to introduce a cycle, so we make the return value the result
# of a new identity operation that the stateful operations definitely don't
# depend on.
tensor = array_ops.identity(tensor)
self._returned_tensors.add(tensor)
return tensor
def __enter__(self):
if context.executing_eagerly():
return self
# This code assumes no other thread is adding ops to the graph while
# we're adding ops to the graph.
# TODO(apassos): Fix this by locking the graph or using a temporary
# graph (but that would mess up devices and collections at least,
# probably other things as well).
self._graph = ops.get_default_graph()
self._graph._add_control_dependencies = True # pylint: disable=protected-access
self._n_operations = len(self._graph.get_operations())
return self
def _process_switch(self, switch_op, ops_which_must_run,
last_op_using_resource_tensor, merge_for_resource):
"""Processes a switch node for a resource input.
When tensorflow creates a cond, it creates a control flow context for each
branch of the cond. Each external tensor accessed by that branch is routed
through a switch op, which gets created in the graph _after_ the op which
uses that tensor gets created.
If the resource comes from another switch op we process that one first.
_process_switch creates a corresponding merge node for the switch node. This
merge node is added to the outer control flow context of the switch
node. We also ensure that:
1. The switch node executes after the previous op which used the resource
tensor
2. Any op which uses a resource output of the switch node executes before
the merge for the switch node.
3. The next op which uses the input resource to the switch node (which
might be another switch node for the other branch of the conditional)
will execute after the merge node is done.
4. The merge node is marked as must_run so it will run even if no
subsequent operation uses the resource.
Args:
switch_op: the switch op to be processed
ops_which_must_run: the set of ops which must run
last_op_using_resource_tensor: map from resource tensor to last op using
it
merge_for_resource: map from resource tensor to merge which must follow
all usages of it.
"""
inp = switch_op.inputs[0]
if inp.dtype == dtypes_module.resource and inp.op.type == "Switch":
self._process_switch(inp.op, ops_which_must_run,
last_op_using_resource_tensor, merge_for_resource)
if switch_op.outputs[0] in merge_for_resource:
return
new_merge = control_flow_ops.merge(switch_op.outputs,
name="artificial_merge")
new_merge[0].op._control_flow_context = ( # pylint: disable=protected-access
switch_op._control_flow_context.outer_context) # pylint: disable=protected-access
# Ensures the merge always runs
ops_which_must_run.add(new_merge[0].op)
if inp in last_op_using_resource_tensor:
# Ensures the switch executes after the previous op using the resource.
switch_op._add_control_input(last_op_using_resource_tensor[inp]) # pylint: disable=protected-access
# Ensure the next op outside the cond happens after the merge.
last_op_using_resource_tensor[inp] = new_merge[0].op
if inp in merge_for_resource:
merge_for_resource[inp]._add_control_input(new_merge[0].op) # pylint: disable=protected-access
for o in switch_op.outputs:
# Ensures the merge will execute after all ops inside the cond
merge_for_resource[o] = new_merge[0].op
def __exit__(self, unused_type, unused_value, unused_traceback):
if context.executing_eagerly():
return
if self._graph is not ops.get_default_graph():
raise RuntimeError(
"Graph changed while trying to add control dependencies.")
# pylint: disable=protected-access
if hasattr(self._graph, "outer_graph"):
outer_val = self._graph.outer_graph._add_control_dependencies
self._graph._add_control_dependencies = outer_val
else:
self._graph._add_control_dependencies = False
# pylint: enable=protected-access
# map from resource tensor to the last op which used it
last_op_using_resource_tensor = {}
# set of conditional and loop exits
ops_which_must_run = set()
# merge which must depend on ops which use this resource
merge_for_resource = {}
new_operations = self._graph.get_operations()[self._n_operations:]
# Ensures that uses of resource tensors get serialized properly and all
# execute. This is done by keeping a map from resource tensor to the last op
# in graph-construction order which used it (last_op_using_resource_tensor).
#
# Conditionals are written in TensorFlow such that every external tensor
# accessed in the conditional goes through a switch op and every return
# tensor (it's guaranteed that there will be at least one) goes through a
# merge op.
#
# To handle conditionals, switches are handled in a special way (see
# comments for _process_switch). Merge nodes created by TF's conditional
# logic (as opposed to by _process_switch) are forced to run and also get a
# control dependency added to them to ensure all stateful ops inside their
# control flow context run.
#
# We also ensure that if an op is using a resource output by a switch node
# (that is, a resource tensor for which there's a value in
# merge_for_resource) this op will run before the merge for that resource.
#
# We try to add control inputs to nodes respecting their control flow
# contexts to avoid dead nodes propagating everywhere and leading to
# "retval[0] doesn't have value" errors. If a node gets a control dependency
# on a dead node (i.e. a node from an untaken control flow branch) that node
# will be marked as dead unless it's a merge node.
#
# TODO(apassos): serialize non-resource-taking stateful ops as well, and
# test that it works. Support while loops. Support init_scope escaping from
# this.
for op in new_operations:
# TODO(apassos) make this code safely support while loops.
if control_flow_util.IsInWhileLoop(op):
continue
control_inputs = set()
# Ensure stateful ops run
if (op.type not in self._graph._registered_ops # pylint: disable=protected-access
or op_is_stateful(self._graph._registered_ops[op.type])): # pylint: disable=protected-access
ops_which_must_run.add(op)
# Ignore switches (they're handled separately)
if op.type == "Switch" and op.inputs[0].dtype == dtypes_module.resource:
continue
# Make merges trigger all other computation which must run
if op.type == "Merge":
for o in ops_which_must_run:
op._add_control_input(o) # pylint: disable=protected-access
for inp in o.inputs:
if inp in last_op_using_resource_tensor:
last_op_using_resource_tensor[inp] = op
ops_which_must_run = set([op])
continue
found_resource = False
# Check for any resource inputs. If we find any, we update control_inputs
# and last_op_using_resource_tensor. Note that we dedup op.inputs in case
# op receives the same resource tensor twice as input, which would result
# in op getting a control dependency on itself.
for inp in set(op.inputs):
if inp.dtype != dtypes_module.resource:
continue
found_resource = True
# Deal with switches, finally.
if inp.op.type == "Switch":
self._process_switch(inp.op, ops_which_must_run,
last_op_using_resource_tensor,
merge_for_resource)
# Ensure uses of resources are serialized
if inp in last_op_using_resource_tensor:
if (last_op_using_resource_tensor[inp]._control_flow_context # pylint: disable=protected-access
is op._control_flow_context): # pylint: disable=protected-access
control_inputs.add(last_op_using_resource_tensor[inp])
# Ensure merges happen after the closing of a cond block
if inp in merge_for_resource:
merge_for_resource[inp]._add_control_input(op) # pylint: disable=protected-access
last_op_using_resource_tensor[inp] = op
if (op_is_stateful(op.op_def) and not found_resource
and op._control_flow_context is None): # pylint: disable=protected-access
if None in last_op_using_resource_tensor:
op._add_control_input(last_op_using_resource_tensor[None]) # pylint: disable=protected-access
last_op_using_resource_tensor[None] = op
control_inputs = [c for c in control_inputs
if c._control_flow_context is op._control_flow_context] # pylint: disable=protected-access
op._add_control_inputs(control_inputs) # pylint: disable=protected-access
# Ensure all ops which must run do run
self.ops_which_must_run.update(ops_which_must_run)
for r in nest.flatten(list(self._returned_tensors), expand_composites=True):
if self.ops_which_must_run:
r.op._add_control_inputs( # pylint: disable=protected-access
[o for o in self.ops_which_must_run
if o._control_flow_context is r.op._control_flow_context]) # pylint: disable=protected-access
def automatic_control_dependencies(f):
"""Wraps f to automatically insert control dependencies.
The inserted dependencies ensure that:
1. All stateful ops in f run when the result of f runs
2. Updates to the same resources happen in order.
Args:
f: the function to be wrapped.
Returns:
The wrapped function.
"""
def wrapper(*args, **kwargs):
with AutomaticControlDependencies() as a:
result = f(*args, **kwargs)
result_flat = [a.mark_as_return(t) for t in nest.flatten(result)]
return nest.pack_sequence_as(result, result_flat)
return tf_decorator.make_decorator(f, wrapper)
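# Editor's note: a minimal illustrative sketch, not part of the original
# module. It wraps a trivial graph-building function; the constant value is
# arbitrary. Returned tensors come back through identity ops added by
# `mark_as_return` above.
def _automatic_control_dependencies_example():
  from tensorflow.python.framework import constant_op

  @automatic_control_dependencies
  def build():
    return constant_op.constant(42.0)

  with ops.Graph().as_default():
    out = build()
    assert out.op.type == 'Identity'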
|
tensorflow-master
|
tensorflow/python/framework/auto_control_deps.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for querying registered kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import kernels
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class GetAllRegisteredKernelsTest(test_util.TensorFlowTestCase):
def testFindsAtLeastOneKernel(self):
kernel_list = kernels.get_all_registered_kernels()
self.assertGreater(len(kernel_list.kernel), 0)
class GetRegisteredKernelsForOp(test_util.TensorFlowTestCase):
def testFindsAtLeastOneKernel(self):
kernel_list = kernels.get_registered_kernels_for_op("KernelLabel")
self.assertGreater(len(kernel_list.kernel), 0)
self.assertEqual(kernel_list.kernel[0].op, "KernelLabel")
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/kernels_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.traceable_stack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.framework import traceable_stack
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect as inspect
_LOCAL_OBJECT = lambda x: x
_THIS_FILENAME = inspect.getsourcefile(_LOCAL_OBJECT)
class TraceableObjectTest(test_util.TensorFlowTestCase):
def testSetFilenameAndLineFromCallerUsesCallersStack(self):
t_obj = traceable_stack.TraceableObject(17)
# Do not separate placeholder from the set_filename_and_line_from_caller()
# call one line below it as it is used to calculate the latter's line
# number.
placeholder = lambda x: x
result = t_obj.set_filename_and_line_from_caller()
expected_lineno = inspect.getsourcelines(placeholder)[1] + 1
self.assertEqual(expected_lineno, t_obj.lineno)
self.assertEqual(_THIS_FILENAME, t_obj.filename)
self.assertEqual(t_obj.SUCCESS, result)
def testSetFilenameAndLineFromCallerRespectsOffset(self):
def call_set_filename_and_line_from_caller(t_obj):
# We expect to retrieve the line number from _our_ caller.
return t_obj.set_filename_and_line_from_caller(offset=1)
t_obj = traceable_stack.TraceableObject(None)
# Do not separate placeholder from the
# call_set_filename_and_line_from_caller() call one line below it as it is
# used to calculate the latter's line number.
placeholder = lambda x: x
result = call_set_filename_and_line_from_caller(t_obj)
expected_lineno = inspect.getsourcelines(placeholder)[1] + 1
self.assertEqual(expected_lineno, t_obj.lineno)
self.assertEqual(t_obj.SUCCESS, result)
def testSetFilenameAndLineFromCallerHandlesRidiculousOffset(self):
t_obj = traceable_stack.TraceableObject('The quick brown fox.')
# This line shouldn't die.
result = t_obj.set_filename_and_line_from_caller(offset=300)
# We expect a heuristic to be used because we are not currently 300 frames
# down on the stack. The filename and lineno of the outermost frame are not
# predictable -- in some environments the filename is this test file, but in
# other environments it is not (e.g. due to a test runner calling this
# file). Therefore we only test that the called function knows it applied a
# heuristic for the ridiculous stack offset.
self.assertEqual(t_obj.HEURISTIC_USED, result)
class TraceableStackTest(test_util.TensorFlowTestCase):
def testPushPeekPopObj(self):
t_stack = traceable_stack.TraceableStack()
t_stack.push_obj(42.0)
t_stack.push_obj('hope')
expected_lifo_peek = ['hope', 42.0]
self.assertEqual(expected_lifo_peek, list(t_stack.peek_objs()))
self.assertEqual('hope', t_stack.pop_obj())
self.assertEqual(42.0, t_stack.pop_obj())
def testPushPeekTopObj(self):
t_stack = traceable_stack.TraceableStack()
t_stack.push_obj(42.0)
t_stack.push_obj('hope')
self.assertEqual('hope', t_stack.peek_top_obj())
def testPushPopPreserveLifoOrdering(self):
t_stack = traceable_stack.TraceableStack()
t_stack.push_obj(0)
t_stack.push_obj(1)
t_stack.push_obj(2)
t_stack.push_obj(3)
obj_3 = t_stack.pop_obj()
obj_2 = t_stack.pop_obj()
obj_1 = t_stack.pop_obj()
obj_0 = t_stack.pop_obj()
self.assertEqual(3, obj_3)
self.assertEqual(2, obj_2)
self.assertEqual(1, obj_1)
self.assertEqual(0, obj_0)
def testPushObjSetsFilenameAndLineInfoForCaller(self):
t_stack = traceable_stack.TraceableStack()
# We expect that the line number recorded for the 1-object will come from
# the call to t_stack.push_obj(1). Do not separate the next two lines!
placeholder_1 = lambda x: x
t_stack.push_obj(1)
# We expect that the line number recorded for the 2-object will come from
# the call to call_push_obj() and _not_ the call to t_stack.push_obj().
def call_push_obj(obj):
t_stack.push_obj(obj, offset=1)
# Do not separate the next two lines!
placeholder_2 = lambda x: x
call_push_obj(2)
expected_lineno_1 = inspect.getsourcelines(placeholder_1)[1] + 1
expected_lineno_2 = inspect.getsourcelines(placeholder_2)[1] + 1
t_obj_2, t_obj_1 = t_stack.peek_traceable_objs()
self.assertEqual(expected_lineno_2, t_obj_2.lineno)
self.assertEqual(expected_lineno_1, t_obj_1.lineno)
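# Editor's addition: a minimal illustrative sketch (not part of the original
# test file). It restates what the tests above verify: push_obj records the
# caller's filename and line number on a TraceableObject wrapper.
def _traceable_stack_example():
  stack = traceable_stack.TraceableStack()
  stack.push_obj('payload')                 # records this file and this line
  (t_obj,) = stack.peek_traceable_objs()    # LIFO view of the wrappers
  # t_obj.obj is 'payload'; t_obj.filename/t_obj.lineno point at the push above.
  return stack.peek_top_obj(), t_obj.filename, t_obj.lineno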
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/traceable_stack_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to represent a device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python import tf2
from tensorflow.python.framework import device_spec
if tf2.enabled():
DeviceSpec = device_spec.DeviceSpecV2
else:
DeviceSpec = device_spec.DeviceSpecV1
def check_valid(spec):
"""Check that a device spec is valid.
Args:
spec: a string.
Raises:
An exception if the spec is invalid.
"""
# Construct a DeviceSpec. It will assert a failure if spec is invalid.
DeviceSpec.from_string(spec)
def is_device_spec(obj):
"""Abstract away the fact that DeviceSpecV2 is the base class."""
return isinstance(obj, device_spec.DeviceSpecV2)
def canonical_name(device):
"""Returns a canonical name for the given `DeviceSpec` or device name."""
if device is None:
return ""
if is_device_spec(device):
return device.to_string()
else:
device = DeviceSpec.from_string(device)
return device.to_string()
# Performance caches
_cached_mergers = {}
_cache_lock = threading.RLock()
_string_merge_cache = {}
def merge_device(spec):
"""Returns a device function that merges devices specifications.
This can be used to merge partial specifications of devices. The
innermost setting for a device field takes precedence. For example:
    with tf.device(merge_device("/device:GPU:0")):
# Nodes created here have device "/device:GPU:0"
with tf.device(merge_device("/job:worker")):
# Nodes created here have device "/job:worker/device:GPU:0"
with tf.device(merge_device("/device:CPU:0")):
# Nodes created here have device "/job:worker/device:CPU:0"
with tf.device(merge_device("/job:ps")):
# Nodes created here have device "/job:ps/device:CPU:0"
Args:
spec: A `DeviceSpec` or a device spec string (partially) describing the
device that should be used for all nodes created in the scope of
the returned device function's with block.
Returns:
A MergeDevice object with the above-described behavior.
Raises:
ValueError: if the spec was not valid.
"""
if isinstance(spec, MergeDevice):
return spec
with _cache_lock:
merger = _cached_mergers.get(spec)
if merger:
return merger
merger = MergeDevice(spec)
_cached_mergers[spec] = merger
return merger
class MergeDevice(object):
"""Wraps a device specification (DeviceSpec or str) with merge functionality.
When called, this class will merge a node_def with its own spec. It also
exposes a `shortcut_string_merge` method which can significantly improve
performance of device placement.
"""
def __init__(self, spec):
if isinstance(spec, device_spec.DeviceSpecV2):
self._spec = spec
elif isinstance(spec, device_spec.DeviceSpecV1):
# Capture a snapshot of spec.
self._spec = spec.__class__.from_string(spec.to_string())
else:
self._spec = DeviceSpec.from_string(spec)
def __call__(self, node_def):
# In general a user may create a device function which takes into account
# arbitrary properties of an op. (For instance dynamically placing ops based
# on type.) So even though the standard DeviceSpec route only uses the
# device attribute, we take an entire node_def to maintain a consistent
# signature with general device functions.
current_device = DeviceSpec.from_string(node_def.device or "")
return self._spec.make_merged_spec(current_device)
def shortcut_string_merge(self, node_def):
"""Merge a node def without materializing a full DeviceSpec object.
    Often a device merge is invoked in order to generate a string which can be
    passed into the C API. In such a case, we can cache the
node_def.device -> merge_result_string
map, and in most cases avoid:
- Materializing a copy of self._spec (In the case of DeviceSpecV1)
- Materializing a DeviceSpec for node_def.device
- A DeviceSpec.merge_from invocation
In practice the cache hit rate for this function is very high, because the
number of invocations when iterating through the device stack is much
larger than the number of devices.
Args:
node_def: An Operation (or Operation-like) to merge device constraints
with self._spec
Returns:
A string containing the merged device specification.
"""
device = node_def.device or ""
merge_key = (self._spec, device)
result = _string_merge_cache.get(merge_key)
if result is None:
      # This update is not atomic; however, because the merge is stateless,
      # we don't need to lock when updating the cache.
result = self.__call__(node_def).to_string()
_string_merge_cache[merge_key] = result
return result
def __repr__(self):
return "{} (spec: {})".format(
super(MergeDevice, self).__repr__(), self._spec.to_string())
@property
def is_null_merge(self):
"""Indicate whether the wrapped spec is empty.
In the degenerate case where self._spec is an empty specification, a caller
may wish to skip a merge step entirely. (However this class does not have
enough information to make that determination.)
Returns:
A boolean indicating whether a device merge will be trivial.
"""
return not bool(self._spec.to_string())
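# Editor's addition: a minimal illustrative sketch (not part of the original
# module). The job/device strings below are arbitrary examples.
def _merge_device_example():
  """Shows innermost-wins merging of partial device specifications."""
  outer = merge_device("/job:worker/device:GPU:0")
  inner = merge_device("/device:CPU:0")

  class _FakeNodeDef(object):
    # MergeDevice only reads the `device` attribute of the object it is given.
    device = ""

  node = _FakeNodeDef()
  # The runtime walks the device stack innermost-first; fields already present
  # on the node take precedence over the merger's own spec.
  node.device = inner(node).to_string()   # "/device:CPU:0"
  return outer(node).to_string()          # "/job:worker/device:CPU:0"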
|
tensorflow-master
|
tensorflow/python/framework/device.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registry mechanism for "registering" classes/functions for general use.
This is typically used with a decorator that calls Register for adding
a class or function to a registry.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import tf_stack
# Registry mechanism below is based on mapreduce.python.mrpython.Register.
_LOCATION_TAG = "location"
_TYPE_TAG = "type"
class Registry(object):
"""Provides a registry for saving objects."""
def __init__(self, name):
"""Creates a new registry."""
self._name = name
self._registry = {}
def register(self, candidate, name=None):
"""Registers a Python object "candidate" for the given "name".
Args:
candidate: The candidate object to add to the registry.
name: An optional string specifying the registry key for the candidate.
If None, candidate.__name__ will be used.
Raises:
      KeyError: If the same name is used twice.
"""
if not name:
name = candidate.__name__
if name in self._registry:
(filename, line_number, function_name, _, _) = (
self._registry[name][_LOCATION_TAG])
raise KeyError("Registering two %s with name '%s'! "
"(Previous registration was in %s %s:%d)" %
(self._name, name, function_name, filename, line_number))
logging.vlog(1, "Registering %s (%s) in %s.", name, candidate, self._name)
# stack trace is [this_function, Register(), user_function,...]
# so the user function is #2.
stack = tf_stack.extract_stack(limit=3)
stack_index = min(2, len(stack)-1)
if stack_index >= 0:
location_tag = stack[stack_index]
else:
location_tag = ("UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN")
self._registry[name] = {_TYPE_TAG: candidate, _LOCATION_TAG: location_tag}
def list(self):
"""Lists registered items.
Returns:
A list of names of registered objects.
"""
return self._registry.keys()
def lookup(self, name):
"""Looks up "name".
Args:
name: a string specifying the registry key for the candidate.
Returns:
Registered object if found
Raises:
LookupError: if "name" has not been registered.
"""
name = compat.as_str(name)
if name in self._registry:
return self._registry[name][_TYPE_TAG]
else:
raise LookupError(
"%s registry has no entry for: %s" % (self._name, name))
|
tensorflow-master
|
tensorflow/python/framework/registry.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.function_def_to_graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function_def_to_graph
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework import test_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class FunctionDefToGraphTest(test.TestCase):
def _build_function_def(self):
with ops.Graph().as_default() as g:
# Inputs
x = array_ops.placeholder(dtypes.float32, name="x")
y = array_ops.placeholder(dtypes.float32, name="y")
# Outputs
sum_squares = math_ops.add_n(
[math_ops.pow(x, 2), math_ops.pow(y, 2)], name="sum_squares")
sum_cubes = math_ops.add_n(
[math_ops.pow(x, 3), math_ops.pow(y, 3)], name="sum_cubes")
fdef = graph_to_function_def.graph_to_function_def(
g,
g.get_operations(),
[x, y], # Inputs
[sum_squares, sum_cubes]) # Outputs.
fdef.signature.name = "_whats_in_a_name"
return fdef
@test_util.run_deprecated_v1
def testInputsAndOutputs(self):
fdef = self._build_function_def()
g = function_def_to_graph.function_def_to_graph(fdef)
self.assertEqual(g.name, "_whats_in_a_name")
with self.session(graph=g) as sess:
inputs = sess.run(g.inputs, feed_dict={"x:0": 2, "y:0": 3})
self.assertSequenceEqual(inputs, [2.0, 3.0])
outputs = sess.run(g.outputs, feed_dict={"x:0": 2, "y:0": 3})
self.assertSequenceEqual(outputs, [13.0, 35.0])
def testShapes(self):
fdef = self._build_function_def()
g = function_def_to_graph.function_def_to_graph(fdef)
self.assertIsNone(g.inputs[0].shape.dims) # Unknown dims.
self.assertIsNone(g.inputs[1].shape.dims) # Unknown dims.
self.assertIsNone(g.outputs[0].shape.dims) # Unknown dims.
self.assertIsNone(g.outputs[1].shape.dims) # Unknown dims.
g = function_def_to_graph.function_def_to_graph(
fdef, input_shapes=[tensor_shape.vector(5),
tensor_shape.vector(5)])
self.assertSequenceEqual(g.inputs[0].shape.dims, [5])
self.assertSequenceEqual(g.inputs[1].shape.dims, [5])
self.assertSequenceEqual(g.outputs[0].shape.dims, [5])
self.assertSequenceEqual(g.outputs[1].shape.dims, [5])
g = function_def_to_graph.function_def_to_graph(
fdef, input_shapes=[None, tensor_shape.matrix(5, 7)])
self.assertIsNone(g.inputs[0].shape.dims)
self.assertSequenceEqual(g.inputs[1].shape.dims, [5, 7])
self.assertSequenceEqual(g.outputs[0].shape.dims, [5, 7])
self.assertSequenceEqual(g.outputs[1].shape.dims, [5, 7])
# Should raise a ValueError if the length of input_shapes does not match
# the number of input args in FunctionDef.signature.input_arg.
with self.assertRaises(ValueError):
g = function_def_to_graph.function_def_to_graph(
fdef, input_shapes=[tensor_shape.matrix(5, 7)])
class FunctionDefToGraphDefTest(test.TestCase):
def _build_function_def(self):
with ops.Graph().as_default() as g:
# Inputs: x y z
# |\ | /
# | \ | /
# | foo_1 list_output
# | / \ / \
# | d_1 e_1 a:1 a:0
# | \ | / |
# | \ | / |
# | foo_2 |
# | / \ |
# Outputs: x d_2 e_2 a:0
x = array_ops.placeholder(dtypes.float32, name="x")
y = array_ops.placeholder(dtypes.int32, name="y")
z = array_ops.placeholder(dtypes.int32, name="z")
d_1, e_1 = test_ops._op_def_lib.apply_op(
"Foo1", name="foo_1", a=x, b=y, c=z)
list_output0, list_output1 = test_ops.list_output(
T=[dtypes.int32, dtypes.int32], name="list_output")
d_2, e_2 = test_ops.foo1(a=d_1, b=e_1, c=list_output1, name="foo_2")
fdef = graph_to_function_def.graph_to_function_def(
g,
g.get_operations(),
[x, y, z], # Inputs
[x, d_2, e_2, list_output0]) # Outputs.
# Assert that the FunctionDef was correctly built.
assert len(fdef.node_def) == 3 # 2 Foo1 nodes and 1 ListOutput node.
assert fdef.node_def[0].op == "Foo1"
assert fdef.node_def[0].input == ["x", "y", "z"]
assert fdef.node_def[1].op == "ListOutput"
assert not fdef.node_def[1].input
assert fdef.node_def[2].op == "Foo1"
assert fdef.node_def[2].input == [
"foo_1:d:0", "foo_1:e:0", "list_output:a:1"
]
return fdef
def testTensorNames(self):
fdef = self._build_function_def()
g, tensor_name_map = function_def_to_graph.function_def_to_graph_def(fdef)
# Verify that inputs of body nodes are correctly renamed.
# foo_1
self.assertSequenceEqual(g.node[3].input, ["x:0", "y:0", "z:0"])
# foo_2
self.assertSequenceEqual(g.node[5].input,
["foo_1:0", "foo_1:1", "list_output:1"])
# Verify that the `tensor_name_map` has the correct mapping.
self.assertDictEqual(
tensor_name_map, {
"x": "x:0",
"^x": "^x",
"y": "y:0",
"^y": "^y",
"z": "z:0",
"^z": "^z",
"foo_1:d:0": "foo_1:0",
"foo_1:e:0": "foo_1:1",
"^foo_1": "^foo_1",
"list_output:a:0": "list_output:0",
"list_output:a:1": "list_output:1",
"^list_output": "^list_output",
"foo_2:d:0": "foo_2:0",
"foo_2:e:0": "foo_2:1",
"^foo_2": "^foo_2",
})
def testShapes(self):
fdef = self._build_function_def()
g, _ = function_def_to_graph.function_def_to_graph_def(
fdef,
input_shapes=[tensor_shape.scalar(),
tensor_shape.vector(5), None])
self.assertEqual("shape" in g.node[0].attr, True)
self.assertSequenceEqual(
tensor_shape.TensorShape(g.node[0].attr["shape"].shape).as_list(), [])
self.assertEqual(g.node[0].attr["shape"].shape.unknown_rank, False)
self.assertEqual("shape" in g.node[1].attr, True)
self.assertSequenceEqual(
tensor_shape.TensorShape(g.node[1].attr["shape"].shape).as_list(), [5])
    self.assertEqual(g.node[1].attr["shape"].shape.unknown_rank, False)
self.assertFalse("shape" in g.node[2].attr)
@test_util.run_deprecated_v1
def testFunctionCallsFromFunction(self):
x = constant_op.constant(5.0)
y = constant_op.constant(10.0)
@function.defun
def fn():
@function.defun
def inner_fn():
return x + y
return inner_fn()
@function.defun
def fn2():
return 2 * fn()
fn2_defun = fn2.get_concrete_function()
# Call `fn2` to make sure `fn` is correctly instantiated so
# `function_def_to_graph` can find it.
fn2_defun()
fdef = fn2_defun._inference_function.definition
func_graph = function_def_to_graph.function_def_to_graph(fdef)
with func_graph.as_default():
x_ph, y_ph = func_graph.inputs
with self.session(graph=func_graph) as sess:
self.assertEqual(
sess.run(func_graph.outputs[0], feed_dict={
x_ph: 5.0,
y_ph: 10.0
}), 30.0)
def testControlDependencies(self):
v = variables.Variable(1)
@function.defun
def fn(inp):
assign = v.assign(3, name="assign", read_value=False)
x = constant_op.constant(2.0, name="x")
# TODO(b/79881896): Test external control dependency once that's
# supported.
with ops.control_dependencies([x, inp, assign]):
constant_op.constant(3.0, name="y")
return 4.0
inp = constant_op.constant(1.0)
fdef = fn.get_concrete_function(inp).function_def
func_graph = function_def_to_graph.function_def_to_graph(fdef)
op = func_graph.get_operation_by_name("y")
self.assertEqual(len(op.control_inputs), 3)
self.assertEqual(op.control_inputs[0].name, "assign")
self.assertEqual(op.control_inputs[1].name, "inp")
self.assertEqual(op.control_inputs[2].name, "x")
def testAttributesForArgDef(self):
@function.defun
def fn(x):
return x
inp = constant_op.constant(1.0)
fdef = fn.get_concrete_function(inp).function_def
fdef.arg_attr[0].attr["_test_attr"].s = "value".encode("ascii")
graph_def = function_def_to_graph.function_def_to_graph_def(fdef)
placeholders = [
ndef for ndef in graph_def[0].node if ndef.op == "Placeholder"
]
self.assertEqual(1, len(placeholders))
self.assertEqual(placeholders[0].attr["_test_attr"].s,
"value".encode("ascii"))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/framework/function_def_to_graph_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import traceable_stack
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import tf_stack
def _make_frame_with_filename(op, idx, filename):
"""Return a copy of an existing stack frame with a new filename."""
stack_frame = list(op._traceback[idx])
stack_frame[tf_stack.TB_FILENAME] = filename
return tuple(stack_frame)
def _modify_op_stack_with_filenames(op, num_user_frames, user_filename,
num_inner_tf_frames):
"""Replace op._traceback with a new traceback using special filenames."""
tf_filename = "%d" + error_interpolation._BAD_FILE_SUBSTRINGS[0]
user_filename = os.path.join("%d", "my_favorite_file.py")
num_requested_frames = num_user_frames + num_inner_tf_frames
num_actual_frames = len(op._traceback)
num_outer_frames = num_actual_frames - num_requested_frames
assert num_requested_frames <= num_actual_frames, "Too few real frames."
# The op's traceback has outermost frame at index 0.
stack = []
for idx in range(0, num_outer_frames):
stack.append(op._traceback[idx])
for idx in range(len(stack), len(stack) + num_user_frames):
stack.append(_make_frame_with_filename(op, idx, user_filename % idx))
for idx in range(len(stack), len(stack) + num_inner_tf_frames):
stack.append(_make_frame_with_filename(op, idx, tf_filename % idx))
op._traceback = stack
class ComputeDeviceSummaryFromOpTest(test.TestCase):
def testCorrectFormatWithActiveDeviceAssignments(self):
assignments = []
assignments.append(
traceable_stack.TraceableObject(
"/cpu:0", filename="hope.py", lineno=24))
assignments.append(
traceable_stack.TraceableObject(
"/gpu:2", filename="please.py", lineno=42))
summary = error_interpolation._compute_device_summary_from_list(
"nodename", assignments, prefix=" ")
self.assertIn("nodename", summary)
self.assertIn("tf.device(/cpu:0)", summary)
self.assertIn("<hope.py:24>", summary)
self.assertIn("tf.device(/gpu:2)", summary)
self.assertIn("<please.py:42>", summary)
def testCorrectFormatWhenNoColocationsWereActive(self):
device_assignment_list = []
summary = error_interpolation._compute_device_summary_from_list(
"nodename", device_assignment_list, prefix=" ")
self.assertIn("nodename", summary)
self.assertIn("No device assignments", summary)
class ComputeColocationSummaryFromOpTest(test.TestCase):
def testCorrectFormatWithActiveColocations(self):
t_obj_1 = traceable_stack.TraceableObject(
None, filename="test_1.py", lineno=27)
t_obj_2 = traceable_stack.TraceableObject(
None, filename="test_2.py", lineno=38)
colocation_dict = {
"test_node_1": t_obj_1,
"test_node_2": t_obj_2,
}
summary = error_interpolation._compute_colocation_summary_from_dict(
"node_name", colocation_dict, prefix=" ")
self.assertIn("node_name", summary)
self.assertIn("colocate_with(test_node_1)", summary)
self.assertIn("<test_1.py:27>", summary)
self.assertIn("colocate_with(test_node_2)", summary)
self.assertIn("<test_2.py:38>", summary)
def testCorrectFormatWhenNoColocationsWereActive(self):
colocation_dict = {}
summary = error_interpolation._compute_colocation_summary_from_dict(
"node_name", colocation_dict, prefix=" ")
self.assertIn("node_name", summary)
self.assertIn("No node-device colocations", summary)
@test_util.run_deprecated_v1
class InterpolateFilenamesAndLineNumbersTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
# Add nodes to the graph for retrieval by name later.
constant_op.constant(1, name="One")
constant_op.constant(2, name="Two")
three = constant_op.constant(3, name="Three")
self.graph = three.graph
# Change the list of bad file substrings so that constant_op.py is chosen
# as the defining stack frame for constant_op.constant ops.
self.old_bad_strings = error_interpolation._BAD_FILE_SUBSTRINGS
error_interpolation._BAD_FILE_SUBSTRINGS = [
"%sops.py" % os.sep,
"%sutil" % os.sep,
]
def tearDown(self):
error_interpolation._BAD_FILE_SUBSTRINGS = self.old_bad_strings
def testFindIndexOfDefiningFrameForOp(self):
local_op = constant_op.constant(42).op
user_filename = "hope.py"
_modify_op_stack_with_filenames(
local_op,
num_user_frames=3,
user_filename=user_filename,
num_inner_tf_frames=5)
idx = error_interpolation._find_index_of_defining_frame_for_op(local_op)
    # Expected frame is 6th from the end because there are 5 inner frames with
    # TF filenames.
expected_frame = len(local_op._traceback) - 6
self.assertEqual(expected_frame, idx)
def testFindIndexOfDefiningFrameForOpReturnsZeroOnError(self):
local_op = constant_op.constant(43).op
# Truncate stack to known length.
local_op._traceback = local_op._traceback[:7]
# Ensure all frames look like TF frames.
_modify_op_stack_with_filenames(
local_op,
num_user_frames=0,
user_filename="user_file.py",
num_inner_tf_frames=7)
idx = error_interpolation._find_index_of_defining_frame_for_op(local_op)
self.assertEqual(0, idx)
def testNothingToDo(self):
normal_string = "This is just a normal string"
interpolated_string = error_interpolation.interpolate(
normal_string, self.graph)
self.assertEqual(interpolated_string, normal_string)
def testOneTagWithAFakeNameResultsInPlaceholders(self):
one_tag_string = "{{node MinusOne}}"
interpolated_string = error_interpolation.interpolate(
one_tag_string, self.graph)
self.assertEqual(one_tag_string, interpolated_string)
def testTwoTagsNoSeps(self):
two_tags_no_seps = "{{node One}}{{node Three}}"
interpolated_string = error_interpolation.interpolate(
two_tags_no_seps, self.graph)
self.assertRegexpMatches(interpolated_string,
"constant_op.py:[0-9]+.*constant_op.py:[0-9]+")
def testTwoTagsWithSeps(self):
two_tags_with_seps = ";;;{{node Two}},,,{{node Three}};;;"
interpolated_string = error_interpolation.interpolate(
two_tags_with_seps, self.graph)
expected_regex = (
r"^;;;.*constant_op.py:[0-9]+\) ,,,.*constant_op.py:[0-9]+\) ;;;$")
self.assertRegexpMatches(interpolated_string, expected_regex)
def testNewLine(self):
newline = "\n\n{{node One}}"
interpolated_string = error_interpolation.interpolate(newline, self.graph)
self.assertRegexpMatches(interpolated_string, "constant_op.py:[0-9]+.*")
@test_util.run_deprecated_v1
class InputNodesTest(test.TestCase):
def setUp(self):
# Add nodes to the graph for retrieval by name later.
one = constant_op.constant(1, name="One")
two = constant_op.constant(2, name="Two")
three = math_ops.add(one, two, name="Three")
self.graph = three.graph
# Change the list of bad file substrings so that constant_op.py is chosen
# as the defining stack frame for constant_op.constant ops.
self.old_bad_strings = error_interpolation._BAD_FILE_SUBSTRINGS
error_interpolation._BAD_FILE_SUBSTRINGS = [
"%sops.py" % os.sep,
"%sutil" % os.sep,
]
def tearDown(self):
error_interpolation._BAD_FILE_SUBSTRINGS = self.old_bad_strings
def testNoInputs(self):
two_tags_with_seps = ";;;{{node One}},,,{{node Two}};;;"
interpolated_string = error_interpolation.interpolate(
two_tags_with_seps, self.graph)
expected_regex = (
r"^;;;.*constant_op.py:[0-9]+\) ,,,.*constant_op.py:[0-9]+\) ;;;$")
self.assertRegexpMatches(interpolated_string, expected_regex)
def testBasicInputs(self):
tag = ";;;{{node Three}};;;"
interpolated_string = error_interpolation.interpolate(tag, self.graph)
expected_regex = re.compile(
r"^;;;.*op_def_library.py:[0-9]+\) ;;;.*Input.*constant_op.py:[0-9]+\)",
re.DOTALL)
self.assertRegexpMatches(interpolated_string, expected_regex)
@test_util.run_deprecated_v1
class InterpolateDeviceSummaryTest(test.TestCase):
def _fancy_device_function(self, unused_op):
return "/cpu:*"
def setUp(self):
ops.reset_default_graph()
self.zero = constant_op.constant([0.0], name="zero")
with ops.device("/cpu"):
self.one = constant_op.constant([1.0], name="one")
with ops.device("/cpu:0"):
self.two = constant_op.constant([2.0], name="two")
with ops.device(self._fancy_device_function):
self.three = constant_op.constant(3.0, name="three")
self.graph = self.three.graph
def testNodeZeroHasNoDeviceSummaryInfo(self):
message = "{{colocation_node zero}}"
result = error_interpolation.interpolate(message, self.graph)
self.assertIn("No device assignments were active", result)
def testNodeOneHasExactlyOneInterpolatedDevice(self):
message = "{{colocation_node one}}"
result = error_interpolation.interpolate(message, self.graph)
self.assertEqual(2, result.count("tf.device(/cpu)"))
def testNodeTwoHasTwoInterpolatedDevice(self):
message = "{{colocation_node two}}"
result = error_interpolation.interpolate(message, self.graph)
self.assertEqual(2, result.count("tf.device(/cpu)"))
self.assertEqual(2, result.count("tf.device(/cpu:0)"))
def testNodeThreeHasFancyFunctionDisplayNameForInterpolatedDevice(self):
message = "{{colocation_node three}}"
result = error_interpolation.interpolate(message, self.graph)
num_devices = result.count("tf.device")
self.assertEqual(2, num_devices)
name_re = r"_fancy_device_function<.*error_interpolation_test.py, [0-9]+>"
expected_re = r"with tf.device\(.*%s\)" % name_re
self.assertRegexpMatches(result, expected_re)
@test_util.run_deprecated_v1
class InterpolateColocationSummaryTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
# Add nodes to the graph for retrieval by name later.
node_one = constant_op.constant(1, name="One")
node_two = constant_op.constant(2, name="Two")
# node_three has one colocation group, obviously.
with ops.colocate_with(node_one):
node_three = constant_op.constant(3, name="Three_with_one")
# node_four has one colocation group even though three is (transitively)
# colocated with one.
with ops.colocate_with(node_three):
constant_op.constant(4, name="Four_with_three")
# node_five has two colocation groups because one and two are not colocated.
with ops.colocate_with(node_two):
with ops.colocate_with(node_one):
constant_op.constant(5, name="Five_with_one_with_two")
self.graph = node_three.graph
def testNodeThreeHasColocationInterpolation(self):
message = "{{colocation_node Three_with_one}}"
result = error_interpolation.interpolate(message, self.graph)
self.assertIn("colocate_with(One)", result)
def testNodeFourHasColocationInterpolationForNodeThreeOnly(self):
message = "{{colocation_node Four_with_three}}"
result = error_interpolation.interpolate(message, self.graph)
self.assertIn("colocate_with(Three_with_one)", result)
self.assertNotIn(
"One", result,
"Node One should not appear in Four_with_three's summary:\n%s" % result)
def testNodeFiveHasColocationInterpolationForNodeOneAndTwo(self):
message = "{{colocation_node Five_with_one_with_two}}"
result = error_interpolation.interpolate(message, self.graph)
self.assertIn("colocate_with(One)", result)
self.assertIn("colocate_with(Two)", result)
def testColocationInterpolationForNodeLackingColocation(self):
message = "{{colocation_node One}}"
result = error_interpolation.interpolate(message, self.graph)
self.assertIn("No node-device colocations", result)
self.assertNotIn("Two", result)
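# Editor's addition: a minimal illustrative sketch (not part of the original
# test file). The node name below is an arbitrary example.
def _interpolate_example():
  """Expands a "{{node NAME}}" tag into the defining file/line of that node."""
  g = ops.Graph()
  with g.as_default():
    constant_op.constant(1, name="ExampleNode")
  message = "Something failed at {{node ExampleNode}}"
  return error_interpolation.interpolate(message, g)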
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/framework/error_interpolation_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function for loading TensorFlow plugins."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import errno
import hashlib
import imp
import os
import platform
import sys
import threading # pylint: disable=unused-import
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.lib.core import error_codes_pb2 # pylint: disable=unused-import
from tensorflow.python import pywrap_tensorflow as py_tf
from tensorflow.python.lib.io import file_io
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export('load_op_library')
def load_op_library(library_filename):
"""Loads a TensorFlow plugin, containing custom ops and kernels.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here. When the
library is loaded, ops and kernels registered in the library via the
`REGISTER_*` macros are made available in the TensorFlow process. Note
that ops with the same name as an existing op are rejected and not
registered with the process.
Args:
library_filename: Path to the plugin.
Relative or absolute filesystem path to a dynamic library file.
Returns:
A python module containing the Python wrappers for Ops defined in
the plugin.
Raises:
RuntimeError: when unable to load the library or get the python wrappers.
"""
lib_handle = py_tf.TF_LoadLibrary(library_filename)
op_list_str = py_tf.TF_GetOpList(lib_handle)
op_list = op_def_pb2.OpList()
op_list.ParseFromString(compat.as_bytes(op_list_str))
wrappers = py_tf.GetPythonWrappers(op_list_str)
  # Delete the library handle to release any memory held in C
  # that is no longer needed.
py_tf.TF_DeleteLibraryHandle(lib_handle)
# Get a unique name for the module.
module_name = hashlib.md5(wrappers).hexdigest()
if module_name in sys.modules:
return sys.modules[module_name]
module = imp.new_module(module_name)
# pylint: disable=exec-used
exec(wrappers, module.__dict__)
# Stash away the library handle for making calls into the dynamic library.
module.LIB_HANDLE = lib_handle
# OpDefs of the list of ops defined in the library.
module.OP_LIST = op_list
# Allow this to be recognized by AutoGraph.
setattr(module, '_IS_TENSORFLOW_PLUGIN', True)
sys.modules[module_name] = module
return module
@deprecation.deprecated(date=None,
instructions='Use `tf.load_library` instead.')
@tf_export(v1=['load_file_system_library'])
def load_file_system_library(library_filename):
"""Loads a TensorFlow plugin, containing file system implementation.
Pass `library_filename` to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
Args:
library_filename: Path to the plugin.
Relative or absolute filesystem path to a dynamic library file.
Returns:
None.
Raises:
RuntimeError: when unable to load the library.
"""
py_tf.TF_LoadLibrary(library_filename)
def _is_shared_object(filename):
"""Check the file to see if it is a shared object, only using extension."""
if platform.system() == 'Linux':
if filename.endswith('.so'):
return True
else:
index = filename.rfind('.so.')
if index == -1:
return False
else:
        # A shared object with the API version in the filename.
return filename[index + 4].isdecimal()
elif platform.system() == 'Darwin':
return filename.endswith('.dylib')
elif platform.system() == 'Windows':
return filename.endswith('.dll')
else:
return False
@tf_export('load_library')
def load_library(library_location):
"""Loads a TensorFlow plugin.
"library_location" can be a path to a specific shared object, or a folder.
If it is a folder, all shared objects that are named "libtfkernel*" will be
loaded. When the library is loaded, kernels registered in the library via the
`REGISTER_*` macros are made available in the TensorFlow process.
Args:
library_location: Path to the plugin or the folder of plugins.
Relative or absolute filesystem path to a dynamic library file or folder.
Returns:
None
Raises:
OSError: When the file to be loaded is not found.
RuntimeError: when unable to load the library.
"""
if file_io.file_exists(library_location):
if file_io.is_directory(library_location):
directory_contents = file_io.list_directory(library_location)
kernel_libraries = [
os.path.join(library_location, f) for f in directory_contents
if _is_shared_object(f)]
else:
kernel_libraries = [library_location]
for lib in kernel_libraries:
py_tf.TF_LoadLibrary(lib)
else:
raise OSError(
errno.ENOENT,
'The file or folder to load kernel libraries from does not exist.',
library_location)
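# Editor's addition: a minimal illustrative sketch (not part of the original
# module). The .so path below is hypothetical and would need to point at a real
# plugin built with the REGISTER_* macros.
def _load_custom_op_example():
  # Returns a Python module exposing wrappers for the ops in the plugin;
  # module.OP_LIST holds the OpDefs the library registered.
  plugin_module = load_op_library("/path/to/custom_ops.so")
  return plugin_module.OP_LIST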
|
tensorflow-master
|
tensorflow/python/framework/load_library.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for using the TensorFlow C API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import api_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
class ScopedTFStatus(object):
"""Wrapper around TF_Status that handles deletion."""
def __init__(self):
self.status = c_api.TF_NewStatus()
def __del__(self):
    # Note: when we're destructing the global context (i.e. when the process
    # is terminating) other modules may already have been deleted.
if c_api is not None and c_api.TF_DeleteStatus is not None:
c_api.TF_DeleteStatus(self.status)
class ScopedTFGraph(object):
"""Wrapper around TF_Graph that handles deletion."""
def __init__(self):
self.graph = c_api.TF_NewGraph()
def __del__(self):
    # Note: when we're destructing the global context (i.e. when the process
    # is terminating) other modules may already have been deleted.
if c_api is not None and c_api.TF_DeleteGraph is not None:
c_api.TF_DeleteGraph(self.graph)
class ScopedTFImportGraphDefOptions(object):
"""Wrapper around TF_ImportGraphDefOptions that handles deletion."""
def __init__(self):
self.options = c_api.TF_NewImportGraphDefOptions()
def __del__(self):
    # Note: when we're destructing the global context (i.e. when the process
    # is terminating) other modules may already have been deleted.
if c_api is not None and c_api.TF_DeleteImportGraphDefOptions is not None:
c_api.TF_DeleteImportGraphDefOptions(self.options)
class ScopedTFImportGraphDefResults(object):
"""Wrapper around TF_ImportGraphDefOptions that handles deletion."""
def __init__(self, results):
self.results = results
def __del__(self):
    # Note: when we're destructing the global context (i.e. when the process
    # is terminating) other modules may already have been deleted.
if c_api is not None and c_api.TF_DeleteImportGraphDefResults is not None:
c_api.TF_DeleteImportGraphDefResults(self.results)
class ScopedTFFunction(object):
"""Wrapper around TF_Function that handles deletion."""
def __init__(self, func):
self.func = func
def __del__(self):
    # Note: when we're destructing the global context (i.e. when the process
    # is terminating) other modules may already have been deleted.
if c_api is not None and c_api.TF_DeleteFunction is not None:
if self.func is not None:
c_api.TF_DeleteFunction(self.func)
self.func = None
class ApiDefMap(object):
"""Wrapper around Tf_ApiDefMap that handles querying and deletion.
The OpDef protos are also stored in this class so that they could
be queried by op name.
"""
def __init__(self):
op_def_proto = op_def_pb2.OpList()
buf = c_api.TF_GetAllOpList()
try:
op_def_proto.ParseFromString(c_api.TF_GetBuffer(buf))
self._api_def_map = c_api.TF_NewApiDefMap(buf)
finally:
c_api.TF_DeleteBuffer(buf)
self._op_per_name = {}
for op in op_def_proto.op:
self._op_per_name[op.name] = op
def __del__(self):
    # Note: when we're destructing the global context (i.e. when the process
    # is terminating) other modules may already have been deleted.
if c_api is not None and c_api.TF_DeleteApiDefMap is not None:
c_api.TF_DeleteApiDefMap(self._api_def_map)
def put_api_def(self, text):
c_api.TF_ApiDefMapPut(self._api_def_map, text, len(text))
def get_api_def(self, op_name):
api_def_proto = api_def_pb2.ApiDef()
buf = c_api.TF_ApiDefMapGet(self._api_def_map, op_name, len(op_name))
try:
api_def_proto.ParseFromString(c_api.TF_GetBuffer(buf))
finally:
c_api.TF_DeleteBuffer(buf)
return api_def_proto
def get_op_def(self, op_name):
if op_name in self._op_per_name:
return self._op_per_name[op_name]
raise ValueError("No entry found for " + op_name + ".")
def op_names(self):
return self._op_per_name.keys()
@tf_contextlib.contextmanager
def tf_buffer(data=None):
"""Context manager that creates and deletes TF_Buffer.
Example usage:
with tf_buffer() as buf:
# get serialized graph def into buf
...
proto_data = c_api.TF_GetBuffer(buf)
graph_def.ParseFromString(compat.as_bytes(proto_data))
# buf has been deleted
with tf_buffer(some_string) as buf:
c_api.TF_SomeFunction(buf)
# buf has been deleted
Args:
data: An optional `bytes`, `str`, or `unicode` object. If not None, the
yielded buffer will contain this data.
Yields:
Created TF_Buffer
"""
if data:
buf = c_api.TF_NewBufferFromString(compat.as_bytes(data))
else:
buf = c_api.TF_NewBuffer()
try:
yield buf
finally:
c_api.TF_DeleteBuffer(buf)
def tf_output(c_op, index):
"""Returns a wrapped TF_Output with specified operation and index.
Args:
c_op: wrapped TF_Operation
index: integer
Returns:
Wrapped TF_Output
"""
ret = c_api.TF_Output()
ret.oper = c_op
ret.index = index
return ret
def tf_operations(graph):
"""Generator that yields every TF_Operation in `graph`.
Args:
graph: Graph
Yields:
wrapped TF_Operation
"""
# pylint: disable=protected-access
pos = 0
c_op, pos = c_api.TF_GraphNextOperation(graph._c_graph, pos)
while c_op is not None:
yield c_op
c_op, pos = c_api.TF_GraphNextOperation(graph._c_graph, pos)
# pylint: enable=protected-access
def new_tf_operations(graph):
"""Generator that yields newly-added TF_Operations in `graph`.
Specifically, yields TF_Operations that don't have associated Operations in
`graph`. This is useful for processing nodes added by the C API.
Args:
graph: Graph
Yields:
wrapped TF_Operation
"""
# TODO(b/69679162): do this more efficiently
for c_op in tf_operations(graph):
try:
graph._get_operation_by_tf_operation(c_op) # pylint: disable=protected-access
except KeyError:
yield c_op
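# Editor's addition: a minimal illustrative sketch (not part of the original
# module). "MatMul" is just an example of a registered op name.
def _api_def_map_example():
  api_map = ApiDefMap()
  # All registered op names can be enumerated, and their OpDef protos fetched.
  assert "MatMul" in api_map.op_names()
  return api_map.get_op_def("MatMul").name  # -> "MatMul"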
|
tensorflow-master
|
tensorflow/python/framework/c_api_util.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.composite_tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import sys
import weakref
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.platform import googletest
from tensorflow.python.util import nest
class CTSpec(type_spec.TypeSpec):
"""A generic CompositeTensor TypeSpec, used for constructing tests."""
def __init__(self, component_specs, metadata=None):
self.component_specs = component_specs
self.metadata = metadata
value_type = property(lambda self: CT)
_component_specs = property(lambda self: self.component_specs)
def _serialize(self):
return (self.component_specs, self.metadata)
def _to_components(self, value):
return value.components
def _from_components(self, tensor_list):
return CT(tensor_list, self.metadata)
class CT(composite_tensor.CompositeTensor):
"""A generic CompositeTensor, used for constructing tests."""
_type_spec_class = CTSpec
def __init__(self, components, metadata=None):
if isinstance(components, list):
components = tuple(components)
self.components = components
self.metadata = metadata
@property
def _type_spec(self):
component_specs = nest.map_structure(type_spec.type_spec_from_value,
self.components)
return self._type_spec_class(component_specs, self.metadata)
def __repr__(self):
return '%s(%r, %r)' % (type(self).__name__, self.components, self.metadata)
def __eq__(self, other):
return (type(self) is type(other) and
self.components == other.components and
self.metadata == other.metadata)
# Another test CompositeTensor class. `tf.nest` should treat different CT
# classes as different structure types (e.g. for assert_same_structure).
class CTSpec2(CTSpec):
pass
class CT2(CT):
_type_spec_class = CTSpec2
@test_util.run_all_in_graph_and_eager_modes
class CompositeTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters([
{'structure': CT(0),
'expected': [0],
'paths': [('CT',)]},
{'structure': CT('a'),
'expected': ['a'],
'paths': [('CT',)]},
{'structure': CT(['a', 'b', 'c']),
'expected': ['a', 'b', 'c'],
'paths': [('CT', 0), ('CT', 1), ('CT', 2)]},
{'structure': CT({'x': 'a', 'y': 'b', 'z': 'c'}),
'expected': ['a', 'b', 'c'],
'paths': [('CT', 'x'), ('CT', 'y'), ('CT', 'z')]},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'expected': ['a', 'b', 'c'],
'paths': [(0, 'k1', 'CT'), (1, 'CT', 0), (1, 'CT', 1, 'x', 'CT', 'y')]},
{'structure': CT(0),
'expand_composites': False,
'expected': [CT(0)],
'paths': [()]},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'expand_composites': False,
'expected': [CT('a'), CT(['b', {'x': CT({'y': 'c'})}])],
'paths': [(0, 'k1'), (1,)]},
]) # pyformat: disable
def testNestFlatten(self, structure, expected, paths, expand_composites=True):
result = nest.flatten(structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
result_with_paths = nest.flatten_with_tuple_paths(
structure, expand_composites=expand_composites)
self.assertEqual(result_with_paths, list(zip(paths, expected)))
string_paths = ['/'.join(str(p) for p in path) for path in paths] # pylint: disable=g-complex-comprehension
result_with_string_paths = nest.flatten_with_joined_string_paths(
structure, expand_composites=expand_composites)
self.assertEqual(result_with_string_paths,
list(zip(string_paths, expected)))
flat_paths_result = list(
nest.yield_flat_paths(structure, expand_composites=expand_composites))
self.assertEqual(flat_paths_result, paths)
@parameterized.parameters([
{'s1': [1, 2, 3],
's2': [CT(['a', 'b']), 'c', 'd'],
'expand_composites': False,
'expected': [CT(['a', 'b']), 'c', 'd'],
'paths': [(0,), (1,), (2,)]},
{'s1': [CT([1, 2, 3])],
's2': [5],
'expand_composites': False,
'expected': [5],
'paths': [(0,)]},
{'s1': [[CT([9, 9, 9])], 999, {'y': CT([9, 9])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expand_composites': False,
'expected': [CT([1, 2, 3]), 100, CT([CT([4, 5]), 6])],
'paths': [(0, 0), (1,), (2, 'y')]},
{'s1': [[CT([9, 9, 9])], 999, {'y': CT([CT([9, 9]), 9])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}],
'expand_composites': False,
'expected': [CT([1, 2, 3]), 100, CT([5, 6])],
'paths': [(0, 0), (1,), (2, 'y')]},
]) # pyformat: disable
def testNestFlattenUpTo(self, s1, s2, expected, paths,
expand_composites=True):
result = nest.flatten_up_to(s1, s2, expand_composites=expand_composites)
self.assertEqual(expected, result)
result_with_paths = nest.flatten_with_tuple_paths_up_to(
s1, s2, expand_composites=expand_composites)
self.assertEqual(result_with_paths, list(zip(paths, expected)))
@parameterized.parameters([
{'structure': CT(0),
'sequence': [5],
'expected': CT(5)},
{'structure': CT(['a', 'b', 'c']),
'sequence': ['A', CT(['b']), {'x': 'y'}],
'expected': CT(['A', CT(['b']), {'x': 'y'}])},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'sequence': ['A', 'B', 'C'],
'expected': [{'k1': CT('A')}, CT(['B', {'x': CT({'y': 'C'})}])]},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'sequence': ['A', 'B'],
'expand_composites': False,
'expected': [{'k1': 'A'}, 'B']},
{'structure': CT(0, metadata='abc'),
'sequence': [5],
'expected': CT(5, metadata='abc')},
]) # pyformat: disable
def testNestPackSequenceAs(self,
structure,
sequence,
expected,
expand_composites=True):
result = nest.pack_sequence_as(
structure, sequence, expand_composites=expand_composites)
self.assertEqual(result, expected)
@parameterized.parameters([
{'s1': CT('abc'), 's2': CT('xyz')},
{'s1': CT(['a', 'b', 'c']), 's2': CT(['d', 'e', 'f'])},
{'s1': [1, CT([10]), CT(200, metadata='xyz')],
's2': [8, CT([55]), CT(100, metadata='xyz')]},
]) # pyformat: disable
def testNestAssertSameStructure(self, s1, s2, expand_composites=True):
nest.assert_same_structure(s1, s2, expand_composites=expand_composites)
nest.assert_shallow_structure(s1, s2, expand_composites=expand_composites)
@parameterized.parameters([
{'s1': CT(0), 's2': CT(['x'])},
{'s1': CT([1]), 's2': CT([1, 2])},
{'s1': CT({'x': 1}), 's2': CT({'y': 1})},
{'s1': CT(0), 's2': CT(0, metadata='xyz')},
{'s1': CT(0, metadata='xyz'), 's2': CT(0)},
{'s1': CT(0, metadata='xyz'), 's2': CT(0, metadata='abc')},
{'s1': CT(['a', 'b', 'c']), 's2': CT(['d', 'e'])},
{'s1': [1, CT(['a']), CT('b', metadata='xyz')],
's2': [8, CT([55, 66]), CT(100, metadata='abc')]},
{'s1': CT(0), 's2': CT2(0), 'error': TypeError},
]) # pyformat: disable
def testNestAssertSameStructureCompositeMismatch(self,
s1,
s2,
error=ValueError):
# s1 and s2 have the same structure if expand_composites=False; but
# different structures if expand_composites=True.
nest.assert_same_structure(s1, s2, expand_composites=False)
nest.assert_shallow_structure(s1, s2, expand_composites=False)
with self.assertRaises(error): # pylint: disable=g-error-prone-assert-raises
nest.assert_same_structure(s1, s2, expand_composites=True)
@parameterized.parameters([
# Note: there are additional test cases in testNestAssertSameStructure.
{'s1': [1], 's2': [CT(1)]},
{'s1': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expand_composites': False},
{'s1': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}],
'expand_composites': False},
]) # pyformat: disable
def testNestAssertShallowStructure(self, s1, s2, expand_composites=True):
nest.assert_shallow_structure(s1, s2, expand_composites=expand_composites)
@parameterized.parameters([
# Note: there are additional test cases in
# testNestAssertSameStructureCompositeMismatch.
{'s1': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}]},
{'s1': CT([1, 2, 3]),
's2': [1, 2, 3],
'check_types': False},
]) # pyformat: disable
def testNestAssertShallowStructureCompositeMismatch(self,
s1,
s2,
check_types=True):
with self.assertRaises((TypeError, ValueError)): # pylint: disable=g-error-prone-assert-raises
nest.assert_shallow_structure(
s1, s2, expand_composites=True, check_types=check_types)
@parameterized.parameters([
{'structure': CT(1, metadata=2),
'expected': CT(11, metadata=2)},
{'structure': CT({'x': 1, 'y': [2, 3]}, metadata=2),
'expected': CT({'x': 11, 'y': [12, 13]}, metadata=2)},
{'structure': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expected': [[CT([11, 12, 13])], 110, {'y': CT([CT([14, 15]), 16])}]},
]) # pyformat: disable
def testNestMapStructure(self, structure, expected, expand_composites=True):
func = lambda x: x + 10
result = nest.map_structure(
func, structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
@parameterized.parameters([
{'s1': [[CT([1, 2, 3])], 100, {'y': 4}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expected': [[CT([11, 12, 13])], 110, {'y': CT([CT([4, 5]), 6])}]}
]) # pyformat: disable
def testNestMapStructureUpTo(self, s1, s2, expected):
func = lambda x: x + 10 if isinstance(x, int) else x
result = nest.map_structure_up_to(s1, func, s2, expand_composites=True)
self.assertEqual(result, expected)
@parameterized.parameters([
{'structure': CT('a'),
'expected': CT('CT:a')},
{'structure': CT(['a', 'b']),
'expected': CT(['CT/0:a', 'CT/1:b'])},
{'structure': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expected': [
[CT(['0/0/CT/0:1', '0/0/CT/1:2', '0/0/CT/2:3'])],
'1:100',
{'y': CT([CT(['2/y/CT/0/CT/0:4', '2/y/CT/0/CT/1:5']),
'2/y/CT/1:6'])}]},
]) # pyformat: disable
def testNestMapStructureWithPaths(self,
structure,
expected,
expand_composites=True):
def func1(path, x):
return '%s:%s' % (path, x)
result = nest.map_structure_with_paths(
func1, structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
# Use the same test cases for map_structure_with_tuple_paths.
def func2(tuple_path, x):
return '%s:%s' % ('/'.join(str(v) for v in tuple_path), x)
result = nest.map_structure_with_tuple_paths(
func2, structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
@parameterized.parameters([
{'s1': [[CT([1, 2, 3])], 100, {'y': [4, 5]}],
's2': [[CT([1, 2, 3])], 100, {'y': [CT([4, 5]), 6]}],
'expected': [
[CT(['0/0/CT/0:1', '0/0/CT/1:2', '0/0/CT/2:3'])],
('1:100'),
{'y': ['2/y/0:CT((4, 5), None)', '2/y/1:6']}]},
]) # pyformat: disable
def testNestMapStructureWithTuplePathsUpTo(self, s1, s2, expected):
def func(tuple_path, x):
return '%s:%s' % ('/'.join(str(v) for v in tuple_path), x)
result = nest.map_structure_with_tuple_paths_up_to(
s1, func, s2, expand_composites=True)
self.assertEqual(result, expected)
def testNestGetTraverseShallowStructure(self):
func = lambda t: not (isinstance(t, CT) and t.metadata == 'B')
structure = [CT([1, 2], metadata='A'), CT([CT(3)], metadata='B')]
result = nest.get_traverse_shallow_structure(
func, structure, expand_composites=True)
expected = [CT([True, True], metadata='A'), False]
self.assertEqual(result, expected)
def testMemoryIsFreed(self):
# Note: we use `np.array` values for CT and `set` values for
# metadata because we need to construct weakrefs to them. Other builtin
# types, such as `list` and `tuple`, do not support weakrefs.
ct1 = CT(np.array([1, 2]), set(['no', 'leaks']))
ct2 = CT(np.array([3, 4]), set(['no', 'leaks']))
ct3 = CT(np.array([5, 6]), set(['other', 'metadata']))
# Note: map_structure exercises flatten, pack_sequence_as, and
# assert_same_structure.
func = lambda x, y: x + y
ct4 = nest.map_structure(func, ct1, ct2, expand_composites=True)
# Check that the exception-raising path in assert_same_structure
# doesn't leak any objects.
with self.assertRaises(ValueError):
nest.map_structure(func, ct2, ct3, expand_composites=True)
if hasattr(sys, 'exc_clear'):
sys.exc_clear() # Remove any references in exception stack traces.
refs = []
for ct in [ct1, ct2, ct3, ct4]:
refs.append(weakref.ref(ct))
refs.append(weakref.ref(ct.components))
refs.append(weakref.ref(ct.metadata))
del ct # pylint: disable=undefined-loop-variable
for ref in refs:
self.assertIsNotNone(ref())
del ct1, ct2, ct3, ct4
gc.collect()
for ref in refs:
self.assertIsNone(ref())
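# Editor's addition: a minimal illustrative sketch (not part of the original
# test file) of what the CT helper class above is for.
def _ct_flatten_example():
  """Shows how tf.nest expands a CompositeTensor into its components."""
  ct = CT({'x': 1, 'y': 2}, metadata='meta')
  expanded = nest.flatten(ct, expand_composites=True)   # [1, 2]
  opaque = nest.flatten(ct, expand_composites=False)    # [ct], treated as a leaf
  return expanded, opaque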
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/composite_tensor_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# Fallback in case fast_tensor_util is not properly compiled.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
# pylint: enable=g-import-not-at-top
def ExtractBitsFromFloat16(x):
return np.asarray(x, dtype=np.float16).view(np.uint16).item()
def SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.half_val.extend(
[ExtractBitsFromFloat16(x) for x in proto_values])
def _MediumAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
# TODO: Remove the conversion if cython supports np.float16_t
fast_tensor_util.AppendFloat16ArrayToTensorProto(
tensor_proto,
np.asarray(proto_values, dtype=np.float16).view(np.uint16))
def ExtractBitsFromBFloat16(x):
return np.asarray(
x, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16).item()
def SlowAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.half_val.extend(
[ExtractBitsFromBFloat16(x) for x in proto_values])
def FastAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
fast_tensor_util.AppendBFloat16ArrayToTensorProto(
tensor_proto, np.asarray(
proto_values, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16))
if _FAST_TENSOR_UTIL_AVAILABLE:
_NP_TO_APPEND_FN = {
dtypes.bfloat16.as_numpy_dtype:
FastAppendBFloat16ArrayToTensorProto,
np.float16:
_MediumAppendFloat16ArrayToTensorProto,
np.float32:
fast_tensor_util.AppendFloat32ArrayToTensorProto,
np.float64:
fast_tensor_util.AppendFloat64ArrayToTensorProto,
np.int32:
fast_tensor_util.AppendInt32ArrayToTensorProto,
np.int64:
fast_tensor_util.AppendInt64ArrayToTensorProto,
np.uint8:
fast_tensor_util.AppendUInt8ArrayToTensorProto,
np.uint16:
fast_tensor_util.AppendUInt16ArrayToTensorProto,
np.uint32:
fast_tensor_util.AppendUInt32ArrayToTensorProto,
np.uint64:
fast_tensor_util.AppendUInt64ArrayToTensorProto,
np.int8:
fast_tensor_util.AppendInt8ArrayToTensorProto,
np.int16:
fast_tensor_util.AppendInt16ArrayToTensorProto,
np.complex64:
fast_tensor_util.AppendComplex64ArrayToTensorProto,
np.complex128:
fast_tensor_util.AppendComplex128ArrayToTensorProto,
np.object:
fast_tensor_util.AppendObjectArrayToTensorProto,
np.bool:
fast_tensor_util.AppendBoolArrayToTensorProto,
dtypes.qint8.as_numpy_dtype:
fast_tensor_util.AppendInt8ArrayToTensorProto,
dtypes.quint8.as_numpy_dtype:
fast_tensor_util.AppendUInt8ArrayToTensorProto,
dtypes.qint16.as_numpy_dtype:
fast_tensor_util.AppendInt8ArrayToTensorProto,
dtypes.quint16.as_numpy_dtype:
fast_tensor_util.AppendUInt8ArrayToTensorProto,
dtypes.qint32.as_numpy_dtype:
fast_tensor_util.AppendInt32ArrayToTensorProto,
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
else:
def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.float_val.extend([x.item() for x in proto_values])
def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.double_val.extend([x.item() for x in proto_values])
def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int_val.extend([x.item() for x in proto_values])
def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int64_val.extend([x.item() for x in proto_values])
def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int_val.extend([x.item()[0] for x in proto_values])
def SlowAppendUInt32ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.uint32_val.extend([x.item() for x in proto_values])
def SlowAppendUInt64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.uint64_val.extend([x.item() for x in proto_values])
def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.scomplex_val.extend(
[v.item() for x in proto_values for v in [x.real, x.imag]])
def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.dcomplex_val.extend(
[v.item() for x in proto_values for v in [x.real, x.imag]])
def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.bool_val.extend([x.item() for x in proto_values])
_NP_TO_APPEND_FN = {
dtypes.bfloat16.as_numpy_dtype: SlowAppendBFloat16ArrayToTensorProto,
np.float16: SlowAppendFloat16ArrayToTensorProto,
np.float32: SlowAppendFloat32ArrayToTensorProto,
np.float64: SlowAppendFloat64ArrayToTensorProto,
np.int32: SlowAppendIntArrayToTensorProto,
np.int64: SlowAppendInt64ArrayToTensorProto,
np.uint8: SlowAppendIntArrayToTensorProto,
np.uint16: SlowAppendIntArrayToTensorProto,
np.uint32: SlowAppendUInt32ArrayToTensorProto,
np.uint64: SlowAppendUInt64ArrayToTensorProto,
np.int8: SlowAppendIntArrayToTensorProto,
np.int16: SlowAppendIntArrayToTensorProto,
np.complex64: SlowAppendComplex64ArrayToTensorProto,
np.complex128: SlowAppendComplex128ArrayToTensorProto,
np.object: SlowAppendObjectArrayToTensorProto,
np.bool: SlowAppendBoolArrayToTensorProto,
dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.quint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.qint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.quint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.qint32.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
def GetFromNumpyDTypeDict(dtype_dict, dtype):
  # NOTE: dtype_dict.get(dtype) always returns None: the dict is keyed by
  # numpy type objects, whose hashes do not match np.dtype instances even
  # though they compare equal, so we fall back to a linear scan.
for key, val in six.iteritems(dtype_dict):
if key == dtype:
return val
return None
def GetNumpyAppendFn(dtype):
  # Numpy dtypes for strings are variable length, so we cannot compare the
  # dtype against a single constant (np.string does not exist) to decide
  # whether it is a string type. Instead, we compare dtype.type to be sure
  # it is a string type.
if dtype.type == np.string_ or dtype.type == np.unicode_:
if _FAST_TENSOR_UTIL_AVAILABLE:
return fast_tensor_util.AppendObjectArrayToTensorProto
else:
return SlowAppendObjectArrayToTensorProto
return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
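# A minimal sketch of the dispatch above (a hypothetical helper, not part of
# the original module): fixed-width numpy dtypes are looked up by equality in
# _NP_TO_APPEND_FN, while variable-length string dtypes are routed to the
# object appender.
def _example_numpy_append_fn_dispatch():
  assert GetNumpyAppendFn(np.dtype(np.float32)) is not None
  assert GetNumpyAppendFn(np.dtype('<U5')) is not None  # unicode -> object fn
  return GetNumpyAppendFn(np.dtype(np.int64))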
def TensorShapeProtoToList(shape):
"""Convert a TensorShape to a list.
Args:
shape: A TensorShapeProto.
Returns:
List of integers representing the dimensions of the tensor.
"""
return [dim.size for dim in shape.dim]
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
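# A minimal sketch (hypothetical helper, not part of the original module):
# each nesting level of a Python list contributes the length of its first
# element to the inferred dense dimensions.
def _example_dense_dimensions():
  assert _GetDenseDimensions(3) == []
  assert _GetDenseDimensions([1, 2, 3]) == [3]
  assert _GetDenseDimensions([[1, 2, 3], [4, 5, 6]]) == [2, 3]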
def _FlattenToStrings(nested_strings):
if isinstance(nested_strings, (list, tuple)):
for inner in nested_strings:
for flattened_string in _FlattenToStrings(inner):
yield flattened_string
else:
yield nested_strings
_TENSOR_CONTENT_TYPES = frozenset([
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16,
dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8, dtypes.qint16,
dtypes.quint16, dtypes.qint32, dtypes.uint32, dtypes.uint64
])
class _Message(object):
def __init__(self, message):
self._message = message
def __repr__(self):
return self._message
def _FirstNotNone(l):
for x in l:
if x is not None:
if isinstance(x, ops.Tensor):
return _Message("list containing Tensors")
else:
return x
return None
def _NotNone(v):
if v is None:
return _Message("None")
else:
return v
def _FilterTuple(v):
if not isinstance(v, (list, tuple)):
return v
if isinstance(v, tuple):
if not any(isinstance(x, (list, tuple)) for x in v):
return None
if isinstance(v, list):
if not any(isinstance(x, (list, tuple)) for x in v):
return _FirstNotNone(
[None if isinstance(x, (list, tuple)) else x for x in v])
return _FirstNotNone([_FilterTuple(x) for x in v])
def _FilterInt(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterInt(x) for x in v])
return None if isinstance(
v, (compat.integral_types, tensor_shape.Dimension)) else _NotNone(v)
def _FilterFloat(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterFloat(x) for x in v])
return None if isinstance(v, compat.real_types) else _NotNone(v)
def _FilterComplex(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterComplex(x) for x in v])
return None if isinstance(v, compat.complex_types) else _NotNone(v)
def _FilterStr(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterStr(x) for x in v])
if isinstance(v, compat.bytes_or_text_types):
return None
else:
return _NotNone(v)
def _FilterBool(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterBool(x) for x in v])
return None if isinstance(v, bool) else _NotNone(v)
def _FilterNotTensor(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterNotTensor(x) for x in v])
return str(v) if isinstance(v, ops.Tensor) else None
_TF_TO_IS_OK = {
dtypes.bool: [_FilterBool],
dtypes.complex128: [_FilterComplex],
dtypes.complex64: [_FilterComplex],
dtypes.float16: [_FilterFloat],
dtypes.float32: [_FilterFloat],
dtypes.float64: [_FilterFloat],
dtypes.int16: [_FilterInt],
dtypes.int32: [_FilterInt],
dtypes.int64: [_FilterInt],
dtypes.int8: [_FilterInt],
dtypes.qint16: [_FilterInt, _FilterTuple],
dtypes.qint32: [_FilterInt, _FilterTuple],
dtypes.qint8: [_FilterInt, _FilterTuple],
dtypes.quint16: [_FilterInt, _FilterTuple],
dtypes.quint8: [_FilterInt, _FilterTuple],
dtypes.string: [_FilterStr],
dtypes.uint16: [_FilterInt],
dtypes.uint8: [_FilterInt],
dtypes.uint32: [_FilterInt],
dtypes.uint64: [_FilterInt],
}
def _AssertCompatible(values, dtype):
if dtype is None:
fn_list = [_FilterNotTensor]
else:
try:
fn_list = _TF_TO_IS_OK[dtype]
except KeyError:
# There isn't a specific fn_list, so we try to do the best possible.
if dtype.is_integer:
fn_list = [_FilterInt]
elif dtype.is_floating:
fn_list = [_FilterFloat]
elif dtype.is_complex:
fn_list = [_FilterComplex]
elif dtype.is_quantized:
fn_list = [_FilterInt, _FilterTuple]
else:
fn_list = [_FilterNotTensor]
mismatch = _FirstNotNone([fn(values) for fn in fn_list])
if mismatch is not None:
if dtype is None:
raise TypeError("List of Tensors when single Tensor expected")
else:
raise TypeError("Expected %s, got %s of type '%s' instead." %
(dtype.name, repr(mismatch), type(mismatch).__name__))
# pylint: disable=invalid-name
@tf_export("make_tensor_proto")
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False,
allow_broadcast=False):
"""Create a TensorProto.
In TensorFlow 2.0, representing tensors as protos should no longer be a
common workflow. That said, this utility function is still useful for
generating TF Serving request protos:
request = tensorflow_serving.apis.predict_pb2.PredictRequest()
request.model_spec.name = "my_model"
request.model_spec.signature_name = "serving_default"
request.inputs["images"].CopyFrom(tf.make_tensor_proto(X_new))
  make_tensor_proto accepts "values" as a python scalar, a python list, a
  numpy ndarray, or a numpy scalar.
  If "values" is a python scalar or a python list, make_tensor_proto
  first converts it to a numpy ndarray. If dtype is None, the
  conversion tries its best to infer the right numpy data
  type. Otherwise, the resulting numpy array has a data type
  compatible with the given dtype.
  In either case above, the numpy ndarray (whether provided by the caller
  or auto-converted) must have a type compatible with dtype.
  make_tensor_proto then converts the numpy array to a tensor proto.
  If "shape" is None, the resulting tensor proto represents the numpy
  array precisely.
  Otherwise, "shape" specifies the tensor's shape and the numpy array
  cannot have more elements than "shape" specifies.
Args:
values: Values to put in the TensorProto.
dtype: Optional tensor_pb2 DataType value.
shape: List of integers representing the dimensions of tensor.
    verify_shape: Boolean that enables verification of the shape of values.
    allow_broadcast: Boolean that enables broadcasting of scalars and length-1
      vectors. Cannot be True when verify_shape is True.
Returns:
A `TensorProto`. Depending on the type, it may contain data in the
"tensor_content" attribute, which is not directly useful to Python programs.
To access the values you should convert the proto back to a numpy ndarray
with `tf.make_ndarray(proto)`.
If `values` is a `TensorProto`, it is immediately returned; `dtype` and
`shape` are ignored.
Raises:
TypeError: if unsupported types are provided.
    ValueError: if arguments have inappropriate values or if verify_shape is
      True and the shape of values is not equal to the given shape.
"""
if allow_broadcast and verify_shape:
raise ValueError("allow_broadcast and verify_shape are not both allowed.")
if isinstance(values, tensor_pb2.TensorProto):
return values
if dtype:
dtype = dtypes.as_dtype(dtype)
is_quantized = (
dtype in [
dtypes.qint8, dtypes.quint8, dtypes.qint16, dtypes.quint16,
dtypes.qint32
])
# We first convert value to a numpy array or scalar.
if isinstance(values, (np.ndarray, np.generic)):
if dtype:
nparray = values.astype(dtype.as_numpy_dtype)
else:
nparray = values
elif callable(getattr(values, "__array__", None)) or isinstance(
getattr(values, "__array_interface__", None), dict):
    # If the object has an __array__ method or an __array_interface__ dict,
    # then it can be converted to a numpy array.
nparray = np.asarray(values, dtype=dtype)
# This is the preferred way to create an array from the object, so replace
# the `values` with the array so that _FlattenToStrings is not run.
values = nparray
else:
if values is None:
raise ValueError("None values not supported.")
    # If dtype is provided, force the numpy array to be of the
    # provided type if possible.
if dtype and dtype.is_numpy_compatible:
np_dt = dtype.as_numpy_dtype
else:
np_dt = None
# If shape is None, numpy.prod returns None when dtype is not set, but
# raises exception when dtype is set to np.int64
if shape is not None and np.prod(shape, dtype=np.int64) == 0:
nparray = np.empty(shape, dtype=np_dt)
else:
_AssertCompatible(values, dtype)
nparray = np.array(values, dtype=np_dt)
      # We need to pass in quantized values as tuples, so don't apply the shape
      # check to them.
if (list(nparray.shape) != _GetDenseDimensions(values) and
not is_quantized):
raise ValueError("""Argument must be a dense tensor: %s"""
""" - got shape %s, but wanted %s.""" %
(values, list(nparray.shape),
_GetDenseDimensions(values)))
# python/numpy default float type is float64. We prefer float32 instead.
if (nparray.dtype == np.float64) and dtype is None:
nparray = nparray.astype(np.float32)
# python/numpy default int type is int64. We prefer int32 instead.
elif (nparray.dtype == np.int64) and dtype is None:
downcasted_array = nparray.astype(np.int32)
# Do not down cast if it leads to precision loss.
if np.array_equal(downcasted_array, nparray):
nparray = downcasted_array
# if dtype is provided, it must be compatible with what numpy
# conversion says.
numpy_dtype = dtypes.as_dtype(nparray.dtype)
if numpy_dtype is None:
raise TypeError("Unrecognized data type: %s" % nparray.dtype)
# If dtype was specified and is a quantized type, we convert
# numpy_dtype back into the quantized version.
if is_quantized:
numpy_dtype = dtype
if dtype is not None and (not hasattr(dtype, "base_dtype") or
dtype.base_dtype != numpy_dtype.base_dtype):
raise TypeError("Incompatible types: %s vs. %s. Value is %s" %
(dtype, nparray.dtype, values))
# If shape is not given, get the shape from the numpy array.
if shape is None:
shape = nparray.shape
is_same_size = True
shape_size = nparray.size
else:
shape = [int(dim) for dim in shape]
shape_size = np.prod(shape, dtype=np.int64)
is_same_size = shape_size == nparray.size
if allow_broadcast:
if nparray.shape == (1,) or nparray.shape == tuple():
pass
elif nparray.size != shape_size:
raise TypeError("Expected Tensor's shape: %s, got %s." %
(tuple(shape), nparray.shape))
else:
if verify_shape and nparray.shape != tuple(shape):
raise TypeError("Expected Tensor's shape: %s, got %s." %
(tuple(shape), nparray.shape))
if nparray.size > shape_size:
raise ValueError(
"Too many elements provided. Needed at most %d, but received %d" %
(shape_size, nparray.size))
tensor_proto = tensor_pb2.TensorProto(
dtype=numpy_dtype.as_datatype_enum,
tensor_shape=tensor_shape.as_shape(shape).as_proto())
if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
if nparray.size * nparray.itemsize >= (1 << 31):
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
tensor_proto.tensor_content = nparray.tostring()
return tensor_proto
# If we were not given values as a numpy array, compute the proto_values
# from the given values directly, to avoid numpy trimming nulls from the
# strings. Since values could be a list of strings, or a multi-dimensional
# list of lists that might or might not correspond to the given shape,
# we flatten it conservatively.
if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
proto_values = _FlattenToStrings(values)
# At this point, values may be a list of objects that we could not
# identify a common type for (hence it was inferred as
# np.object/dtypes.string). If we are unable to convert it to a
# string, we raise a more helpful error message.
#
# Ideally, we'd be able to convert the elements of the list to a
# common type, but this type inference requires some thinking and
# so we defer it for now.
try:
str_values = [compat.as_bytes(x) for x in proto_values]
except TypeError:
raise TypeError("Failed to convert object of type %s to Tensor. "
"Contents: %s. Consider casting elements to a "
"supported type." % (type(values), values))
tensor_proto.string_val.extend(str_values)
return tensor_proto
# TensorFlow expects C order (a.k.a., eigen row major).
proto_values = nparray.ravel()
append_fn = GetNumpyAppendFn(proto_values.dtype)
if append_fn is None:
raise TypeError(
"Element type not supported in TensorProto: %s" % numpy_dtype.name)
append_fn(tensor_proto, proto_values)
return tensor_proto
# pylint: enable=invalid-name
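# A minimal sketch (hypothetical helper, not part of the original module) of
# the `verify_shape` / `allow_broadcast` behavior documented above: a scalar
# may be paired with a larger shape when allow_broadcast=True, while
# verify_shape=True requires the values to match the shape exactly.
def _example_make_tensor_proto_shapes():
  broadcast_proto = make_tensor_proto(1.0, shape=[2, 3], allow_broadcast=True)
  exact_proto = make_tensor_proto([[1, 2, 3], [4, 5, 6]], shape=[2, 3],
                                  verify_shape=True)
  return broadcast_proto, exact_proto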
@tf_export("make_ndarray")
def MakeNdarray(tensor):
"""Create a numpy ndarray from a tensor.
Create a numpy ndarray with the same shape and data as the tensor.
Args:
tensor: A TensorProto.
Returns:
A numpy array with the tensor contents.
Raises:
TypeError: if tensor has unsupported type.
"""
shape = [d.size for d in tensor.tensor_shape.dim]
num_elements = np.prod(shape, dtype=np.int64)
tensor_dtype = dtypes.as_dtype(tensor.dtype)
dtype = tensor_dtype.as_numpy_dtype
if tensor.tensor_content:
return (np.frombuffer(tensor.tensor_content,
dtype=dtype).copy().reshape(shape))
if tensor_dtype == dtypes.string:
# np.pad throws on these arrays of type np.object.
values = list(tensor.string_val)
padding = num_elements - len(values)
if padding > 0:
last = values[-1] if values else ""
values.extend([last] * padding)
return np.array(values, dtype=dtype).reshape(shape)
if tensor_dtype == dtypes.float16 or tensor_dtype == dtypes.bfloat16:
# the half_val field of the TensorProto stores the binary representation
# of the fp16: we need to reinterpret this as a proper float16
values = np.fromiter(tensor.half_val, dtype=np.uint16)
values.dtype = tensor_dtype.as_numpy_dtype
elif tensor_dtype == dtypes.float32:
values = np.fromiter(tensor.float_val, dtype=dtype)
elif tensor_dtype == dtypes.float64:
values = np.fromiter(tensor.double_val, dtype=dtype)
elif tensor_dtype in [
dtypes.int32, dtypes.uint8, dtypes.uint16, dtypes.int16, dtypes.int8,
dtypes.qint32, dtypes.quint8, dtypes.qint8, dtypes.qint16, dtypes.quint16
]:
values = np.fromiter(tensor.int_val, dtype=dtype)
elif tensor_dtype == dtypes.int64:
values = np.fromiter(tensor.int64_val, dtype=dtype)
elif tensor_dtype == dtypes.complex64:
it = iter(tensor.scomplex_val)
values = np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype)
elif tensor_dtype == dtypes.complex128:
it = iter(tensor.dcomplex_val)
values = np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype)
elif tensor_dtype == dtypes.bool:
values = np.fromiter(tensor.bool_val, dtype=dtype)
else:
raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
if values.size == 0:
return np.zeros(shape, dtype)
if values.size != num_elements:
values = np.pad(values, (0, num_elements - values.size), "edge")
return values.reshape(shape)
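# A minimal round-trip sketch (hypothetical helper, not part of the original
# module): a proto built by make_tensor_proto can be converted back to a numpy
# ndarray with MakeNdarray (exported as tf.make_ndarray).
def _example_tensor_proto_roundtrip():
  original = np.arange(6, dtype=np.float32).reshape(2, 3)
  proto = make_tensor_proto(original)
  restored = MakeNdarray(proto)
  assert restored.dtype == np.float32 and restored.shape == (2, 3)
  return restored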
def ShapeEquals(tensor_proto, shape):
"""Returns True if "tensor_proto" has the given "shape".
Args:
tensor_proto: A TensorProto.
    shape: A tensor shape, expressed as a TensorShapeProto, list, or tuple of
      integers.
Returns:
True if "tensor_proto" has the given "shape", otherwise False.
Raises:
TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
TensorShape, list, or tuple.
"""
if not isinstance(tensor_proto, tensor_pb2.TensorProto):
raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
shape = [d.size for d in shape.dim]
elif not isinstance(shape, (list, tuple)):
raise TypeError("shape is not a list or tuple")
tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
return all(x == y for x, y in zip(tensor_shape_list, shape))
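# A minimal sketch (hypothetical helper, not part of the original module):
# ShapeEquals compares a proto's recorded shape against a plain list of sizes.
def _example_shape_equals():
  proto = make_tensor_proto(np.zeros([2, 3], dtype=np.float32))
  assert ShapeEquals(proto, [2, 3])
  assert not ShapeEquals(proto, [3, 2])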
def _ConstantValue(tensor, partial):
# TODO(touts): Support Variables?
if not isinstance(tensor, ops.Tensor):
raise TypeError("%r is not a Tensor, has type %s" % (tensor, type(tensor)))
if tensor.op.type == "Const":
return MakeNdarray(tensor.op.get_attr("value"))
elif tensor.op.type == "Shape":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.is_fully_defined():
return np.array(
[dim.value for dim in input_shape.dims],
dtype=tensor.dtype.as_numpy_dtype)
else:
return None
elif tensor.op.type == "Size":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.is_fully_defined():
return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
else:
return None
elif tensor.op.type == "Rank":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.ndims is not None:
return np.ndarray(
shape=(),
buffer=np.array([input_shape.ndims], dtype=np.int32),
dtype=np.int32)
else:
return None
elif tensor.op.type == "Range":
start = constant_value(tensor.op.inputs[0])
if start is None:
return None
limit = constant_value(tensor.op.inputs[1])
if limit is None:
return None
delta = constant_value(tensor.op.inputs[2])
if delta is None:
return None
return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
elif tensor.op.type == "Cast":
pre_cast = constant_value(tensor.op.inputs[0])
if pre_cast is None:
return None
cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
return pre_cast.astype(cast_dtype.as_numpy_dtype)
elif tensor.op.type == "Concat":
dim = constant_value(tensor.op.inputs[0])
if dim is None:
return None
values = []
for x in tensor.op.inputs[1:]:
value = constant_value(x)
if value is None:
return None
values.append(value)
return np.concatenate(values, axis=dim)
elif tensor.op.type == "ConcatV2":
dim = constant_value(tensor.op.inputs[-1])
if dim is None:
return None
values = []
for x in tensor.op.inputs[:-1]:
value = constant_value(x)
if value is None:
return None
values.append(value)
return np.concatenate(values, axis=dim)
elif tensor.op.type == "Pack":
values = []
# Some imported GraphDefs have Pack ops with zero inputs. Those are invalid
# and shouldn't be produced, but to deal sensibly with them here we check
# and return None.
if not tensor.op.inputs:
return None
# We can't handle axis != 0 Packs at the moment.
if tensor.op.get_attr("axis") != 0:
return None
for x in tensor.op.inputs:
value = constant_value(x, partial)
if value is None and not partial:
return None
values.append(value)
return np.array(values)
elif tensor.op.type == "Fill":
fill_shape = tensor.shape
fill_value = constant_value(tensor.op.inputs[1])
if fill_shape.is_fully_defined() and fill_value is not None:
return np.full(fill_shape.as_list(), fill_value, dtype=fill_value.dtype)
else:
return None
elif tensor.op.type == "Equal":
value1 = constant_value(tensor.op.inputs[0])
if value1 is None:
return None
value2 = constant_value(tensor.op.inputs[1])
if value2 is None:
return None
return np.equal(value1, value2)
elif tensor.op.type == "NotEqual":
value1 = constant_value(tensor.op.inputs[0])
if value1 is None:
return None
value2 = constant_value(tensor.op.inputs[1])
if value2 is None:
return None
return np.not_equal(value1, value2)
else:
return None
@tf_export("get_static_value")
def constant_value(tensor, partial=False): # pylint: disable=invalid-name
"""Returns the constant value of the given tensor, if efficiently calculable.
This function attempts to partially evaluate the given tensor, and
returns its value as a numpy ndarray if this succeeds.
Compatibility(V1): If `constant_value(tensor)` returns a non-`None` result, it
will no longer be possible to feed a different value for `tensor`. This allows
the result of this function to influence the graph that is constructed, and
permits static shape optimizations.
Args:
tensor: The Tensor to be evaluated.
partial: If True, the returned numpy array is allowed to have partially
evaluated values. Values that can't be evaluated will be None.
Returns:
A numpy ndarray containing the constant value of the given `tensor`,
or None if it cannot be calculated.
Raises:
TypeError: if tensor is not an ops.Tensor.
"""
if isinstance(tensor, ops.EagerTensor):
return tensor.numpy()
if not is_tensor(tensor):
return tensor
if not isinstance(tensor, ops.Tensor):
return None
ret = _ConstantValue(tensor, partial)
if ret is not None:
# The caller may now depend on the constant value of `tensor`, so we
# conservatively prevent it from being fed.
tensor.graph.prevent_feeding(tensor)
return ret
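# A minimal sketch (hypothetical helper, not part of the original module):
# non-tensor inputs pass through unchanged, and a constant tensor folds to its
# numpy value. The import is deferred to avoid a circular dependency.
def _example_constant_value():
  from tensorflow.python.framework import constant_op
  assert constant_value(7) == 7
  value = constant_value(constant_op.constant([[1, 2], [3, 4]]))
  assert value is not None and value.shape == (2, 2)
  return value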
def constant_value_as_shape(tensor): # pylint: disable=invalid-name
"""A version of `constant_value()` that returns a `TensorShape`.
This version should be used when a constant tensor value is
interpreted as a (possibly partial) shape, e.g. in the shape
function for `tf.reshape()`. By explicitly requesting a
`TensorShape` as the return value, it is possible to represent
unknown dimensions; by contrast, `constant_value()` is
all-or-nothing.
Args:
tensor: The rank-0 or rank-1 Tensor to be evaluated.
Returns:
A `TensorShape` based on the constant value of the given `tensor`.
Raises:
ValueError: If the shape is rank-0 and is not statically known to be -1.
"""
if isinstance(tensor, ops.EagerTensor):
return tensor_shape.as_shape(
[dim if dim != -1 else None for dim in tensor.numpy()])
if tensor.get_shape().ndims == 0:
value = constant_value(tensor)
if value is None:
raise ValueError(
"Received a scalar with unknown value as shape; require a statically "
"known scalar with value '-1' to describe an unknown shape.")
if value != -1:
raise ValueError(
"Received a scalar value '%s' as shape; require a statically known "
"scalar with value '-1' to describe an unknown shape." % value)
return tensor_shape.unknown_shape()
shape = tensor.get_shape().with_rank(1)
if shape == [0]:
return tensor_shape.scalar()
elif tensor.op.type == "Shape":
return tensor.op.inputs[0].get_shape()
elif tensor.op.type == "Pack":
ret = tensor_shape.scalar() # Empty list.
# Since we expect rank 1 inputs, Pack's axis must be zero, otherwise it
# would not be rank 1.
assert tensor.op.get_attr("axis") == 0
for pack_input in tensor.op.inputs:
# `pack_input` must be a scalar. Attempt to evaluate it, and append it
# to `ret`.
pack_input_val = constant_value(pack_input)
if pack_input_val is None or pack_input_val < 0:
new_dim = tensor_shape.Dimension(None)
else:
new_dim = tensor_shape.Dimension(pack_input_val)
ret = ret.concatenate([new_dim])
return ret
elif tensor.op.type == "Concat":
# We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
# the only legal value when concatenating vectors, and it will
# have been checked by a previous shape function.
ret = tensor_shape.scalar() # Empty list.
for concat_input in tensor.op.inputs[1:]:
# `concat_input` must be a vector. Attempt to evaluate it as a shape,
# and concatenate it with `ret`.
ret = ret.concatenate(constant_value_as_shape(concat_input))
return ret
elif tensor.op.type == "ConcatV2":
# We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
# the only legal value when concatenating vectors, and it will
# have been checked by a previous shape function.
ret = tensor_shape.scalar() # Empty list.
for concat_input in tensor.op.inputs[:-1]:
# `concat_input` must be a vector. Attempt to evaluate it as a shape,
# and concatenate it with `ret`.
ret = ret.concatenate(constant_value_as_shape(concat_input))
return ret
elif tensor.op.type == "StridedSlice":
try:
begin = constant_value(tensor.op.inputs[1])
end = constant_value(tensor.op.inputs[2])
strides = constant_value(tensor.op.inputs[3])
if begin is not None and end is not None and strides is not None:
begin = begin[0]
end = end[0]
strides = strides[0]
begin_mask = tensor.op.get_attr("begin_mask")
if begin_mask == 1:
begin = None
end_mask = tensor.op.get_attr("end_mask")
if end_mask == 1:
end = None
ellipsis_mask = tensor.op.get_attr("ellipsis_mask")
new_axis_mask = tensor.op.get_attr("new_axis_mask")
shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask")
valid_attributes = (not ellipsis_mask and not new_axis_mask and
not shrink_axis_mask and (not begin_mask or
(begin_mask == 1)) and
(not end_mask or (end_mask == 1)))
if valid_attributes: # additional inputs not supported
prev = constant_value_as_shape(tensor.op.inputs[0])
prev = prev[begin:end:strides]
ret = tensor_shape.TensorShape(prev)
return ret
except ValueError: # Could come from get_attr or slicing prev.
pass
except TypeError: # Could come from slicing prev.
pass
ret = tensor_shape.unknown_shape(shape.dims[0].value)
value = constant_value(tensor)
if value is not None:
ret = ret.merge_with(
tensor_shape.TensorShape([d if d >= 0 else None for d in value]))
return ret
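# A minimal sketch (hypothetical helper, not part of the original module):
# a statically known vector with a -1 entry is interpreted as a partially
# known shape. The import is deferred to avoid a circular dependency.
def _example_constant_value_as_shape():
  from tensorflow.python.framework import constant_op
  shape_tensor = constant_op.constant([2, -1, 3])
  result = constant_value_as_shape(shape_tensor)
  assert result.as_list() == [2, None, 3]
  return result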
@tf_export("is_tensor")
def is_tensor(x): # pylint: disable=invalid-name
"""Checks whether `x` is a tensor or "tensor-like".
If `is_tensor(x)` returns `True`, it is safe to assume that `x` is a tensor or
can be converted to a tensor using `ops.convert_to_tensor(x)`.
Args:
x: A python object to check.
Returns:
`True` if `x` is a tensor or "tensor-like", `False` if not.
"""
return (isinstance(x, tensor_like._TensorLike) or # pylint: disable=protected-access
ops.is_dense_tensor_like(x) or
getattr(x, "is_tensor_like", False))
|
tensorflow-master
|
tensorflow/python/framework/tensor_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for shape inference helper classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class DimensionTest(test_util.TensorFlowTestCase):
def testDimension(self):
dim = tensor_shape.Dimension(12)
self.assertEqual(12, dim.value)
self.assertEqual(12, int(dim))
self.assertEqual(dim, tensor_shape.Dimension(12))
self.assertEqual(tensor_shape.Dimension(15),
dim + tensor_shape.Dimension(3))
self.assertEqual(tensor_shape.Dimension(15), dim + 3)
self.assertEqual(tensor_shape.Dimension(15), 3 + dim)
self.assertEqual(tensor_shape.Dimension(9), dim - 3)
self.assertEqual(tensor_shape.Dimension(1), 13 - dim)
self.assertEqual(tensor_shape.Dimension(24),
dim * tensor_shape.Dimension(2))
self.assertEqual(tensor_shape.Dimension(24), dim * 2)
self.assertEqual(tensor_shape.Dimension(24), 2 * dim)
self.assertEqual([4] * 12, [4] * dim)
self.assertEqual(12 * [4], dim * [4])
self.assertEqual(tensor_shape.Dimension(24), 2 * dim)
self.assertEqual(
tensor_shape.Dimension(6), dim // tensor_shape.Dimension(2))
self.assertEqual(tensor_shape.Dimension(6), dim // 2)
self.assertEqual(tensor_shape.Dimension(0), 2 // dim)
self.assertEqual(tensor_shape.Dimension(12),
dim.merge_with(tensor_shape.Dimension(12)))
self.assertEqual(tensor_shape.Dimension(12), dim.merge_with(12))
self.assertLess(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))
self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12))
self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))
self.assertGreaterEqual(tensor_shape.Dimension(12),
tensor_shape.Dimension(12))
self.assertGreaterEqual(tensor_shape.Dimension(13),
tensor_shape.Dimension(12))
self.assertNotEqual(dim, (12,))
with self.assertRaises(ValueError):
dim.merge_with(tensor_shape.Dimension(13))
def testUnknownDimension(self):
dim = tensor_shape.Dimension(None)
self.assertIs(None, dim.value)
self.assertEqual(dim.value, tensor_shape.Dimension(None).value)
self.assertEqual(tensor_shape.Dimension(None).value,
(dim + tensor_shape.Dimension(None)).value)
self.assertEqual(tensor_shape.Dimension(None).value,
(dim * tensor_shape.Dimension(None)).value)
self.assertEqual(
tensor_shape.Dimension(None).value,
(dim // tensor_shape.Dimension(None)).value)
self.assertEqual(tensor_shape.Dimension(None).value,
dim.merge_with(tensor_shape.Dimension(None)).value)
self.assertIs(None,
tensor_shape.Dimension(None) < tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) <= tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) > tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) >= tensor_shape.Dimension(None))
def testKnownAndUnknownDimensions(self):
known = tensor_shape.Dimension(12)
unknown = tensor_shape.Dimension(None)
self.assertEqual(
tensor_shape.Dimension(None).value, (known + unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown + known).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (known * unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown * known).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (known // unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown // known).value)
self.assertEqual(
tensor_shape.Dimension(12), known.merge_with(unknown))
self.assertEqual(
tensor_shape.Dimension(12), unknown.merge_with(known))
self.assertIs(None,
tensor_shape.Dimension(12) < tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(12) <= tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(12) > tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(12) >= tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) < tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(None) <= tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(None) > tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(None) >= tensor_shape.Dimension(12))
def testAsDimension(self):
self.assertEqual(tensor_shape.Dimension(12),
tensor_shape.as_dimension(tensor_shape.Dimension(12)))
self.assertEqual(tensor_shape.Dimension(12), tensor_shape.as_dimension(12))
self.assertEqual(
tensor_shape.Dimension(None).value,
tensor_shape.as_dimension(tensor_shape.Dimension(None)).value)
self.assertEqual(tensor_shape.Dimension(None).value,
tensor_shape.as_dimension(None).value)
def testEquality(self):
self.assertTrue(tensor_shape.Dimension(12) == tensor_shape.Dimension(12))
self.assertFalse(tensor_shape.Dimension(12) == tensor_shape.Dimension(13))
self.assertIs(None,
tensor_shape.Dimension(12) == tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) == tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(None) == tensor_shape.Dimension(None))
self.assertTrue(tensor_shape.Dimension(12) == "12")
self.assertTrue(tensor_shape.Dimension(12) == 24.0 / 2)
# None indicates ambiguous comparison, but comparison vs the wrong type
    # is unambiguously False.
self.assertIsNotNone(tensor_shape.Dimension(12) == "_")
self.assertIsNotNone(tensor_shape.Dimension(None) == 12.99)
self.assertFalse(tensor_shape.Dimension(12) == "_")
self.assertFalse(tensor_shape.Dimension(None) == 12.99)
self.assertIs(None, tensor_shape.Dimension(None) == "13")
self.assertIs(None, tensor_shape.Dimension(None) == None) # pylint: disable=g-equals-none
self.assertFalse(tensor_shape.Dimension(12) == 12.99)
def testInequality(self):
self.assertTrue(tensor_shape.Dimension(12) != tensor_shape.Dimension(13))
self.assertFalse(tensor_shape.Dimension(12) != tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(12) != tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) != tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(None) != tensor_shape.Dimension(None))
# None indicates ambiguous comparison, but comparison vs the wrong type
    # is unambiguously False.
self.assertIsNotNone(tensor_shape.Dimension(12) != "_")
self.assertIsNotNone(tensor_shape.Dimension(None) != 12.99)
self.assertTrue(tensor_shape.Dimension(12) != "_")
self.assertTrue(tensor_shape.Dimension(None) != 12.99)
self.assertIs(None, tensor_shape.Dimension(None) != "13")
self.assertIs(None, tensor_shape.Dimension(None) != None) # pylint: disable=g-equals-none
self.assertTrue(tensor_shape.Dimension(12) != 12.99)
def testRepr(self):
self.assertEqual(repr(tensor_shape.Dimension(7)), "Dimension(7)")
self.assertEqual(repr(tensor_shape.Dimension(None)), "Dimension(None)")
def testStr(self):
self.assertEqual(str(tensor_shape.Dimension(7)), "7")
self.assertEqual(str(tensor_shape.Dimension(None)), "?")
def testUnsupportedType(self):
with self.assertRaises(TypeError):
tensor_shape.Dimension(dtypes.string)
def testMod(self):
four = tensor_shape.Dimension(4)
nine = tensor_shape.Dimension(9)
self.assertEqual(nine % four, 1)
# test both __mod__ and __rmod__.
self.assertEqual(nine % 4, 1)
self.assertEqual(4 % nine, 4)
def testReduce(self):
dim = tensor_shape.Dimension(5)
ctor, args = dim.__reduce__()
self.assertEqual(ctor, tensor_shape.Dimension)
self.assertEqual(args, (5,))
reconstructed = ctor(*args)
self.assertEqual(reconstructed, dim)
def testDiv(self):
# Note: This test is related to GitHub issue 25790.
six = tensor_shape.Dimension(6)
two = tensor_shape.Dimension(2)
message = (r"unsupported operand type\(s\) for /: "
r"'Dimension' and 'Dimension', please use // instead")
with self.assertRaisesRegexp(TypeError, message):
_ = six / two
message = (r"unsupported operand type\(s\) for /: "
r"'Dimension' and 'int', please use // instead")
with self.assertRaisesRegexp(TypeError, message):
_ = six / 2
message = (r"unsupported operand type\(s\) for /: "
r"'int' and 'Dimension', please use // instead")
with self.assertRaisesRegexp(TypeError, message):
_ = 6 / two
class ShapeTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testUnknownShape(self):
s = tensor_shape.TensorShape(None)
with self.assertRaises(ValueError):
s.assert_is_fully_defined()
self.assertIs(None, s.rank)
with self.assertRaises(ValueError):
len(s)
self.assertFalse(s)
self.assertIs(None, s.dims)
with self.assertRaises(ValueError):
for _ in tensor_shape.TensorShape(None):
pass
def testFullyDefinedShape(self):
s = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
s.assert_is_fully_defined()
self.assertEqual(3, s.rank)
self.assertEqual(3, len(s))
self.assertTrue(s)
s.assert_has_rank(3)
self.assertEqual([tensor_shape.Dimension(3),
tensor_shape.Dimension(4),
tensor_shape.Dimension(7)], s.dims)
self.assertEqual(tensor_shape.Dimension(3), s[0])
self.assertEqual(tensor_shape.Dimension(4), s[1])
self.assertEqual(tensor_shape.Dimension(7), s[2])
self.assertEqual([3, 4, 7], s.as_list())
s.assert_is_compatible_with([3, 4, 7])
s.assert_same_rank([6, 3, 7])
for d1, d2 in zip(s, [3, 4, 7]):
assert tensor_shape.dimension_value(d1) == d2
def testPartiallyDefinedShape(self):
s = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(None), tensor_shape.Dimension(7)])
with self.assertRaises(ValueError):
s.assert_is_fully_defined()
self.assertEqual(3, s.rank)
self.assertEqual(3, len(s))
self.assertTrue(s)
s.assert_has_rank(3)
self.assertEqual(tensor_shape.Dimension(3), s[0])
self.assertEqual(tensor_shape.Dimension(None).value, s.dims[1].value)
self.assertEqual(tensor_shape.Dimension(7), s.dims[2])
s.assert_same_rank([6, 3, 7])
for d1, d2 in zip(s, [3, None, 7]):
assert tensor_shape.dimension_value(d1) == d2
def testMergeFullShapes(self):
self.assertEqual([3, 4, 7],
tensor_shape.TensorShape([3, 4, 7]).merge_with(
tensor_shape.TensorShape([3, 4, 7])).as_list())
with self.assertRaises(ValueError):
tensor_shape.TensorShape([3, 4, 7]).merge_with(
tensor_shape.TensorShape([6, 3, 7]))
def testMergePartialShapes(self):
s1 = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(None), tensor_shape.Dimension(7)])
s2 = tensor_shape.TensorShape([tensor_shape.Dimension(
None), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
self.assertEqual([3, 4, 7], s1.merge_with(s2).as_list())
def testMergeFullAndUnknownShape(self):
self.assertEqual([3, 4, 7],
tensor_shape.TensorShape([3, 4, 7]).merge_with(
tensor_shape.TensorShape(None)).as_list())
def testSlice(self):
known = tensor_shape.TensorShape([0, 1, 2, 3, 4])
self.assertEqual(tensor_shape.Dimension(2), known[2])
tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with(known[1:4])
unknown = tensor_shape.TensorShape(None)
self.assertEqual(
tensor_shape.Dimension(None).value,
tensor_shape.dimension_value(unknown[2]))
tensor_shape.TensorShape(
[None, None, None]).assert_is_compatible_with(unknown[1:4])
@parameterized.named_parameters(
("Concatenate", lambda x, y: x.concatenate(y)),
("Add", lambda x, y: x + y),
("RAdd", lambda x, y: y.__radd__(x)))
def testConcatenate(self, concatenate_fn):
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
concatenate_fn(
tensor_shape.TensorShape([1, 2]),
tensor_shape.TensorShape([3, 4])))
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
concatenate_fn(
tensor_shape.TensorShape([1, 2]),
tensor_shape.TensorShape(None)))
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
concatenate_fn(
tensor_shape.TensorShape(None),
tensor_shape.TensorShape([3, 4])))
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
concatenate_fn(
tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None)))
@parameterized.named_parameters(
("Concatenate", lambda x, y: x.concatenate(y)),
("Add", lambda x, y: x + y))
def testConcatenateWithDimension(self, concatenate_fn):
tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with(
concatenate_fn(
tensor_shape.TensorShape([1, 2]),
tensor_shape.Dimension(3)))
@parameterized.named_parameters(
("List", [3, 4, 5]),
("Tuple", (3, 4, 5)))
def testAdd_nonTensorShape(self, addend):
two = tensor_shape.TensorShape([2])
result = two + addend
self.assertIsInstance(result, tensor_shape.TensorShape)
tensor_shape.TensorShape([2, 3, 4, 5]).assert_is_compatible_with(result)
@parameterized.named_parameters(
("List", [2, 3, 4]),
("Tuple", (2, 3, 4)))
def testRAdd_nonTensorShape(self, addend):
five = tensor_shape.TensorShape([5])
result = addend + five
self.assertIsInstance(result, tensor_shape.TensorShape)
tensor_shape.TensorShape([2, 3, 4, 5]).assert_is_compatible_with(result)
def _testMostSpecificCompatibleShapeHelper(self, x, y, expected):
mcs = tensor_shape.TensorShape(x).most_specific_compatible_shape(
tensor_shape.TensorShape(y))
mcs_dims = mcs.dims
if expected is None or mcs_dims is None:
self.assertIs(expected, mcs_dims)
else:
self.assertEqual(expected, mcs.as_list())
def testMostSpecificCompatibleShape(self):
self._testMostSpecificCompatibleShapeHelper([1, 2], None, None)
self._testMostSpecificCompatibleShapeHelper(None, [1, 2], None)
self._testMostSpecificCompatibleShapeHelper([1, 2], [1, 2, 3, 4], None)
self._testMostSpecificCompatibleShapeHelper([1, 2, 3, 4], [1, 2], None)
self._testMostSpecificCompatibleShapeHelper([1, 2], [1, 2], [1, 2])
self._testMostSpecificCompatibleShapeHelper([None, 2, 3], [1, 1, 3],
[None, None, 3])
self._testMostSpecificCompatibleShapeHelper([1, 1, 3], [None, 2, 3],
[None, None, 3])
def testHelpers(self):
tensor_shape.TensorShape([]).assert_is_compatible_with(
tensor_shape.scalar())
tensor_shape.TensorShape([37]).assert_is_compatible_with(
tensor_shape.vector(37))
tensor_shape.TensorShape(
[94, 43]).assert_is_compatible_with(tensor_shape.matrix(94, 43))
def testTruedivFails(self):
unknown = tensor_shape.Dimension(None)
self.assertEqual((unknown // unknown).value, None)
with self.assertRaisesRegexp(TypeError, r"unsupported operand type"):
unknown / unknown # pylint: disable=pointless-statement
def testConvertFromProto(self):
def make_tensor_shape_proto(shape):
return tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=x) for x in shape])
proto = make_tensor_shape_proto([])
self.assertEqual(tensor_shape.TensorShape([]),
tensor_shape.TensorShape(proto))
self.assertEqual(tensor_shape.TensorShape([]),
tensor_shape.as_shape(proto))
proto = make_tensor_shape_proto([1, 37, 42])
self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
tensor_shape.TensorShape(proto))
self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
tensor_shape.as_shape(proto))
partial_proto_shape = tensor_shape.as_shape(
make_tensor_shape_proto([-1, 37, 42]))
partial_shape = tensor_shape.TensorShape([None, 37, 42])
self.assertNotEqual(partial_proto_shape, partial_shape)
self.assertEqual(tensor_shape.dimension_value(partial_proto_shape[0]), None)
self.assertEqual(tensor_shape.dimension_value(partial_proto_shape[1]), 37)
self.assertEqual(tensor_shape.dimension_value(partial_proto_shape[2]), 42)
self.assertTrue(partial_shape.is_compatible_with(partial_proto_shape))
def testStr(self):
self.assertEqual("<unknown>", str(tensor_shape.unknown_shape()))
self.assertEqual(
"(None,)",
str(tensor_shape.unknown_shape(rank=1)).replace("?", "None"))
self.assertEqual(
"(None, None)",
str(tensor_shape.unknown_shape(rank=2)).replace("?", "None"))
self.assertEqual(
"(None, None, None)",
str(tensor_shape.unknown_shape(rank=3)).replace("?", "None"))
self.assertEqual(
"(32, None, 1, 9)",
str(tensor_shape.TensorShape([32, None, 1, 9])).replace("?", "None"))
self.assertEqual("()", str(tensor_shape.scalar()))
self.assertEqual("(7,)", str(tensor_shape.vector(7)))
self.assertEqual("(3, 8)", str(tensor_shape.matrix(3, 8)))
self.assertEqual("(4, 5, 2)", str(tensor_shape.TensorShape([4, 5, 2])))
def testAsProto(self):
self.assertTrue(tensor_shape.unknown_shape().as_proto().unknown_rank)
self.assertFalse(
tensor_shape.unknown_shape(rank=3).as_proto().unknown_rank)
self.assertFalse(
tensor_shape.TensorShape([1, 2, 3]).as_proto().unknown_rank)
self.assertFalse(
tensor_shape.TensorShape([1, None, 3]).as_proto().unknown_rank)
def testEquality(self):
s1 = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
s2 = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
s3 = tensor_shape.TensorShape([tensor_shape.Dimension(3),
tensor_shape.Dimension(4), None])
self.assertTrue(s1 == s2)
self.assertFalse(s1 != s2)
self.assertFalse(s1 == "a string")
self.assertTrue(s1 != "a string")
self.assertNotEqual(s1, "347", "Should not equal an ambiguous string.")
self.assertEqual(s1, ["3", "4", "7"])
# Test with an unknown shape in s3
self.assertTrue(s1 != s3)
self.assertFalse(s3 == "a string")
self.assertTrue(s3 != "a string")
# eq and neq are not symmetric for unknown shapes.
unk0 = tensor_shape.unknown_shape()
self.assertFalse(unk0 == s1)
self.assertFalse(s1 == unk0)
with self.assertRaises(ValueError):
unk0 != s1 # pylint: disable=pointless-statement
with self.assertRaises(ValueError):
s1 != unk0 # pylint: disable=pointless-statement
unk1 = tensor_shape.unknown_shape()
self.assertTrue(unk0 == unk1)
self.assertTrue(unk1 == unk0)
with self.assertRaises(ValueError):
unk0 != unk1 # pylint: disable=pointless-statement
with self.assertRaises(ValueError):
unk1 != unk0 # pylint: disable=pointless-statement
def testAsList(self):
with self.assertRaisesRegexp(ValueError,
"not defined on an unknown TensorShape"):
tensor_shape.unknown_shape().as_list()
self.assertAllEqual([None, None], tensor_shape.unknown_shape(2).as_list())
self.assertAllEqual([2, None, 4], tensor_shape.TensorShape(
(2, None, 4)).as_list())
def testReduce(self):
shape = tensor_shape.TensorShape([2, 3])
ctor, args = shape.__reduce__()
self.assertEqual(ctor, tensor_shape.TensorShape)
self.assertEqual(args,
([tensor_shape.Dimension(2),
tensor_shape.Dimension(3)],))
reconstructed = ctor(*args)
self.assertEqual(reconstructed, shape)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/tensor_shape_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class ImportGraphDefTest(test.TestCase):
def _MakeGraphDef(self,
text,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
text = "versions: { producer: %d min_consumer: %d };\n%s" % (producer,
min_consumer,
text)
ret = graph_pb2.GraphDef()
text_format.Merge(text, ret)
return ret
def testBasic(self):
with ops.Graph().as_default():
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutputFloatOutput' }
node { name: 'B' op: 'ListOutput'
attr { key: 'T'
value { list { type: DT_INT32 type: DT_FLOAT } } } }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_FLOAT } }
input: 'A:1' input: 'B:1' }
"""),
return_elements=["A", "B", "C", "D"],
name="import")
# Assert that the import process creates distinct tensors.
self.assertNotEqual(a.outputs[0].name, a.outputs[1].name)
self.assertNotEqual(b.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[1].name)
# Assert that the ops are connected according to the GraphDef topology.
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], b.outputs[1])
# Check the types of the returned ops and tensors.
self.assertEqual(a.type, "IntOutputFloatOutput")
self.assertEqual(b.type, "ListOutput")
self.assertEqual(c.type, "ListInput")
self.assertEqual(d.type, "ListInput")
self.assertEqual(a.outputs[0].dtype, dtypes.int32)
self.assertEqual(a.outputs[1].dtype, dtypes.float32)
self.assertEqual(b.outputs[0].dtype, dtypes.int32)
self.assertEqual(b.outputs[1].dtype, dtypes.float32)
# Check the names of the returned ops.
self.assertEqual(a.name, "import/A")
self.assertEqual(b.name, "import/B")
self.assertEqual(c.name, "import/C")
self.assertEqual(d.name, "import/D")
# Check that the op_def is still available.
self.assertNotEqual(None, a.op_def)
def testMultipleImport(self):
graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'IntInput' input: 'A:0' }
""")
with ops.Graph().as_default():
# Initial import
a, b = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="")
self.assertEqual(a.name, "A")
self.assertEqual(b.name, "B")
self.assertEqual(list(b.inputs), [a.outputs[0]])
# Repeat the same import
a1, b1 = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="")
self.assertEqual(a1.name, "A_1")
self.assertEqual(b1.name, "B_1")
self.assertEqual(list(b1.inputs), [a1.outputs[0]])
# Repeat the same import again
a2, b2 = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="")
self.assertEqual(a2.name, "A_2")
self.assertEqual(b2.name, "B_2")
self.assertEqual(list(b2.inputs), [a2.outputs[0]])
# Import with an already-used name
a3, b3 = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="A")
self.assertEqual(a3.name, "A_3/A")
self.assertEqual(b3.name, "A_3/B")
self.assertEqual(list(b3.inputs), [a3.outputs[0]])
# Import with an already-used name but with a '/' to indicate an
# "absolute" name scope (see the Graph.name_scope docstring).
a_a, a_b = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="A/")
self.assertEqual(a_a.name, "A/A")
self.assertEqual(a_b.name, "A/B")
self.assertEqual(list(a_b.inputs), [a_a.outputs[0]])
# Repeat the same import.
a_a1, a_b1 = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="A/")
self.assertEqual(a_a1.name, "A/A_1")
self.assertEqual(a_b1.name, "A/B_1")
self.assertEqual(list(a_b1.inputs), [a_a1.outputs[0]])
# Import with existing de-duped node names
a1_1, b1_1 = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A_1' op: 'IntOutput' }
node { name: 'B_1' op: 'IntInput' input: 'A_1:0' }
"""),
return_elements=["A_1", "B_1"],
name="")
self.assertEqual(a1_1.name, "A_1_1")
self.assertEqual(b1_1.name, "B_1_1")
self.assertEqual(list(b1_1.inputs), [a1_1.outputs[0]])
# Create a name scope and then import node with same name
with ops.name_scope("foo"):
constant_op.constant(1)
foo, = importer.import_graph_def(
self._MakeGraphDef("node { name: 'foo' op: 'IntOutput' }"),
return_elements=["foo"],
name="")
self.assertEqual(foo.name, "foo_1")
# Imported node name can't conflict with intermediate name scope (but can
# conflict with outer scope and full name scope)
with ops.name_scope("outer"):
with ops.name_scope("inner"):
c = constant_op.constant(1, name="c")
self.assertEqual(c.op.name, "outer/inner/c")
outer, inner, new_c, outer_inner, outer_inner_c = (
importer.import_graph_def(
self._MakeGraphDef(
"node { name: 'outer' op: 'IntOutput' }"
"node { name: 'inner' op: 'IntOutput' }"
"node { name: 'c' op: 'IntOutput' }"
"node { name: 'outer/inner' op: 'IntOutput' }"
"node { name: 'outer/inner/c' op: 'IntOutput' }"),
return_elements=["outer", "inner", "c", "outer/inner",
"outer/inner/c"],
name=""))
self.assertEqual(outer.name, "outer_1")
self.assertEqual(inner.name, "inner")
self.assertEqual(new_c.name, "c")
self.assertEqual(outer_inner.name, "outer/inner_1")
self.assertEqual(outer_inner_c.name, "outer/inner/c_1")
def testEmptyNameScope(self):
with ops.Graph().as_default():
# Create name scope but don't create any ops with it
with ops.name_scope("foo"):
pass
# Import graph def that uses name scope name
op, = importer.import_graph_def(
self._MakeGraphDef("node { name: 'foo' op: 'IntOutput' }"),
return_elements=["foo"],
name="")
self.assertEqual(op.name, "foo")
def testInputMap(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'TwoIntOutputs' }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={"A:0": feed_a_0,
"B:1": feed_b_1},
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
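  # As shown above, `input_map` keys are tensor names ("node:output_index") in
  # the GraphDef being imported, and values are tensors in the importing graph
  # that replace those outputs; unmapped outputs are wired up normally. A
  # minimal usage sketch (assuming a GraphDef `gdef` whose node 'A' feeds 'B'):
  #
  #   replacement = constant_op.constant(0, dtype=dtypes.int32)
  #   b, = importer.import_graph_def(
  #       gdef, input_map={"A:0": replacement}, return_elements=["B"])
  #
  # The next two tests check that bytes and unicode keys behave identically.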
def testInputMapBytes(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'TwoIntOutputs' }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={b"A:0": feed_a_0,
b"B:1": feed_b_1},
return_elements=[b"A", b"B", b"C", b"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapUnicode(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'TwoIntOutputs' }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={u"A:0": feed_a_0,
u"B:1": feed_b_1},
return_elements=[u"A", u"B", u"C", u"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testImplicitZerothOutput(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'IntInput' input: 'A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.inputs[0], a.outputs[0])
def testInputMapImplicitZerothOutput(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
b, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'IntInput' input: 'A:0' }
"""),
input_map={"A": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testWithControlDependency(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' input: '^A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.control_inputs, [a])
def testWithRefs(self):
with ops.Graph().as_default():
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'RefOutput' }
node { name: 'B' op: 'IntOutput' }
node { name: 'C' op: 'TwoIntInputs' input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'RefInputIntInput' input: 'A:0' input: 'B:0' }
"""),
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[0])
self.assertEqual(d.inputs[1], b.outputs[0])
self.assertEqual(a.outputs[0].dtype, dtypes.int32_ref)
self.assertEqual(c._input_types, [dtypes.int32, dtypes.int32])
self.assertEqual(c.outputs, [])
self.assertEqual(d._input_types, [dtypes.int32_ref, dtypes.int32])
self.assertEqual(d.outputs, [])
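  # The assertions above show that a ref-typed output (int32_ref) can feed both
  # ref and non-ref inputs: 'C' records plain int32 input types (the ref is
  # implicitly dereferenced), while 'D' keeps int32_ref for its ref input.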
def testResources(self):
    # Produce a GraphDef containing ops that produce and consume resources.
graph = ops.Graph()
with graph.as_default():
var = resource_variable_ops.ResourceVariable(1.0)
var_assign = var.assign(2.0)
# Use an op that requires handle shape to be set.
var_shape = resource_variable_ops.variable_shape(var.handle)
init = variables.global_variables_initializer()
graph_def = graph.as_graph_def()
# Import the GraphDef.
with ops.Graph().as_default():
# pylint: disable=unused-variable
imported_var, imported_assign, imported_shape, imported_init = (
importer.import_graph_def(
graph_def,
return_elements=[var.name, var_assign.name, var_shape.name,
init.name]))
# Make sure the handle shape is set on the imported variable.
new_var_shape = resource_variable_ops.variable_shape(imported_var)
# pylint: enable=unused-variable
# Run the imported graph.
# TODO(b/76173421): make this work (currently DCHECKS)
# with self.cached_session() as sess:
# self.evaluate(imported_init)
# self.assertEqual(self.evaluate(imported_var), 1.0)
# self.assertEqual(self.evaluate(imported_assign), 2.0)
# self.assertEqual(list(self.evaluate(imported_shape)), [])
# self.assertEqual(list(self.evaluate(new_var_shape)), [])
def testWhileLoop(self):
    # Produce a GraphDef containing a while loop.
graph = ops.Graph()
with graph.as_default():
r = control_flow_ops.while_loop(lambda i: i < 10, lambda i: i + 1, [0])
# Add an op that consumes the while loop output.
math_ops.add(r, 1)
graph_def = graph.as_graph_def()
# Import the GraphDef and make sure it runs.
with ops.Graph().as_default():
imported_r, = importer.import_graph_def(graph_def,
return_elements=[r.name])
self.assertEqual(imported_r.name, "import/" + r.name)
with self.cached_session() as sess:
self.assertEqual(self.evaluate(imported_r), 10)
def testImportWhileLoopInCond(self):
    # Produce a GraphDef containing a while loop.
graph = ops.Graph()
with graph.as_default():
r = control_flow_ops.while_loop(lambda i: i < 10, lambda i: i + 1, [0])
graph_def = graph.as_graph_def()
# Import the GraphDef inside a cond and make sure it runs.
with ops.Graph().as_default():
def ImportFn():
return importer.import_graph_def(graph_def, return_elements=[r.name])[0]
pred = array_ops.placeholder(dtypes.bool)
out = control_flow_ops.cond(pred, ImportFn,
lambda: constant_op.constant(1))
with self.cached_session() as sess:
self.assertEqual(sess.run(out, {pred: True}), 10)
self.assertEqual(sess.run(out, {pred: False}), 1)
def testImportWhileLoopInWhileLoop(self):
self.skipTest("b/111757448")
    # Produce a GraphDef containing a while loop.
graph = ops.Graph()
with graph.as_default():
r = control_flow_ops.while_loop(lambda i: i < 10, lambda i: i + 1, [0])
graph_def = graph.as_graph_def()
# Import the GraphDef inside another loop and make sure it runs.
with ops.Graph().as_default():
def ImportFn(_):
return importer.import_graph_def(graph_def, return_elements=[r.name])[0]
out = control_flow_ops.while_loop(
lambda i: i < 2, ImportFn, [0],
shape_invariants=[tensor_shape.TensorShape(None)])
with self.cached_session() as sess:
self.assertEqual(self.evaluate(out), 10)
def testTypeMismatchInGraphDef(self):
# TODO(skyewm): improve error message
error_msg = ("Input 0 of node import/B was passed int32 from import/A:0 "
"incompatible with expected float.")
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, error_msg):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'FloatInput' input: 'A:0' }
"""))
def testShapeWhitelist(self):
    # Barrier's output shape is a vector of length 2, but the graph declares
    # it as a scalar. This mismatch is currently whitelisted.
with ops.Graph().as_default():
_ = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Barrier'
attr { key: '_output_shapes'
value { list { shape { } } } }
attr { key: 'component_types'
value { list { type: DT_FLOAT } } } }
"""),
return_elements=["A"],
name="import")
def testShapeWhitelistViolation(self):
    # L2Loss produces a scalar, but the graph declares an incompatible shape,
    # so importing raises an error.
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
_ = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'FloatOutput' }
node { name: 'B' op: 'L2Loss'
input: 'A:0'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: '_output_shapes'
value { list { shape { dim { size: 43 } } } } } }
"""),
return_elements=["B"],
name="import")
self.assertTrue(
"Shapes () and (43,) are not compatible" in str(e.exception))
def testInvalidSignatureTooManyInputsInGraphDef(self):
with ops.Graph().as_default():
# TODO(skyewm): improve error message
with self.assertRaisesRegexp(
ValueError,
"NodeDef expected inputs '' do not match 1 inputs specified"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'None' input: 'A:0' }
"""))
def testInvalidSignatureNotEnoughInputsInGraphDef(self):
with ops.Graph().as_default():
# TODO(skyewm): improve error message
with self.assertRaisesRegexp(
ValueError,
"NodeDef expected inputs 'int32, float' do not match 1 inputs "
"specified"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'IntInputFloatInput' input: 'A:0' }
"""))
def testMissingInputOpInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError,
"Node 'B': Unknown input node 'A:0'"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'FloatInput' input: 'A:0' }
"""))
def testMissingInputOpInGraphDefButAppearsInInputMap(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(5.0)
b, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'FloatInput' input: 'A:0' }
"""),
input_map={"A:0": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testMissingInputTensorInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError,
"Node 'B': Connecting to invalid output 1 of source node A "
"which has 1 outputs"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'FloatOutput' }
node { name: 'B' op: 'FloatInput' input: 'A:1' }
"""))
def testMissingControlInputInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError,
r"Node 'B': Unknown input node '\^A'"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: '^A' }
"""))
def testInvalidTensorNameOutputIndexInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError,
"Node 'B': Unknown input node 'A:B'"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B' }
"""))
def testInvalidTensorNameInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError,
"Node 'B': Unknown input node 'A:B:0'"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B:0' }
"""))
def testMissingReturnOperation(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError, "Requested return node 'B' not found in graph def"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["B"])
def testMissingReturnTensor(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError,
r"Invalid return output 1 of node 'A', which has 1 output\(s\)"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
return_elements=["A:1"])
with self.assertRaisesRegexp(
ValueError, "Requested return tensor 'B:0' not found in graph def"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
return_elements=["B:0"])
with self.assertRaisesRegexp(ValueError,
"Cannot convert 'A:B:0' to a tensor name."):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
return_elements=["A:B:0"])
def testMissingInputMap(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError,
r"Attempted to map inputs that were not found in graph_def: \[B:0\]"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
input_map={"B:0": constant_op.constant(5.0)})
def testInputMapUnusedAsInput(self):
with ops.Graph().as_default():
# Mapping an unused node output should succeed.
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
input_map={"A:0": constant_op.constant(5.0)})
# Mapping a non-existent output of an existing node should fail.
with self.assertRaisesRegexp(
ValueError,
r"Attempted to map inputs that were not found in graph_def: \[A:2\]"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
input_map={"A:2": constant_op.constant(5.0)})
def testInputMapTypeMismatch(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError, "Input 0 of node import/B was passed float from Const:0 "
"incompatible with expected int32."):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'IntInput' input: 'A:0' }
"""),
input_map={"A:0": constant_op.constant(5.0)})
def testNoReturns(self):
with ops.Graph().as_default() as g:
ret = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""))
self.assertEqual(ret, None)
a = g.get_operation_by_name("import/A")
self.assertEqual(a.type, "None")
def testOverrideNamePrefix(self):
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"],
name="imported_graph")
self.assertEqual(a.name, "imported_graph/A")
def testDefaultNamePrefix(self):
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"],
name=None)
self.assertEqual(a.name, "import/A")
def testNamePrefixColocationAttrs(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
b, = importer.import_graph_def(
original_graph_def, return_elements=["B"], name="imported_graph")
self.assertTrue("_class" in b.node_def.attr)
self.assertProtoEquals(
"list { s: 'loc:@imported_graph/A' }",
b.node_def.attr["_class"])
def testColocationAndDevice(self):
# A and B are colocated, device set on A.
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' device: '/device:CPU:0' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
a, b = importer.import_graph_def(original_graph_def,
return_elements=["A", "B"],
name="")
self.assertEqual(a.device, "/device:CPU:0")
self.assertEqual(b.device, "/device:CPU:0")
self.assertEqual(a.colocation_groups(), [b"loc:@A"])
self.assertEqual(b.colocation_groups(), [b"loc:@A"])
# A and B are colocated, device set on B.
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }
node { name: 'B' op: 'None' device: '/device:CPU:0' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
a, b = importer.import_graph_def(original_graph_def,
return_elements=["A", "B"],
name="")
# TODO(skyewm): this behavior seems inconsistent with the above. Why is
# B's device ignored?
self.assertEqual(a.device, "")
self.assertEqual(b.device, "")
self.assertEqual(a.colocation_groups(), [b"loc:@A"])
self.assertEqual(b.colocation_groups(), [b"loc:@A"])
def testColocationWithDeviceFn(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
# A device function that places "A" on one device and "B" on
# another device. Because B is colocated with A, we test that B's
# device function is overridden by A.
def CustomDeviceFn(op):
if "A" in op.name:
return "/device:A:0"
else:
return "/device:B:0"
with ops.Graph().as_default():
with ops.device(CustomDeviceFn):
a, b = importer.import_graph_def(original_graph_def,
return_elements=["A", "B"],
name="imported_graph")
self.assertEqual(a.device, "/device:A:0")
self.assertEqual(b.device, "/device:A:0")
self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
    # Test a scenario where 'A' doesn't get a device; 'A' should not have a
    # device, but at runtime it will be colocated with 'B' because of the
    # colocation attribute. The device function's placement of B is still
    # overridden by A's (empty) device.
def BDeviceFn(op):
if "B" in op.name:
return "/device:B:0"
return ""
with ops.Graph().as_default():
with ops.device(BDeviceFn):
a, b = importer.import_graph_def(original_graph_def,
return_elements=["A", "B"],
name="imported_graph")
self.assertEqual(a.device, "")
self.assertEqual(b.device, "")
self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
# Only A gets a device, so B inherits it implicitly.
def ADeviceFn(op):
if "A" in op.name:
return "/device:A:0"
return ""
with ops.Graph().as_default():
with ops.device(ADeviceFn):
a, b = importer.import_graph_def(original_graph_def,
return_elements=["A", "B"],
name="imported_graph")
self.assertEqual(a.device, "/device:A:0")
self.assertEqual(b.device, "/device:A:0")
self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
def testMultipleColocationWithDeviceFn(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None'}
node { name: 'B' op: 'None'}
node { name: 'C' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' s: 'loc:@B' } }
} }""")
# A device function that places "B" on a device, and "A" is empty.
#
# B and C should contain "/device:B". A will not right now. But
# because of the colocation property, at runtime it would be
# placed with B and C.
def CustomDeviceFn(op):
if "B" in op.name:
return "/device:B:0"
return ""
with ops.Graph().as_default():
with ops.device(CustomDeviceFn):
a, b, c = importer.import_graph_def(original_graph_def,
return_elements=["A", "B", "C"],
name="imported_graph")
self.assertEqual(a.device, "")
self.assertEqual(b.device, "/device:B:0")
self.assertEqual(c.device, "/device:B:0")
self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/B"])
self.assertEqual(c.colocation_groups(),
[b"loc:@imported_graph/A", b"loc:@imported_graph/B"])
def testNamePrefixColocationAttrsMultipleImport(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
a, b = importer.import_graph_def(
original_graph_def, return_elements=["A", "B"], name="")
a_1, b_1 = importer.import_graph_def(
original_graph_def, return_elements=["A", "B"], name="")
self.assertEqual(a.name, "A")
self.assertEqual(b.name, "B")
self.assertEqual(b.colocation_groups(), [b"loc:@A"])
self.assertEqual(a_1.name, "A_1")
self.assertEqual(b_1.name, "B_1")
self.assertEqual(b_1.colocation_groups(), [b"loc:@A_1"])
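  # Colocation attributes ("loc:@A") are rewritten on import to track the
  # renamed nodes: the first import keeps "loc:@A", the second becomes
  # "loc:@A_1" after uniquification.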
def testNamePrefixColocationAttrsNotFound(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError, "Node 'B' expects to be colocated with unknown node 'A'"):
importer.import_graph_def(
original_graph_def, return_elements=["B"], name="imported_graph")
def testEmptyGraph(self):
with ops.Graph().as_default() as g:
init_version = g.version
importer.import_graph_def(self._MakeGraphDef(""))
self.assertEqual(init_version, g.version)
def testInvalidInputForGraphDef(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as e:
importer.import_graph_def("")
self.assertEqual("graph_def must be a GraphDef proto.", str(e.exception))
def testInvalidInputForInputMap(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as e:
importer.import_graph_def(
self._MakeGraphDef(""), input_map=[constant_op.constant(5.0)])
self.assertEqual("input_map must be a dictionary mapping strings to "
"Tensor objects.", str(e.exception))
graph_def = self._MakeGraphDef("""
node { name: 'a' op: 'Placeholder'
attr { key: 'dtype' value { type: DT_FLOAT } }}
node { name: 'id' op: 'Identity' input: 'a:0'
attr { key: 'T' value { type: DT_FLOAT } }}""")
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
graph_def,
input_map={"a:0": variables.Variable(5.0)},
name="")
self.assertStartsWith(str(e.exception),
"tf.import_graph_def() requires a non-empty `name` "
"if `input_map` contains non-Tensor values.")
with ops.Graph().as_default():
t, = importer.import_graph_def(
graph_def,
input_map={"a:0": constant_op.constant(5.0)},
name="",
return_elements=["id:0"])
with self.cached_session():
self.assertEqual(5.0, self.evaluate(t))
def testInvalidInputForReturnOperations(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(
TypeError, "return_elements must be a list of strings."):
importer.import_graph_def(self._MakeGraphDef(""), return_elements=[7])
with self.assertRaisesRegexp(ValueError,
"Cannot convert 'a:b:c' to a tensor name."):
importer.import_graph_def(
self._MakeGraphDef(""), return_elements=["a:b:c"])
def testDuplicateOperationNames(self):
with self.assertRaisesRegexp(ValueError, "Node 'A' is not unique"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'IntOutput' }
node { name: 'A' op: 'IntOutput' }
"""))
def testWithExtensionAndAttr(self):
with ops.Graph().as_default() as g:
c = constant_op.constant(5.0, dtype=dtypes.float32, name="c")
array_ops.stack([c, c], name="pack")
gdef = g.as_graph_def()
with self.cached_session():
pack, = importer.import_graph_def(gdef, return_elements=["pack"])
self.assertAllEqual(pack.outputs[0].eval(), [5.0, 5.0])
def testWithDevice(self):
with ops.Graph().as_default() as g:
# No device.
a = constant_op.constant(3.0, name="a")
with ops.device("/cpu:0"):
b = constant_op.constant(4.0, name="b")
with ops.device("/job:worker"):
c = constant_op.constant(5.0, name="c")
gdef = g.as_graph_def()
with ops.Graph().as_default():
a2, b2, c2 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual(a.device, a2.device)
self.assertEqual(b.device, b2.device)
self.assertEqual(c.device, c2.device)
with ops.Graph().as_default():
with ops.device(device.merge_device("/task:0")):
a3, b3, c3 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/task:0", a3.device)
self.assertEqual("/task:0/device:CPU:0", b3.device) # canonicalized.
self.assertEqual(c.device + "/task:0", c3.device)
with ops.Graph().as_default():
with ops.device(device.merge_device("/job:ps")):
a4, b4, c4 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/job:ps", a4.device)
self.assertEqual("/job:ps/device:CPU:0", b4.device) # canonicalized.
self.assertEqual(c.device, c4.device) # worker overrides ps.
with ops.Graph().as_default():
with ops.device(device.merge_device("/device:GPU:0")):
a5, b5, c5 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/device:GPU:0", a5.device)
self.assertEqual("/device:CPU:0", b5.device) # cpu overrides gpu.
self.assertEqual(c.device + "/device:GPU:0", c5.device)
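  # The assertions above illustrate the merge semantics: a device scope active
  # during import is merged into each imported node's recorded device, and on
  # conflicting fields the imported value wins (e.g. "/cpu:0" beats
  # "/device:GPU:0", "/job:worker" beats "/job:ps").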
def testWithDeviceFunctionDependingOnInputs(self):
with ops.Graph().as_default() as g:
with ops.device("/job:ps"):
v1 = constant_op.constant(1.0)
v2 = constant_op.constant(1.0)
_ = v1 + v2
_ = v1 - v2
_ = array_ops.identity(v1)
gdef = g.as_graph_def()
# We'll use the following device function to observe ops with two inputs.
ops_with_two_inputs = []
def InputCounter(op):
if len(op.inputs) == 2:
ops_with_two_inputs.append(op)
return ""
with ops.Graph().as_default() as g:
with ops.device(InputCounter):
importer.import_graph_def(gdef)
# We expect to see the add and subtract, but not identity.
self.assertEqual(2, len(ops_with_two_inputs))
def testGradient(self):
with ops.Graph().as_default() as g:
inputs = array_ops.placeholder(
dtypes.float32, shape=[None, 100], name="input")
weights = array_ops.placeholder(
dtypes.float32, shape=[100, 10], name="weights")
biases = array_ops.placeholder(dtypes.float32, shape=[10], name="biases")
activations = nn_ops.relu(
math_ops.matmul(inputs, weights) + biases, name="activations")
loss = math_ops.reduce_mean(activations, name="loss")
gdef = g.as_graph_def()
with ops.Graph().as_default() as g:
input_placeholder = array_ops.placeholder(dtypes.float32, shape=[32, 100])
weights_var = variables.Variable(
random_ops.truncated_normal([100, 10]), name="weights")
biases_var = variables.Variable(array_ops.zeros([10]), name="biases")
activations, loss = importer.import_graph_def(
gdef,
input_map={
"input:0": input_placeholder,
"weights:0": weights_var,
"biases:0": biases_var
},
return_elements=["activations:0", "loss:0"])
self.assertEqual([32, 10], activations.get_shape())
self.assertEqual([], loss.get_shape())
weights_grad, biases_grad = gradients_impl.gradients(
loss, [weights_var, biases_var])
self.assertEqual([100, 10], weights_grad.get_shape())
self.assertEqual([10], biases_grad.get_shape())
def testLargeGraph(self):
with self.cached_session():
      # The default message byte limit is 64M. Ours is 2G, with a warning at
      # 512M. Adding a float32 tensor with 130M entries should exceed the
      # warning threshold but not the hard limit.
input_shape = [130, 1000, 1000]
tensor_input = np.ones(input_shape, dtype=np.float32)
t = constant_op.constant(tensor_input, shape=input_shape)
g = array_ops.identity(t)
self.evaluate(g)
def testVersion(self):
v0 = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
v2 = versions.GRAPH_DEF_VERSION
v1 = (v0 + v2) // 2
for producer in v0, v1, v2:
for min_consumer in v0, v1, v2:
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef(
"node { name: 'A' op: 'TwoIntOutputs' }",
producer=producer,
min_consumer=min_consumer),
return_elements=["A"])
self.assertEqual(a.graph.graph_def_versions.producer, producer)
self.assertEqual(a.graph.graph_def_versions.min_consumer,
min_consumer)
def testVersionLow(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(
Exception,
r"GraphDef producer version -1 below min producer %d supported "
r"by TensorFlow \S+\. Please regenerate your graph.$" %
versions.GRAPH_DEF_VERSION_MIN_PRODUCER):
importer.import_graph_def(self._MakeGraphDef("", producer=-1))
def testVersionHigh(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError,
r"GraphDef min consumer version %d above current version %d "
r"for TensorFlow \S+\. Please upgrade TensorFlow\.$" %
(1 << 30, versions.GRAPH_DEF_VERSION)):
importer.import_graph_def(self._MakeGraphDef("", min_consumer=1 << 30))
def testVersionAppliesToOpConstruction(self):
"""These tests rely on shape fns in test_ops.cc."""
with ops.Graph().as_default():
importer.import_graph_def(
self._MakeGraphDef(
"node { name: 'A' op: 'RequiresOlderGraphVersion' }",
producer=versions.GRAPH_DEF_VERSION - 1),
return_elements=["A"])
with ops.Graph().as_default():
with self.assertRaisesWithPredicateMatch(ValueError,
"Wrong graph version.*"):
importer.import_graph_def(
self._MakeGraphDef(
"node { name: 'A' op: 'RequiresOlderGraphVersion' }",
producer=versions.GRAPH_DEF_VERSION),
return_elements=["A"])
def testDefaultAttrsAdded(self):
with ops.Graph().as_default():
a = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithDefaultAttr' }
"""),
return_elements=["A"])
self.assertEqual(123.0, a[0].get_attr("default_float"))
def testDefaultAttrsRemoved(self):
producer_op_list = op_def_pb2.OpList()
text_format.Merge("""
op {
name: 'OpWithFutureDefaultAttr'
attr { name: 'default_int' type: 'int' default_value { i: 456 } }
}
""", producer_op_list)
    # An attr that exists only in producer_op_list and is set to its default
    # value gets removed on import.
with ops.Graph().as_default():
a = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithFutureDefaultAttr'
attr { key: 'default_int' value { i: 456 } } }
"""),
return_elements=["A"],
producer_op_list=producer_op_list)
with self.assertRaisesRegexp(
ValueError, "Operation 'import/A' has no attr named 'default_int'."):
a[0].get_attr("default_int")
def testFunctions(self):
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype, dtype)
def Grad(x, y, dout1, dout2): # pylint: disable=unused-argument
# Return the inputs for simplicity of testing. The correct return value
# would be (dout1 + dout2, dout1 - dout2)
return x, y
@function.Defun(dtype, dtype, grad_func=Grad)
def FuncWithGrad(x, y):
return x + y, x - y
@function.Defun(dtypes.int32)
def ExternalTensorFunc(x):
# c must be defined in the containing graph
return x + c
@function.Defun(dtypes.int32, dtypes.int32)
def OuterFunc(x, y):
@function.Defun(dtypes.int32)
def InnerFunc(x):
return x + x
return InnerFunc(x) + y
# Create graph with function calls and export to GraphDef
with ops.Graph().as_default() as g1:
p1 = array_ops.placeholder(dtype, name="p1")
p2 = array_ops.placeholder(dtype, name="p2")
# pylint: disable=unexpected-keyword-arg
a, b = FuncWithGrad(p1, p2, name="f")
c = constant_op.constant(10, dtype=dtypes.int32)
ExternalTensorFunc(1, name="external")
OuterFunc(10, 1, name="outer")
# pylint: enable=unexpected-keyword-arg
gdef = g1.as_graph_def()
# Import GraphDef into new graph, add imported gradients, and test that
# imported functions can be run
with ops.Graph().as_default() as g2:
p1, p2, a, b = importer.import_graph_def(
gdef, return_elements=["p1:0", "p2:0", "f:0", "f:1"], name="")
grad = gradients_impl.gradients([a], [p1, p2])
with self.session(graph=g2) as sess:
feed_dict = {p1: 1, p2: 2}
a_val, b_val, grad_val = sess.run([a, b, grad], feed_dict=feed_dict)
self.assertEqual(a_val, 3.0)
self.assertEqual(b_val, -1.0)
# Grad function returns inputs values for testing
self.assertEqual(grad_val, [1.0, 2.0])
self.assertEqual(sess.run("external:0"), 11)
self.assertEqual(sess.run("outer:0"), 21)
# Export the new graph and reimport to test that imported functions can be
# successfully exported/imported again
gdef = g2.as_graph_def()
with ops.Graph().as_default() as g3:
p1, p2, a, b = importer.import_graph_def(
gdef, return_elements=["p1:0", "p2:0", "f:0", "f:1"], name="")
      # Create new gradient functions (in addition to the imported gradient
      # functions created in g2).
grad = gradients_impl.gradients([a], [p1, p2])
with self.session(graph=g3) as sess:
feed_dict = {p1: 1, p2: 2}
a_val, b_val, grad_val = sess.run([a, b, grad], feed_dict=feed_dict)
self.assertEqual(a_val, 3.0)
self.assertEqual(b_val, -1.0)
self.assertEqual(grad_val, [1.0, 2.0])
self.assertEqual(sess.run("external:0"), 11)
self.assertEqual(sess.run("outer:0"), 21)
def testImportInsideDefun(self):
g = ops.Graph()
with g.as_default():
@function.Defun()
def Add2(x, y):
return math_ops.add(x, y)
x = constant_op.constant(3.0, dtype=dtypes.float32)
y = constant_op.constant(-5.0, dtype=dtypes.float32)
z = Add2(x, y, name="z") # pylint: disable=unexpected-keyword-arg
gdef = g.as_graph_def()
@function.Defun()
def TestFunc():
return importer.import_graph_def(gdef, return_elements=["z:0"])[0]
z = TestFunc()
with self.cached_session():
z_val = self.evaluate(z)
self.assertEqual(z_val, -2.0)
def testImportGraphWithFunctionTwice(self):
g = ops.Graph()
with g.as_default():
@function.Defun()
def Add2(x, y):
return math_ops.add(x, y)
x = array_ops.placeholder(dtype=dtypes.float32, name="x")
y = array_ops.placeholder(dtype=dtypes.float32, name="y")
_ = Add2(x, y, name="z") # pylint: disable=unexpected-keyword-arg
gdef = g.as_graph_def()
x = random_ops.random_uniform(dtype=dtypes.float32, shape=())
y = random_ops.random_uniform(dtype=dtypes.float32, shape=())
input_map = {"x:0": x, "y:0": y}
with ops.name_scope("first"):
z1 = importer.import_graph_def(gdef, return_elements=["z:0"],
input_map=input_map)[0]
with ops.name_scope("second"):
z2 = importer.import_graph_def(gdef, return_elements=["z:0"],
input_map=input_map)[0]
with self.cached_session() as sess:
z1_val, z2_val = sess.run((z1, z2))
self.assertAllEqual(z1_val, z2_val)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/framework/importer_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensor_spec."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class TensorSpecTest(test_util.TensorFlowTestCase):
def testDefaultDType(self):
desc = tensor_spec.TensorSpec([1])
self.assertEqual(desc.dtype, dtypes.float32)
def testAcceptsNumpyDType(self):
desc = tensor_spec.TensorSpec([1], np.float32)
self.assertEqual(desc.dtype, dtypes.float32)
def testAcceptsTensorShape(self):
desc = tensor_spec.TensorSpec(tensor_shape.TensorShape([1]), dtypes.float32)
self.assertEqual(desc.shape, tensor_shape.TensorShape([1]))
def testUnknownShape(self):
desc = tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)
self.assertEqual(desc.shape, tensor_shape.TensorShape(None))
@test_util.run_deprecated_v1
def testShapeCompatibility(self):
unknown = array_ops.placeholder(dtypes.int64)
partial = array_ops.placeholder(dtypes.int64, shape=[None, 1])
full = array_ops.placeholder(dtypes.int64, shape=[2, 3])
rank3 = array_ops.placeholder(dtypes.int64, shape=[4, 5, 6])
desc_unknown = tensor_spec.TensorSpec(None, dtypes.int64)
self.assertTrue(desc_unknown.is_compatible_with(unknown))
self.assertTrue(desc_unknown.is_compatible_with(partial))
self.assertTrue(desc_unknown.is_compatible_with(full))
self.assertTrue(desc_unknown.is_compatible_with(rank3))
desc_partial = tensor_spec.TensorSpec([2, None], dtypes.int64)
self.assertTrue(desc_partial.is_compatible_with(unknown))
self.assertTrue(desc_partial.is_compatible_with(partial))
self.assertTrue(desc_partial.is_compatible_with(full))
self.assertFalse(desc_partial.is_compatible_with(rank3))
desc_full = tensor_spec.TensorSpec([2, 3], dtypes.int64)
self.assertTrue(desc_full.is_compatible_with(unknown))
self.assertFalse(desc_full.is_compatible_with(partial))
self.assertTrue(desc_full.is_compatible_with(full))
self.assertFalse(desc_full.is_compatible_with(rank3))
desc_rank3 = tensor_spec.TensorSpec([4, 5, 6], dtypes.int64)
self.assertTrue(desc_rank3.is_compatible_with(unknown))
self.assertFalse(desc_rank3.is_compatible_with(partial))
self.assertFalse(desc_rank3.is_compatible_with(full))
self.assertTrue(desc_rank3.is_compatible_with(rank3))
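  # Compatibility above follows TensorShape semantics: an unknown rank is
  # compatible with anything, and a partially-known shape is compatible when
  # the ranks agree and every known dimension matches.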
@test_util.run_deprecated_v1
def testTypeCompatibility(self):
floats = array_ops.placeholder(dtypes.float32, shape=[10, 10])
ints = array_ops.placeholder(dtypes.int32, shape=[10, 10])
desc = tensor_spec.TensorSpec(shape=(10, 10), dtype=dtypes.float32)
self.assertTrue(desc.is_compatible_with(floats))
self.assertFalse(desc.is_compatible_with(ints))
def testName(self):
desc = tensor_spec.TensorSpec([1], dtypes.float32, name="beep")
self.assertEqual(desc.name, "beep")
def testRepr(self):
desc1 = tensor_spec.TensorSpec([1], dtypes.float32, name="beep")
self.assertEqual(
repr(desc1),
"TensorSpec(shape=(1,), dtype=tf.float32, name='beep')")
desc2 = tensor_spec.TensorSpec([1, None], dtypes.int32)
if desc2.shape._v2_behavior:
self.assertEqual(
repr(desc2),
"TensorSpec(shape=(1, None), dtype=tf.int32, name=None)")
else:
self.assertEqual(
repr(desc2),
"TensorSpec(shape=(1, ?), dtype=tf.int32, name=None)")
def testFromTensorSpec(self):
spec_1 = tensor_spec.TensorSpec((1, 2), dtypes.int32)
spec_2 = tensor_spec.TensorSpec.from_spec(spec_1)
self.assertEqual(spec_1, spec_2)
@test_util.run_deprecated_v1
def testFromTensor(self):
zero = constant_op.constant(0)
spec = tensor_spec.TensorSpec.from_tensor(zero)
self.assertEqual(spec.dtype, dtypes.int32)
self.assertEqual(spec.shape, [])
self.assertEqual(spec.name, "Const")
@test_util.run_deprecated_v1
def testFromPlaceholder(self):
unknown = array_ops.placeholder(dtypes.int64, name="unknown")
partial = array_ops.placeholder(dtypes.float32,
shape=[None, 1],
name="partial")
spec_1 = tensor_spec.TensorSpec.from_tensor(unknown)
self.assertEqual(spec_1.dtype, dtypes.int64)
self.assertEqual(spec_1.shape, None)
self.assertEqual(spec_1.name, "unknown")
spec_2 = tensor_spec.TensorSpec.from_tensor(partial)
self.assertEqual(spec_2.dtype, dtypes.float32)
self.assertEqual(spec_2.shape.as_list(), [None, 1])
self.assertEqual(spec_2.name, "partial")
def testFromBoundedTensorSpec(self):
bounded_spec = tensor_spec.BoundedTensorSpec((1, 2), dtypes.int32, 0, 1)
spec = tensor_spec.TensorSpec.from_spec(bounded_spec)
self.assertEqual(bounded_spec.shape, spec.shape)
self.assertEqual(bounded_spec.dtype, spec.dtype)
self.assertEqual(bounded_spec.name, spec.name)
def testSerialization(self):
desc = tensor_spec.TensorSpec([1, 5], dtypes.float32, "test")
self.assertEqual(pickle.loads(pickle.dumps(desc)), desc)
class BoundedTensorSpecTest(test_util.TensorFlowTestCase):
def testInvalidMinimum(self):
with self.assertRaisesRegexp(ValueError, "not compatible"):
tensor_spec.BoundedTensorSpec((3, 5), dtypes.uint8, (0, 0, 0), (1, 1))
def testInvalidMaximum(self):
with self.assertRaisesRegexp(ValueError, "not compatible"):
tensor_spec.BoundedTensorSpec((3, 5), dtypes.uint8, 0, (1, 1, 1))
def testMinimumMaximumAttributes(self):
spec = tensor_spec.BoundedTensorSpec(
(1, 2, 3), dtypes.float32, 0, (5, 5, 5))
self.assertEqual(type(spec.minimum), np.ndarray)
self.assertEqual(type(spec.maximum), np.ndarray)
self.assertAllEqual(spec.minimum, np.array(0, dtype=np.float32))
self.assertAllEqual(spec.maximum, np.array([5, 5, 5], dtype=np.float32))
def testNotWriteableNP(self):
spec = tensor_spec.BoundedTensorSpec(
(1, 2, 3), dtypes.float32, 0, (5, 5, 5))
with self.assertRaisesRegexp(ValueError, "read-only"):
spec.minimum[0] = -1
with self.assertRaisesRegexp(ValueError, "read-only"):
spec.maximum[0] = 100
def testReuseSpec(self):
spec_1 = tensor_spec.BoundedTensorSpec((1, 2), dtypes.int32,
minimum=0, maximum=1)
spec_2 = tensor_spec.BoundedTensorSpec(
spec_1.shape, spec_1.dtype, spec_1.minimum, spec_1.maximum)
self.assertEqual(spec_1, spec_2)
def testScalarBounds(self):
spec = tensor_spec.BoundedTensorSpec(
(), dtypes.float32, minimum=0.0, maximum=1.0)
self.assertIsInstance(spec.minimum, np.ndarray)
self.assertIsInstance(spec.maximum, np.ndarray)
# Sanity check that numpy compares correctly to a scalar for an empty shape.
self.assertEqual(0.0, spec.minimum)
self.assertEqual(1.0, spec.maximum)
# Check that the spec doesn't fail its own input validation.
_ = tensor_spec.BoundedTensorSpec(
spec.shape, spec.dtype, spec.minimum, spec.maximum)
def testFromBoundedTensorSpec(self):
spec_1 = tensor_spec.BoundedTensorSpec((1, 2), dtypes.int32,
minimum=0, maximum=1)
spec_2 = tensor_spec.BoundedTensorSpec.from_spec(spec_1)
self.assertEqual(spec_1, spec_2)
def testEquality(self):
spec_1_1 = tensor_spec.BoundedTensorSpec((1, 2, 3), dtypes.float32,
0, (5, 5, 5))
spec_1_2 = tensor_spec.BoundedTensorSpec((1, 2, 3), dtypes.float32,
0.00000001,
(5, 5, 5.00000000000000001))
spec_2_1 = tensor_spec.BoundedTensorSpec((1, 2, 3), dtypes.float32,
1, (5, 5, 5))
spec_2_2 = tensor_spec.BoundedTensorSpec((1, 2, 3), dtypes.float32,
(1, 1, 1), (5, 5, 5))
spec_2_3 = tensor_spec.BoundedTensorSpec((1, 2, 3), dtypes.float32,
(1, 1, 1), 5)
spec_3_1 = tensor_spec.BoundedTensorSpec((1, 2, 3), dtypes.float32,
(2, 1, 1), (5, 5, 5))
self.assertEqual(spec_1_1, spec_1_2)
self.assertEqual(spec_1_2, spec_1_1)
self.assertNotEqual(spec_1_1, spec_2_2)
self.assertNotEqual(spec_1_1, spec_2_1)
self.assertNotEqual(spec_2_2, spec_1_1)
self.assertNotEqual(spec_2_1, spec_1_1)
self.assertEqual(spec_2_1, spec_2_2)
self.assertEqual(spec_2_2, spec_2_1)
self.assertEqual(spec_2_2, spec_2_3)
self.assertNotEqual(spec_1_1, spec_3_1)
self.assertNotEqual(spec_2_1, spec_3_1)
self.assertNotEqual(spec_2_2, spec_3_1)
def testFromTensorSpec(self):
spec = tensor_spec.TensorSpec((1, 2), dtypes.int32)
bounded_spec = tensor_spec.BoundedTensorSpec.from_spec(spec)
self.assertEqual(spec.shape, bounded_spec.shape)
self.assertEqual(spec.dtype, bounded_spec.dtype)
self.assertEqual(spec.dtype.min, bounded_spec.minimum)
self.assertEqual(spec.dtype.max, bounded_spec.maximum)
self.assertEqual(spec.name, bounded_spec.name)
def testSerialization(self):
desc = tensor_spec.BoundedTensorSpec([1, 5], dtypes.float32, -1, 1, "test")
self.assertEqual(pickle.loads(pickle.dumps(desc)), desc)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/tensor_spec_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import auto_control_deps as acd
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.layers import core as keras_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import momentum
class AutomaticControlDependenciesTest(test.TestCase):
def testBasic(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
with acd.AutomaticControlDependencies() as c:
v.assign(v + 1)
v.assign(2 * v)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(), 4.0)
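  # AutomaticControlDependencies adds control edges so the stateful assigns run
  # in program order before the read: 1.0 -> +1 -> *2 yields 4.0 above.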
@test_util.run_v1_only("b/120545219")
def testCondMustRun(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 6.0)
@test_util.run_v1_only("b/120545219")
def testCondMustRunSeparateRead(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
one = constant_op.constant(1.0)
one = c.mark_as_return(one)
one.eval(feed_dict={p: False})
self.assertAllEqual(v.read_value().eval(), 5.0)
one.eval(feed_dict={p: True})
self.assertAllEqual(v.read_value().eval(), 6.0)
@test_util.run_v1_only("b/120545219")
def testCondNested(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
q = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1, name="true")
return 1.0
def false_fn():
def inner_true_fn():
v.assign(v * 2, name="false_true")
return 2.0
def inner_false_fn():
v.assign(v * 3, name="false_false")
return 3.0
control_flow_ops.cond(q, inner_true_fn, inner_false_fn)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
with ops.name_scope("final"):
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0)
self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranch(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 5.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranchUpdateBefore(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
v.assign(v * 2)
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 12.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranchUpdateAfter(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
v.assign(v * 2)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 10.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 20.0)
def testDefunWhileLoopWithCapturedLoopVars(self):
n = 3
x = constant_op.constant(list(range(n)))
@function.defun
def loop():
c = lambda i, x: i < n
b = lambda i, x: (i + 1, x + 1)
i, out = control_flow_ops.while_loop(c, b, (0, x))
return i, out
i, out = loop()
self.assertEqual(int(i), 3)
self.assertAllEqual(out, [3, 4, 5])
def testDecorator(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
@acd.automatic_control_dependencies
def f():
v.assign(v + 1)
v.assign(2 * v)
return v.read_value()
self.assertAllEqual(f().eval(), 4.0)
def testOptimizerInDefun(self):
def loss(v):
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
value = train()
self.assertEqual(value.numpy(), -1.0)
def testReturningNonTensorRaisesError(self):
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
optimizer.apply_gradients = function.defun(optimizer.apply_gradients)
v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(lambda v: v**2)(v)
with self.assertRaisesRegexp(TypeError,
'.*must return zero or more Tensors.*'):
# TODO(akshayka): We might want to allow defun-ing Python functions
# that return operations (and just execute the op instead of running it).
optimizer.apply_gradients(grad)
# TODO(b/111663004): This should work when the outer context is graph
# building.
def testOptimizerNonSlotVarsInDefunNoError(self):
def loss(v):
return v**2
optimizer = adam.AdamOptimizer(learning_rate=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
train()
def testOptimizerInDefunWithCapturedVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
def loss():
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
grad = backprop.implicit_grad(loss)()
optimizer.apply_gradients(grad)
train()
self.assertEqual(v.numpy(), -1.0)
def testRepeatedResourceInput(self):
var = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def inner(var1, var2):
return (resource_variable_ops.read_variable_op(var1, dtypes.float32) +
resource_variable_ops.read_variable_op(var2, dtypes.float32))
@def_function.function
def outer():
return inner(var.handle, var.handle)
self.assertEqual(self.evaluate(outer()), 2.0)
def testVariableInitializersCanBeLifted(self):
# The initializer is a stateful op, but using it inside a function should
# *not* create additional dependencies. That's what we're testing.
layer = keras_core.Dense(1, kernel_initializer="glorot_uniform")
@def_function.function
def fn(x):
# Stateful operation
control_flow_ops.Assert(x, ["Error"])
      # Variable initialization should be lifted. Prior to the change that
      # added this test, the lifting would crash because of an auto control
      # dep added on `x`. Note that the error did not happen if we manually
      # created a tf.Variable outside of the function and used it here.
      # Alternatively, creating a tf.Variable inside fn() causes a different
      # sort of error that is out of scope for this test.
return layer(ops.convert_to_tensor([[1.0, 1.0]]))
true = ops.convert_to_tensor(True)
concrete = fn.get_concrete_function(
tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool))
self.evaluate(concrete(true))
self.evaluate(fn(True))
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/framework/auto_control_deps_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.dtypes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
def _is_numeric_dtype_enum(datatype_enum):
non_numeric_dtypes = [types_pb2.DT_VARIANT,
types_pb2.DT_VARIANT_REF,
types_pb2.DT_INVALID,
types_pb2.DT_RESOURCE,
types_pb2.DT_RESOURCE_REF]
return datatype_enum not in non_numeric_dtypes
class TypesTest(test_util.TensorFlowTestCase):
def testAllTypesConstructible(self):
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
self.assertEqual(datatype_enum,
dtypes.DType(datatype_enum).as_datatype_enum)
def testAllTypesConvertibleToDType(self):
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
dt = dtypes.as_dtype(datatype_enum)
self.assertEqual(datatype_enum, dt.as_datatype_enum)
def testAllTypesConvertibleToNumpyDtype(self):
for datatype_enum in types_pb2.DataType.values():
if not _is_numeric_dtype_enum(datatype_enum):
continue
dtype = dtypes.as_dtype(datatype_enum)
numpy_dtype = dtype.as_numpy_dtype
_ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
if dtype.base_dtype != dtypes.bfloat16:
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
self.assertEqual(
dtypes.as_dtype(datatype_enum).base_dtype,
dtypes.as_dtype(numpy_dtype))
def testInvalid(self):
with self.assertRaises(TypeError):
dtypes.DType(types_pb2.DT_INVALID)
with self.assertRaises(TypeError):
dtypes.as_dtype(types_pb2.DT_INVALID)
def testNumpyConversion(self):
self.assertIs(dtypes.float32, dtypes.as_dtype(np.float32))
self.assertIs(dtypes.float64, dtypes.as_dtype(np.float64))
self.assertIs(dtypes.int32, dtypes.as_dtype(np.int32))
self.assertIs(dtypes.int64, dtypes.as_dtype(np.int64))
self.assertIs(dtypes.uint8, dtypes.as_dtype(np.uint8))
self.assertIs(dtypes.uint16, dtypes.as_dtype(np.uint16))
self.assertIs(dtypes.int16, dtypes.as_dtype(np.int16))
self.assertIs(dtypes.int8, dtypes.as_dtype(np.int8))
self.assertIs(dtypes.complex64, dtypes.as_dtype(np.complex64))
self.assertIs(dtypes.complex128, dtypes.as_dtype(np.complex128))
self.assertIs(dtypes.string, dtypes.as_dtype(np.object_))
self.assertIs(dtypes.string,
dtypes.as_dtype(np.array(["foo", "bar"]).dtype))
self.assertIs(dtypes.bool, dtypes.as_dtype(np.bool_))
with self.assertRaises(TypeError):
dtypes.as_dtype(np.dtype([("f1", np.uint), ("f2", np.int32)]))
def testRealDtype(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.bool, dtypes.uint8, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64
]:
self.assertIs(dtype.real_dtype, dtype)
self.assertIs(dtypes.complex64.real_dtype, dtypes.float32)
self.assertIs(dtypes.complex128.real_dtype, dtypes.float64)
def testStringConversion(self):
self.assertIs(dtypes.float32, dtypes.as_dtype("float32"))
self.assertIs(dtypes.float64, dtypes.as_dtype("float64"))
self.assertIs(dtypes.int32, dtypes.as_dtype("int32"))
self.assertIs(dtypes.uint8, dtypes.as_dtype("uint8"))
self.assertIs(dtypes.uint16, dtypes.as_dtype("uint16"))
self.assertIs(dtypes.int16, dtypes.as_dtype("int16"))
self.assertIs(dtypes.int8, dtypes.as_dtype("int8"))
self.assertIs(dtypes.string, dtypes.as_dtype("string"))
self.assertIs(dtypes.complex64, dtypes.as_dtype("complex64"))
self.assertIs(dtypes.complex128, dtypes.as_dtype("complex128"))
self.assertIs(dtypes.int64, dtypes.as_dtype("int64"))
self.assertIs(dtypes.bool, dtypes.as_dtype("bool"))
self.assertIs(dtypes.qint8, dtypes.as_dtype("qint8"))
self.assertIs(dtypes.quint8, dtypes.as_dtype("quint8"))
self.assertIs(dtypes.qint32, dtypes.as_dtype("qint32"))
self.assertIs(dtypes.bfloat16, dtypes.as_dtype("bfloat16"))
self.assertIs(dtypes.float32_ref, dtypes.as_dtype("float32_ref"))
self.assertIs(dtypes.float64_ref, dtypes.as_dtype("float64_ref"))
self.assertIs(dtypes.int32_ref, dtypes.as_dtype("int32_ref"))
self.assertIs(dtypes.uint8_ref, dtypes.as_dtype("uint8_ref"))
self.assertIs(dtypes.int16_ref, dtypes.as_dtype("int16_ref"))
self.assertIs(dtypes.int8_ref, dtypes.as_dtype("int8_ref"))
self.assertIs(dtypes.string_ref, dtypes.as_dtype("string_ref"))
self.assertIs(dtypes.complex64_ref, dtypes.as_dtype("complex64_ref"))
self.assertIs(dtypes.complex128_ref, dtypes.as_dtype("complex128_ref"))
self.assertIs(dtypes.int64_ref, dtypes.as_dtype("int64_ref"))
self.assertIs(dtypes.bool_ref, dtypes.as_dtype("bool_ref"))
self.assertIs(dtypes.qint8_ref, dtypes.as_dtype("qint8_ref"))
self.assertIs(dtypes.quint8_ref, dtypes.as_dtype("quint8_ref"))
self.assertIs(dtypes.qint32_ref, dtypes.as_dtype("qint32_ref"))
self.assertIs(dtypes.bfloat16_ref, dtypes.as_dtype("bfloat16_ref"))
with self.assertRaises(TypeError):
dtypes.as_dtype("not_a_type")
def testDTypesHaveUniqueNames(self):
dtypez = []
names = set()
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
dtype = dtypes.as_dtype(datatype_enum)
dtypez.append(dtype)
names.add(dtype.name)
self.assertEqual(len(dtypez), len(names))
def testIsInteger(self):
self.assertEqual(dtypes.as_dtype("int8").is_integer, True)
self.assertEqual(dtypes.as_dtype("int16").is_integer, True)
self.assertEqual(dtypes.as_dtype("int32").is_integer, True)
self.assertEqual(dtypes.as_dtype("int64").is_integer, True)
self.assertEqual(dtypes.as_dtype("uint8").is_integer, True)
self.assertEqual(dtypes.as_dtype("uint16").is_integer, True)
self.assertEqual(dtypes.as_dtype("complex64").is_integer, False)
self.assertEqual(dtypes.as_dtype("complex128").is_integer, False)
self.assertEqual(dtypes.as_dtype("float").is_integer, False)
self.assertEqual(dtypes.as_dtype("double").is_integer, False)
self.assertEqual(dtypes.as_dtype("string").is_integer, False)
self.assertEqual(dtypes.as_dtype("bool").is_integer, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
self.assertEqual(dtypes.as_dtype("qint8").is_integer, False)
self.assertEqual(dtypes.as_dtype("qint16").is_integer, False)
self.assertEqual(dtypes.as_dtype("qint32").is_integer, False)
self.assertEqual(dtypes.as_dtype("quint8").is_integer, False)
self.assertEqual(dtypes.as_dtype("quint16").is_integer, False)
def testIsFloating(self):
self.assertEqual(dtypes.as_dtype("int8").is_floating, False)
self.assertEqual(dtypes.as_dtype("int16").is_floating, False)
self.assertEqual(dtypes.as_dtype("int32").is_floating, False)
self.assertEqual(dtypes.as_dtype("int64").is_floating, False)
self.assertEqual(dtypes.as_dtype("uint8").is_floating, False)
self.assertEqual(dtypes.as_dtype("uint16").is_floating, False)
self.assertEqual(dtypes.as_dtype("complex64").is_floating, False)
self.assertEqual(dtypes.as_dtype("complex128").is_floating, False)
self.assertEqual(dtypes.as_dtype("float32").is_floating, True)
self.assertEqual(dtypes.as_dtype("float64").is_floating, True)
self.assertEqual(dtypes.as_dtype("string").is_floating, False)
self.assertEqual(dtypes.as_dtype("bool").is_floating, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_floating, True)
self.assertEqual(dtypes.as_dtype("qint8").is_floating, False)
self.assertEqual(dtypes.as_dtype("qint16").is_floating, False)
self.assertEqual(dtypes.as_dtype("qint32").is_floating, False)
self.assertEqual(dtypes.as_dtype("quint8").is_floating, False)
self.assertEqual(dtypes.as_dtype("quint16").is_floating, False)
def testIsComplex(self):
self.assertEqual(dtypes.as_dtype("int8").is_complex, False)
self.assertEqual(dtypes.as_dtype("int16").is_complex, False)
self.assertEqual(dtypes.as_dtype("int32").is_complex, False)
self.assertEqual(dtypes.as_dtype("int64").is_complex, False)
self.assertEqual(dtypes.as_dtype("uint8").is_complex, False)
self.assertEqual(dtypes.as_dtype("uint16").is_complex, False)
self.assertEqual(dtypes.as_dtype("complex64").is_complex, True)
self.assertEqual(dtypes.as_dtype("complex128").is_complex, True)
self.assertEqual(dtypes.as_dtype("float32").is_complex, False)
self.assertEqual(dtypes.as_dtype("float64").is_complex, False)
self.assertEqual(dtypes.as_dtype("string").is_complex, False)
self.assertEqual(dtypes.as_dtype("bool").is_complex, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_complex, False)
self.assertEqual(dtypes.as_dtype("qint8").is_complex, False)
self.assertEqual(dtypes.as_dtype("qint16").is_complex, False)
self.assertEqual(dtypes.as_dtype("qint32").is_complex, False)
self.assertEqual(dtypes.as_dtype("quint8").is_complex, False)
self.assertEqual(dtypes.as_dtype("quint16").is_complex, False)
def testIsUnsigned(self):
self.assertEqual(dtypes.as_dtype("int8").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int16").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int32").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("uint8").is_unsigned, True)
self.assertEqual(dtypes.as_dtype("uint16").is_unsigned, True)
self.assertEqual(dtypes.as_dtype("float32").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("float64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("bool").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("string").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("complex64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("complex128").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("qint8").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("qint16").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("qint32").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("quint8").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("quint16").is_unsigned, False)
def testMinMax(self):
# make sure min/max evaluates for all data types that have min/max
for datatype_enum in types_pb2.DataType.values():
if not _is_numeric_dtype_enum(datatype_enum):
continue
dtype = dtypes.as_dtype(datatype_enum)
numpy_dtype = dtype.as_numpy_dtype
# ignore types for which there are no minimum/maximum (or we cannot
# compute it, such as for the q* types)
if (dtype.is_quantized or dtype.base_dtype == dtypes.bool or
dtype.base_dtype == dtypes.string or
dtype.base_dtype == dtypes.complex64 or
dtype.base_dtype == dtypes.complex128):
continue
print("%s: %s - %s" % (dtype, dtype.min, dtype.max))
# check some values that are known
if numpy_dtype == np.bool_:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 1)
if numpy_dtype == np.int8:
self.assertEquals(dtype.min, -128)
self.assertEquals(dtype.max, 127)
if numpy_dtype == np.int16:
self.assertEquals(dtype.min, -32768)
self.assertEquals(dtype.max, 32767)
if numpy_dtype == np.int32:
self.assertEquals(dtype.min, -2147483648)
self.assertEquals(dtype.max, 2147483647)
if numpy_dtype == np.int64:
self.assertEquals(dtype.min, -9223372036854775808)
self.assertEquals(dtype.max, 9223372036854775807)
if numpy_dtype == np.uint8:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 255)
if numpy_dtype == np.uint16:
if dtype == dtypes.uint16:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 65535)
elif dtype == dtypes.bfloat16:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 4294967295)
if numpy_dtype == np.uint32:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 4294967295)
if numpy_dtype == np.uint64:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 18446744073709551615)
if numpy_dtype in (np.float16, np.float32, np.float64):
self.assertEquals(dtype.min, np.finfo(numpy_dtype).min)
self.assertEquals(dtype.max, np.finfo(numpy_dtype).max)
if numpy_dtype == dtypes.bfloat16.as_numpy_dtype:
self.assertEquals(dtype.min, float.fromhex("-0x1.FEp127"))
self.assertEquals(dtype.max, float.fromhex("0x1.FEp127"))
def testRepr(self):
for enum, name in dtypes._TYPE_TO_STRING.items():
if enum > 100:
continue
dtype = dtypes.DType(enum)
self.assertEquals(repr(dtype), "tf." + name)
import tensorflow as tf
dtype2 = eval(repr(dtype))
self.assertEquals(type(dtype2), dtypes.DType)
self.assertEquals(dtype, dtype2)
def testEqWithNonTFTypes(self):
self.assertNotEqual(dtypes.int32, int)
self.assertNotEqual(dtypes.float64, 2.1)
def testPythonLongConversion(self):
self.assertIs(dtypes.int64, dtypes.as_dtype(np.array(2**32).dtype))
def testPythonTypesConversion(self):
self.assertIs(dtypes.float32, dtypes.as_dtype(float))
self.assertIs(dtypes.bool, dtypes.as_dtype(bool))
def testReduce(self):
for enum in dtypes._TYPE_TO_STRING:
dtype = dtypes.DType(enum)
ctor, args = dtype.__reduce__()
self.assertEquals(ctor, dtypes.as_dtype)
self.assertEquals(args, (dtype.name,))
reconstructed = ctor(*args)
self.assertEquals(reconstructed, dtype)
def testAsDtypeInvalidArgument(self):
with self.assertRaises(TypeError):
dtypes.as_dtype((dtypes.int32, dtypes.float32))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/dtypes_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to hold a library of OpDefs and use it to create Brain operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
def _Attr(op_def, name):
for attr in op_def.attr:
if attr.name == name:
return attr
raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" %
(op_def.name, name))
def _AttrValue(attr_protos, name):
if name in attr_protos:
return attr_protos[name]
raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." %
(name, attr_protos))
def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
if attr_def.HasField("allowed_values"):
allowed_list = attr_def.allowed_values.list.type
if dtype not in allowed_list:
raise TypeError(
"Value passed to parameter '%s' has DataType %s not in list of "
"allowed values: %s" %
(param_name, dtypes.as_dtype(dtype).name,
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
def _IsListParameter(arg):
if arg.number_attr:
return True
elif arg.type_list_attr:
return True
return False
def _NumTypeFields(arg):
num = 0
if arg.type != types_pb2.DT_INVALID: num += 1
if arg.type_attr: num += 1
if arg.type_list_attr: num += 1
return num
def _IsListValue(v):
return isinstance(v, (list, tuple))
def _Flatten(l):
"""Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5]."""
# [1, 2, [3, 4], [5]] -> [[1], [2], [3, 4], [5]]
l_of_l = [x if _IsListValue(x) else [x] for x in l]
# [[1], [2], [3, 4], [5]] -> [1, 2, 3, 4, 5]
return [item for sublist in l_of_l for item in sublist]
def _Restructure(l, structure):
"""Returns the elements of list l structured according to the given structure.
A structure is represented by a list whose elements are either
`None` or a non-negative integer. `None` corresponds to a single
element in the output list, and an integer N corresponds to a nested
list of length N.
The function returns a data structure whose shape is given by
`structure`, and whose elements are taken from `l`. If `structure`
is a singleton, the function returns the single data structure
implied by the 0th element of `structure`. For example:
_Restructure(["foo", "bar", "baz", "qux"], [None, 2, None])
-> ["foo", ["bar", "baz"], "qux"]
_Restructure(["foo"], [None]) -> "foo"
_Restructure(["foo"], [1]) -> ["foo"]
_Restructure([], [0]) -> []
Args:
l: A list.
structure: A list whose elements are either `None` or a non-negative
integer.
Returns:
The elements of `l`, restructured according to `structure`. If
`structure` is a list of length 1, this function returns the
single data structure implied by `structure[0]`.
"""
result = []
current_index = 0
for element in structure:
if element is None:
result.append(l[current_index])
current_index += 1
else:
result.append(l[current_index:current_index+element])
current_index += element
if len(result) == 1:
return result[0]
else:
return tuple(result)
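# Illustrative sketch (not part of the original module): `_Flatten` and
# `_Restructure` are near-inverses used to pack op inputs and unpack op
# outputs. Note that `_Restructure` returns a tuple whenever `structure` has
# more than one element.
def _flatten_restructure_example():
  flat = _Flatten(["foo", ["bar", "baz"], "qux"])
  assert flat == ["foo", "bar", "baz", "qux"]
  assert _Restructure(flat, [None, 2, None]) == ("foo", ["bar", "baz"], "qux")
  assert _Restructure(["foo"], [None]) == "foo"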
def _MakeFloat(v, arg_name):
if not isinstance(v, compat.real_types):
raise TypeError("Expected float for argument '%s' not %s." %
(arg_name, repr(v)))
return float(v)
def _MakeInt(v, arg_name):
if isinstance(v, six.string_types):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
try:
return int(v)
except (ValueError, TypeError):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
def _MakeStr(v, arg_name):
if not isinstance(v, compat.bytes_or_text_types):
raise TypeError("Expected string for argument '%s' not %s." %
(arg_name, repr(v)))
return compat.as_bytes(v) # Convert unicode strings to bytes.
def _MakeBool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def _MakeType(v, attr_def):
try:
v = dtypes.as_dtype(v).base_dtype
except TypeError:
raise TypeError("Expected DataType for argument '%s' not %s." %
(attr_def.name, repr(v)))
i = v.as_datatype_enum
_SatisfiesTypeConstraint(i, attr_def, param_name=attr_def.name)
return i
def _MakeShape(v, arg_name):
"""Convert v into a TensorShapeProto."""
# Args:
# v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
# arg_name: String, for error messages.
# Returns:
# A TensorShapeProto.
if isinstance(v, tensor_shape_pb2.TensorShapeProto):
for d in v.dim:
if d.name:
logging.warning("Warning: TensorShapeProto with a named dimension: %s",
str(v))
break
return v
try:
return tensor_shape.as_shape(v).as_proto()
except TypeError as e:
raise TypeError("Error converting %s to a TensorShape: %s" % (arg_name, e))
except ValueError as e:
raise ValueError("Error converting %s to a TensorShape: %s" % (arg_name, e))
def _MakeTensor(v, arg_name):
"""Ensure v is a TensorProto."""
if isinstance(v, tensor_pb2.TensorProto):
return v
raise TypeError(
"Don't know how to convert %s to a TensorProto for argument '%s'" %
(repr(v), arg_name))
def _MakeFunc(v, arg_name):
"""Ensure v is a func."""
if isinstance(v, attr_value_pb2.NameAttrList):
return v
fn_attr = attr_value_pb2.NameAttrList()
if isinstance(v, compat.bytes_or_text_types):
fn_attr.name = v
elif hasattr(v, "add_to_graph"):
v.add_to_graph(ops.get_default_graph())
fn_attr.name = v.name
else:
raise TypeError("Don't know how to convert {} to a func for "
"argument {}".format(v, arg_name))
return fn_attr
class _OpInfo(object):
"""All per-Op state we would like to precompute/validate."""
def __init__(self, op_def):
self.op_def = op_def
# TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it
# here, instead of these checks.
for arg in list(op_def.input_arg) + list(op_def.output_arg):
num_type_fields = _NumTypeFields(arg)
if num_type_fields != 1:
raise TypeError("Arg '%s' of '%s' must have one type field not %d" %
(arg.name, op_def.name, num_type_fields))
if arg.type_attr:
attr_type = _Attr(op_def, arg.type_attr).type
if attr_type != "type":
raise TypeError("Attr '%s' of '%s' used as a type_attr "
"but has type %s" %
(arg.type_attr, op_def.name, attr_type))
if arg.type_list_attr:
attr_type = _Attr(op_def, arg.type_list_attr).type
if attr_type != "list(type)":
raise TypeError(
"Attr '%s' of '%s' used as a type_list_attr but has type %s" %
(arg.type_attr, op_def.name, attr_type))
if arg.number_attr:
attr_type = _Attr(op_def, arg.number_attr).type
if attr_type != "int":
raise TypeError(
"Attr '%s' of '%s' used as a number_attr but has type %s" %
(arg.number_attr, op_def.name, attr_type))
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _MaybeColocateWith(inputs):
"""A context manager for (maybe) colocating with a list of input tensors.
Args:
inputs: A list of `Tensor` or `Operation` objects.
Returns:
A context manager.
"""
if not inputs:
yield
else:
# NOTE(mrry): The `ops.colocate_with()` function accepts only a single
# op or tensor, so we create one context manager per element in the list.
with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):
yield
# pylint: enable=g-doc-return-or-yield
class OpDefLibrary(object):
"""Holds a collection of OpDefs, can add the corresponding Ops to a graph."""
def __init__(self):
self._ops = {}
# pylint: disable=invalid-name
def add_op(self, op_def):
"""Register an OpDef. May call apply_op with the name afterwards."""
if not isinstance(op_def, op_def_pb2.OpDef):
raise TypeError("%s is %s, not an op_def_pb2.OpDef" %
(op_def, type(op_def)))
if not op_def.name:
raise ValueError("%s missing name." % op_def)
if op_def.name in self._ops:
raise RuntimeError("Op name %s registered twice." % op_def.name)
self._ops[op_def.name] = _OpInfo(op_def)
def add_op_list(self, op_list):
"""Register the OpDefs from an OpList."""
if not isinstance(op_list, op_def_pb2.OpList):
raise TypeError("%s is %s, not an op_def_pb2.OpList" %
(op_list, type(op_list)))
for op_def in op_list.op:
self.add_op(op_def)
def apply_op(self, op_type_name, name=None, **keywords):
# pylint: disable=g-doc-args
"""Add a node invoking a registered Op to a graph.
Example usage:
# input1 and input2 can be Tensors or anything ops.convert_to_tensor()
# will convert to a Tensor.
op_def_library.apply_op("op", input1=input1, input2=input2)
# Can specify a node name.
op_def_library.apply_op("op", input1=input1, name="node_name")
# Must use keyword arguments, with the names specified in the OpDef.
op_def_library.apply_op("op", input_name=input, attr_name=attr)
All attrs must either be inferred from an input or specified.
(If inferred, the attr must not be specified.) If an attr has a default
value specified in the Op's OpDef, then you may pass None as the value
of that attr to get the default.
Args:
op_type_name: string. Must match the name field of a registered Op.
name: string. Optional name of the created op.
**keywords: input Tensor and attr arguments specified by name,
and optional parameters to pass when constructing the Operation.
Returns:
The Tensor(s) representing the output of the operation, or the Operation
itself if there are no outputs.
Raises:
RuntimeError: On some errors.
TypeError: On some errors.
ValueError: On some errors.
"""
output_structure, is_stateful, op = self._apply_op_helper(
op_type_name, name, **keywords)
if output_structure:
outputs = op.outputs
res = _Restructure(ops.convert_n_to_tensor(outputs), output_structure)
if isinstance(res, list) and not res and is_stateful:
return op
else:
return res
else:
return op
def _apply_op_helper(self, op_type_name, name=None, **keywords):
"""Implementation of apply_op that returns output_structure, op."""
op_info = self._ops.get(op_type_name, None)
if op_info is None:
raise RuntimeError("Unrecognized Op name " + op_type_name)
op_def = op_info.op_def
# Determine the graph context.
try:
# Need to flatten all the arguments into a list.
# pylint: disable=protected-access
g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
# pylint: enable=protected-access
except AssertionError as e:
raise RuntimeError(
"Cannot determine graph for Op '%s' due to: %s"
% (op_type_name, e.message))
# Default name if not specified.
if name is None:
name = op_type_name
# Check for deprecation
deprecation_version = op_def.deprecation.version
if deprecation_version:
producer = g.graph_def_versions.producer
if producer >= deprecation_version:
raise NotImplementedError(
("Op %s is not available in GraphDef version %d. "
"It has been removed in version %d. %s.") %
(op_type_name, producer, deprecation_version,
op_def.deprecation.explanation))
# Fill in the list of default types for all "type" attrs. This
# will be used to choose a preferred dtype to convert to in the
# absence of input type information.
#
# TODO(b/31302892): Currently the defaults don't work in the right
# way if you have two inputs, one of whose type resolution depends
# on the other. Handling this will require restructuring this code
# significantly.
default_type_attr_map = {}
for attr_def in op_def.attr:
if attr_def.type != "type":
continue
key = attr_def.name
if attr_def.HasField("default_value"):
default_type_attr_map[key] = dtypes.as_dtype(
attr_def.default_value.type)
# Requires that op_def has passed validation (using the C++
# ValidateOpDef() from ../framework/op_def_util.h).
attrs = {}
inputs = []
input_types = []
with g.as_default(), ops.name_scope(name) as scope:
# Perform input type inference
inferred_from = {}
for input_arg in op_def.input_arg:
input_name = input_arg.name
if input_name in keywords:
values = keywords.pop(input_name)
elif input_name + "_" in keywords:
# Handle the case where the name is a keyword or built-in
# for Python so we use the name + _ instead.
input_name += "_"
values = keywords.pop(input_name)
else:
raise TypeError("No argument for input " + input_name)
# Goals:
# * Convert values to Tensors if it contains constants.
# * Verify that values is a list if that matches the input_arg's
# type.
# * If the input_arg's type is determined by attrs, either set
# those attrs and validate those attr values are legal (if
# they have not yet been set) or validate the input matches
# the type indicated by the attrs (if they have already been
# inferred via an earlier input).
# * If the input_arg has an explicit type, make sure the input
# conforms.
if _IsListParameter(input_arg):
if not _IsListValue(values):
raise TypeError(
"Expected list for '%s' argument to '%s' Op, not %s." %
(input_name, op_type_name, values))
# In cases where we expect all elements of the list to have the
# same dtype, try to cast non-Tensor elements to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.number_attr:
if input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
else:
for t in values:
if isinstance(t, ops.Tensor):
dtype = t.dtype
break
# dtype still not found, prefer using the default dtype
# from the attr.
if dtype is None and input_arg.type_attr in default_type_attr_map:
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
if not input_arg.is_ref and dtype:
dtype = dtypes.as_dtype(dtype).base_dtype
values = ops.internal_convert_n_to_tensor(
values,
name=input_arg.name,
dtype=dtype if dtype else None,
preferred_dtype=default_dtype,
as_ref=input_arg.is_ref)
if input_arg.number_attr and len(
set(v.dtype.base_dtype for v in values)) > 1:
raise TypeError() # All types should match.
except (TypeError, ValueError):
# What types does the conversion function think values have?
observed_types = []
for value in values:
try:
converted_value = ops.internal_convert_to_tensor(
value, as_ref=input_arg.is_ref)
observed_types.append(converted_value.dtype.base_dtype.name)
except (TypeError, ValueError):
observed_types.append("<NOT CONVERTIBLE TO TENSOR>")
observed = ", ".join(observed_types)
prefix = (
"Tensors in list passed to '%s' of '%s' Op have types [%s]" %
(input_name, op_type_name, observed))
if input_arg.number_attr:
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s that do not match expected type %s." %
(prefix, dtype.name))
elif input_arg.type_attr in attrs:
raise TypeError("%s that do not match type %s inferred from "
"earlier arguments." %
(prefix, dtype.name))
else:
raise TypeError("%s that don't all match." % prefix)
else:
raise TypeError(
"%s that are invalid. Tensors: %s" % (prefix, values))
types = [x.dtype for x in values]
inputs.extend(values)
else:
# In cases where we have an expected type, try to convert non-Tensor
# arguments to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
elif input_arg.type_attr in default_type_attr_map:
# The dtype could not be inferred solely from the inputs,
# so we prefer the attr's default, so code that adds a new attr
# with a default is backwards compatible.
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
values = ops.internal_convert_to_tensor(
values,
name=input_arg.name,
dtype=dtype,
as_ref=input_arg.is_ref,
preferred_dtype=default_dtype)
except TypeError as err:
if dtype is None:
raise err
else:
raise TypeError(
"Expected %s passed to parameter '%s' of op '%s', got %s of "
"type '%s' instead. Error: %s" %
(dtypes.as_dtype(dtype).name, input_arg.name, op_type_name,
repr(values), type(values).__name__, err))
except ValueError:
# What type does convert_to_tensor think it has?
try:
observed = ops.internal_convert_to_tensor(
values, as_ref=input_arg.is_ref).dtype.name
except ValueError as err:
raise ValueError(
"Tried to convert '%s' to a tensor and failed. Error: %s" %
(input_name, err))
prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
(input_name, op_type_name, observed))
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s expected type of %s." %
(prefix, dtypes.as_dtype(input_arg.type).name))
else:
# Update the maps with the default, if needed.
k = input_arg.type_attr
if k in default_type_attr_map:
if k not in attrs:
attrs[k] = default_type_attr_map[k]
if k not in inferred_from:
inferred_from[k] = "Default in OpDef"
raise TypeError(
"%s type %s of argument '%s'." %
(prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
inferred_from[input_arg.type_attr]))
types = [values.dtype]
inputs.append(values)
base_types = [x.base_dtype for x in types]
if input_arg.number_attr:
# <number-attr> * <type> or <number-attr> * <type-attr>
if input_arg.number_attr in attrs:
if len(values) != attrs[input_arg.number_attr]:
raise ValueError(
"List argument '%s' to '%s' Op with length %d must match "
"length %d of argument '%s'." %
(input_name, op_type_name, len(values),
attrs[input_arg.number_attr],
inferred_from[input_arg.number_attr]))
else:
attrs[input_arg.number_attr] = len(values)
inferred_from[input_arg.number_attr] = input_name
num_attr = _Attr(op_def, input_arg.number_attr)
if num_attr.has_minimum and len(values) < num_attr.minimum:
raise ValueError(
"List argument '%s' to '%s' Op with length %d shorter "
"than minimum length %d." %
(input_name, op_type_name, len(values), num_attr.minimum))
# All tensors must have the same base type.
if any(bt != base_types[0] for bt in base_types):
raise TypeError(
"All tensors passed to '%s' of '%s' Op "
"must have the same type." %
(input_name, op_type_name))
if input_arg.type != types_pb2.DT_INVALID:
# <number-attr> * <type> case
if base_types and base_types[0] != input_arg.type:
assert False, "Unreachable"
elif input_arg.type_attr in attrs:
# <number-attr> * <type-attr> case, where <type-attr> already
# has an inferred value.
if base_types and base_types[0] != attrs[input_arg.type_attr]:
assert False, "Unreachable"
else:
# <number-attr> * <type-attr> case, where we are now setting
# the <type-attr> based on this input
if not base_types:
raise TypeError(
"Don't know how to infer type variable from empty input "
"list passed to input '%s' of '%s' Op." %
(input_name, op_type_name))
attrs[input_arg.type_attr] = base_types[0]
inferred_from[input_arg.type_attr] = input_name
type_attr = _Attr(op_def, input_arg.type_attr)
_SatisfiesTypeConstraint(base_types[0], type_attr,
param_name=input_name)
elif input_arg.type_attr:
# <type-attr>
attr_value = base_types[0]
if input_arg.type_attr in attrs:
if attrs[input_arg.type_attr] != attr_value:
raise TypeError(
"Input '%s' of '%s' Op has type %s that does not "
"match type %s of argument '%s'." %
(input_name, op_type_name, dtypes.as_dtype(attr_value).name,
dtypes.as_dtype(attrs[input_arg.type_attr]).name,
inferred_from[input_arg.type_attr]))
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_attr),
param_name=input_name)
attrs[input_arg.type_attr] = attr_value
inferred_from[input_arg.type_attr] = input_name
elif input_arg.type_list_attr:
# <type-list-attr>
attr_value = base_types
if input_arg.type_list_attr in attrs:
if attrs[input_arg.type_list_attr] != attr_value:
raise TypeError(
"Input '%s' of '%s' Op has type list of %s that does not "
"match type list %s of argument '%s'." %
(input_name, op_type_name,
", ".join(dtypes.as_dtype(x).name for x in attr_value),
", ".join(dtypes.as_dtype(x).name
for x in attrs[input_arg.type_list_attr]),
inferred_from[input_arg.type_list_attr]))
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_list_attr),
param_name=input_name)
attrs[input_arg.type_list_attr] = attr_value
inferred_from[input_arg.type_list_attr] = input_name
else:
# single Tensor with specified type
if base_types[0] != input_arg.type:
assert False, "Unreachable"
if input_arg.is_ref:
if not all(x._is_ref_dtype for x in types): # pylint: disable=protected-access
raise TypeError(
("'%s' Op requires that input '%s' be a mutable tensor "
"(e.g.: a tf.Variable)") % (op_type_name, input_name))
input_types.extend(types)
else:
input_types.extend(base_types)
# Process remaining attrs
for attr in op_def.attr:
# Skip attrs that have already had their values inferred
if attr.name in attrs:
if attr.name in keywords:
raise TypeError(
"Should not specify value for inferred attr '%s'." % attr.name)
continue
if attr.name in keywords:
attrs[attr.name] = keywords.pop(attr.name)
elif attr.name + "_" in keywords:
# Attrs whose names match Python keywords have an extra '_'
# appended, so we must check for that as well.
attrs[attr.name] = keywords.pop(attr.name + "_")
else:
raise TypeError("No argument for attr " + attr.name)
# Convert attr values to AttrValue protos.
attr_protos = {}
for attr_def in op_def.attr:
key = attr_def.name
value = attrs[key]
attr_value = attr_value_pb2.AttrValue()
if attr_def.HasField("default_value") and value is None:
attr_value.CopyFrom(attr_def.default_value)
attr_protos[key] = attr_value
continue
if attr_def.type.startswith("list("):
if not _IsListValue(value):
raise TypeError("Expected list for attr " + key)
if attr_def.has_minimum:
if len(value) < attr_def.minimum:
raise ValueError("Attr '%s' of '%s' Op passed list of length %d "
"less than minimum %d." %
(key, op_type_name, len(value),
attr_def.minimum))
attr_value.list.SetInParent()
if attr_def.type == "string":
attr_value.s = _MakeStr(value, key)
if attr_def.HasField("allowed_values"):
if attr_value.s not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(attr_value.s),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "list(string)":
attr_value.list.s.extend([_MakeStr(x, key) for x in value])
if attr_def.HasField("allowed_values"):
for x in attr_value.list.s:
if x not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(x),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "int":
attr_value.i = _MakeInt(value, key)
if attr_def.has_minimum:
if attr_value.i < attr_def.minimum:
raise ValueError(
"Attr '%s' of '%s' Op passed %d less than minimum %d." %
(key, op_type_name, attr_value.i, attr_def.minimum))
elif attr_def.type == "list(int)":
attr_value.list.i.extend([_MakeInt(x, key) for x in value])
elif attr_def.type == "float":
attr_value.f = _MakeFloat(value, key)
elif attr_def.type == "list(float)":
attr_value.list.f.extend([_MakeFloat(x, key) for x in value])
elif attr_def.type == "bool":
attr_value.b = _MakeBool(value, key)
elif attr_def.type == "list(bool)":
attr_value.list.b.extend([_MakeBool(x, key) for x in value])
elif attr_def.type == "type":
attr_value.type = _MakeType(value, attr_def)
elif attr_def.type == "list(type)":
attr_value.list.type.extend(
[_MakeType(x, attr_def) for x in value])
elif attr_def.type == "shape":
attr_value.shape.CopyFrom(_MakeShape(value, key))
elif attr_def.type == "list(shape)":
attr_value.list.shape.extend(
[_MakeShape(x, key) for x in value])
elif attr_def.type == "tensor":
attr_value.tensor.CopyFrom(_MakeTensor(value, key))
elif attr_def.type == "list(tensor)":
attr_value.list.tensor.extend(
[_MakeTensor(x, key) for x in value])
elif attr_def.type == "func":
attr_value.func.CopyFrom(_MakeFunc(value, key))
elif attr_def.type == "list(func)":
attr_value.list.func.extend([_MakeFunc(x, key) for x in value])
else:
raise TypeError("Unrecognized Attr type " + attr_def.type)
attr_protos[key] = attr_value
del attrs # attrs is no longer authoritative, use attr_protos instead
# Determine output types (possibly using attrs)
output_structure = []
for arg in op_def.output_arg:
if arg.number_attr:
n = _AttrValue(attr_protos, arg.number_attr).i
output_structure.append(n)
elif arg.type_attr:
t = _AttrValue(attr_protos, arg.type_attr)
output_structure.append(None)
elif arg.type_list_attr:
t = _AttrValue(attr_protos, arg.type_list_attr)
output_structure.append(len(t.list.type))
else:
output_structure.append(None)
if keywords:
raise TypeError("apply_op() got unexpected keyword arguments: " +
", ".join(sorted(keywords.keys())))
# NOTE(mrry): We add an explicit colocation constraint between
# the newly created op and any of its reference-typed inputs.
must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)
if arg.is_ref]
with _MaybeColocateWith(must_colocate_inputs):
# Add Op to graph
op = g.create_op(op_type_name, inputs, dtypes=None, name=scope,
input_types=input_types, attrs=attr_protos,
op_def=op_def)
return output_structure, op_def.is_stateful, op
# pylint: enable=invalid-name
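# Illustrative sketch (not part of the original module): populating a library
# from a hand-written OpDef and calling `apply_op`. The OpDef below is assumed
# to mirror the registered "Identity" op (input "input", output "output",
# type attr "T"); the attr "T" is inferred from the input's dtype, so it is
# not passed explicitly.
def _op_def_library_example():
  op_def = op_def_pb2.OpDef()
  op_def.name = "Identity"
  inp = op_def.input_arg.add()
  inp.name = "input"
  inp.type_attr = "T"
  out = op_def.output_arg.add()
  out.name = "output"
  out.type_attr = "T"
  attr = op_def.attr.add()
  attr.name = "T"
  attr.type = "type"
  library = OpDefLibrary()
  library.add_op(op_def)
  with ops.Graph().as_default():
    x = ops.convert_to_tensor([1.0, 2.0])
    # Returns the single output tensor of the created Identity node.
    return library.apply_op("Identity", input=x)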
|
tensorflow-master
|
tensorflow/python/framework/op_def_library.py
|
tensorflow-master
|
tensorflow/python/framework/__init__.py
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Utlity to convert FunctionDef to GraphDef and Graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.framework.func_graph import FuncGraph
def function_def_to_graph(fdef, input_shapes=None, copy_functions=True):
"""Converts a FunctionDef to a FuncGraph (sub-class Graph).
The returned FuncGraph's `name`, `inputs` and `outputs` fields will be set.
The input tensors are represented as placeholders.
  Note: `FuncGraph.captures` is not set and may be set by the caller.
Args:
fdef: FunctionDef.
input_shapes: Optional. A list of TensorShape objects of the shapes of
function inputs. Defaults to the function's "_input_shapes" attribute. If
specified, its length must match length of `fdef.signature.input_arg`. If
a shape is None, the corresponding input placeholder will have unknown
shape.
    copy_functions: Whether to copy all functions that exist in the default
      graph (whether or not they are used) to the created FuncGraph.
Returns:
A FuncGraph.
"""
func_graph = FuncGraph(fdef.signature.name)
if input_shapes is None:
input_shapes_attr = fdef.attr.get("_input_shapes", None)
if input_shapes_attr is not None:
input_shapes = input_shapes_attr.list.shape
graph_def, nested_to_flat_tensor_name = function_def_to_graph_def(
fdef, input_shapes, copy_functions)
with func_graph.as_default():
# Add all function nodes to the graph.
importer.import_graph_def(graph_def, name="")
# Initialize fields specific to FuncGraph.
# inputs
input_tensor_names = [
nested_to_flat_tensor_name[arg.name] for arg in fdef.signature.input_arg
]
func_graph.inputs = [
func_graph.get_tensor_by_name(name) for name in input_tensor_names
]
# outputs
output_tensor_names = [
nested_to_flat_tensor_name[fdef.ret[arg.name]]
for arg in fdef.signature.output_arg
]
func_graph.outputs = [
func_graph.get_tensor_by_name(name) for name in output_tensor_names
]
func_graph.control_outputs = [
func_graph.get_operation_by_name(fdef.control_ret[ret_name])
for ret_name in fdef.signature.control_output
]
for node in graph_def.node:
output_shapes = node.attr.get("_output_shapes", None)
if output_shapes is not None:
op = func_graph.get_operation_by_name(node.name)
# _output_shapes for functions can sometimes be too long because the
# output-intermediates-for-gradients version of the function was
# substituted before saving. We'll accept that here. (See b/133666530).
for output_index, shape in enumerate(
output_shapes.list.shape[:len(op.outputs)]):
op.outputs[output_index].set_shape(shape)
return func_graph
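# Illustrative sketch (not part of the original module): a minimal, hand-built
# pass-through FunctionDef converted to a FuncGraph. The single placeholder
# created for the input also serves as the output tensor.
def _function_def_to_graph_example():
  from tensorflow.core.framework import function_pb2  # pylint: disable=g-import-not-at-top
  fdef = function_pb2.FunctionDef()
  fdef.signature.name = "PassThrough"
  inp = fdef.signature.input_arg.add()
  inp.name = "x"
  inp.type = types_pb2.DT_FLOAT
  out = fdef.signature.output_arg.add()
  out.name = "y"
  out.type = types_pb2.DT_FLOAT
  # In a FunctionDef, input args are referenced by bare name; the conversion
  # maps "x" to the flat tensor name "x:0".
  fdef.ret["y"] = "x"
  graph = function_def_to_graph(fdef)
  return graph.inputs, graph.outputs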
def _is_function(fname):
"""Checks for a function definition with `fname` in the current context."""
if context.executing_eagerly():
return context.context().has_function(fname)
else:
return ops.get_default_graph()._is_function(fname) # pylint: disable=protected-access
def function_def_to_graph_def(fdef, input_shapes=None, copy_functions=True):
"""Convert a FunctionDef to a GraphDef.
Steps:
1. Creates placeholder nodes corresponding to inputs in
`FunctionDef.signature.input_arg`.
2. Adds NodeDefs in `FunctionDef.node_def` to `GraphDef.node`.
3. Renames inputs of all nodes to use the convention of GraphDef instead of
FunctionDef. See comment on `FunctionDef.node_def` on how the tensor naming
in FunctionDefs is different from GraphDefs.
Args:
fdef: FunctionDef.
input_shapes: Optional. A list of TensorShape objects of the shapes of
function inputs. If specified, its length must match length of
`fdef.signature.input_arg`. If a shape is None, the corresponding input
placeholder will have unknown shape.
    copy_functions: Whether to copy all functions that exist in the default
      graph (whether or not they are used) to the created GraphDef.
Returns:
A tuple of (GraphDef, dict<string, string>). The dict contains a mapping
from nested tensor names (in FunctionDef) to flattened names (in GraphDef).
Raises:
ValueError: If the length of input_shapes does not match the number of
input_args or if the FunctionDef is invalid.
"""
graph_def = graph_pb2.GraphDef()
graph_def.versions.CopyFrom(
versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER))
# Copy *all* functions from outer graph to `graph_def` so that both direct
# and indirect references are safely handled.
if copy_functions:
ops.get_default_graph()._copy_functions_to_graph_def(graph_def, 0) # pylint: disable=protected-access
if input_shapes and len(input_shapes) != len(fdef.signature.input_arg):
raise ValueError("Length of input_shapes must match the number of " +
"input_args. len(input_shapes): {} len(input_arg): {}".
format(len(input_shapes), len(fdef.signature.input_arg)))
# 1. Create placeholders for input nodes.
for i, arg_def in enumerate(fdef.signature.input_arg):
node_def = graph_def.node.add()
node_def.name = arg_def.name
node_def.op = "Placeholder"
node_def.attr["dtype"].type = arg_def.type
if input_shapes and input_shapes[i] is not None:
input_shape = input_shapes[i]
if not isinstance(input_shape, tensor_shape_pb2.TensorShapeProto):
input_shape = input_shape.as_proto()
node_def.attr["shape"].shape.CopyFrom(input_shape)
arg_attrs = fdef.arg_attr[i].attr
for k in arg_attrs:
# Only copy internal attributes. Normal attributes for nodes cannot be
# applied to these Placeholder nodes.
if k.startswith("_"):
node_def.attr[k].CopyFrom(arg_attrs[k])
# 2. Copy all body NodeDefs to the GraphDef.
graph_def.node.extend(fdef.node_def)
# 3. Perform the renaming.
# Build the tensor name mapping then flatten the tensor names.
# See comment on `FunctionDef.node_def` on how the tensor naming in
# FunctionDefs is different from GraphDefs.
nested_to_flat_tensor_name = {}
for arg_def in fdef.signature.input_arg:
nested_to_flat_tensor_name[arg_def.name] = "{}:0".format(arg_def.name)
control_name = "^" + arg_def.name
nested_to_flat_tensor_name[control_name] = control_name
for node_def in fdef.node_def:
op_def = ops.get_default_graph()._get_op_def(node_def.op) # pylint: disable=protected-access
for attr in op_def.attr:
if attr.type == "func":
fname = node_def.attr[attr.name].func.name
if not _is_function(fname):
raise ValueError("%s function not found." % fname)
elif attr.type == "list(func)":
for fn in node_def.attr[attr.name].list.func:
fname = fn.name
if not _is_function(fname):
raise ValueError("%s function not found." % fname)
# Iterate over output_args in op_def to build the map.
# Index of the output tensor in the flattened list of *all* output
# tensors of the op.
flattened_index = 0
for arg_def in op_def.output_arg:
num_args = _get_num_args(arg_def, node_def)
for i in range(num_args):
# Map tensor names from "node_name:output_arg_name:index" to
# "node_name:flattened_index".
nested_name = "{}:{}:{}".format(node_def.name, arg_def.name, i)
flat_name = "{}:{}".format(node_def.name, flattened_index)
nested_to_flat_tensor_name[nested_name] = flat_name
flattened_index += 1
control_name = "^" + node_def.name
nested_to_flat_tensor_name[control_name] = control_name
# Update inputs of all nodes in graph.
for node_def in graph_def.node:
for i in range(len(node_def.input)):
node_def.input[i] = nested_to_flat_tensor_name[node_def.input[i]]
return graph_def, nested_to_flat_tensor_name
# Based on implementation in core/framework/node_def_util.cc::ComputeArgRange.
def _get_num_args(arg_def, node_def):
if arg_def.number_attr:
return node_def.attr[arg_def.number_attr].i
elif arg_def.type_list_attr:
return len(node_def.attr[arg_def.type_list_attr].list.type)
elif arg_def.type_attr or arg_def.type != types_pb2.DT_INVALID:
return 1
else:
raise ValueError("Invalid arg_def:\n\n{}".format(str(arg_def)))
|
tensorflow-master
|
tensorflow/python/framework/function_def_to_graph.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of dtypes (Tensor element types)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import builtins
from tensorflow.core.framework import types_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util.tf_export import tf_export
_np_bfloat16 = pywrap_tensorflow.TF_bfloat16_type()
@tf_export("dtypes.DType", "DType")
class DType(object):
"""Represents the type of the elements in a `Tensor`.
The following `DType` objects are defined:
* `tf.float16`: 16-bit half-precision floating-point.
* `tf.float32`: 32-bit single-precision floating-point.
* `tf.float64`: 64-bit double-precision floating-point.
* `tf.bfloat16`: 16-bit truncated floating-point.
* `tf.complex64`: 64-bit single-precision complex.
* `tf.complex128`: 128-bit double-precision complex.
* `tf.int8`: 8-bit signed integer.
* `tf.uint8`: 8-bit unsigned integer.
* `tf.uint16`: 16-bit unsigned integer.
* `tf.uint32`: 32-bit unsigned integer.
* `tf.uint64`: 64-bit unsigned integer.
* `tf.int16`: 16-bit signed integer.
* `tf.int32`: 32-bit signed integer.
* `tf.int64`: 64-bit signed integer.
* `tf.bool`: Boolean.
* `tf.string`: String.
* `tf.qint8`: Quantized 8-bit signed integer.
* `tf.quint8`: Quantized 8-bit unsigned integer.
* `tf.qint16`: Quantized 16-bit signed integer.
* `tf.quint16`: Quantized 16-bit unsigned integer.
* `tf.qint32`: Quantized 32-bit signed integer.
* `tf.resource`: Handle to a mutable resource.
* `tf.variant`: Values of arbitrary types.
The `tf.as_dtype()` function converts numpy types and string type
names to a `DType` object.
"""
def __init__(self, type_enum):
"""Creates a new `DataType`.
NOTE(mrry): In normal circumstances, you should not need to
construct a `DataType` object directly. Instead, use the
`tf.as_dtype()` function.
Args:
type_enum: A `types_pb2.DataType` enum value.
Raises:
      TypeError: If `type_enum` is not a valid `types_pb2.DataType`.
"""
# TODO(mrry): Make the necessary changes (using __new__) to ensure
# that calling this returns one of the interned values.
type_enum = int(type_enum)
if (type_enum not in types_pb2.DataType.values() or
type_enum == types_pb2.DT_INVALID):
raise TypeError("type_enum is not a valid types_pb2.DataType: %s" %
type_enum)
self._type_enum = type_enum
@property
def _is_ref_dtype(self):
"""Returns `True` if this `DType` represents a reference type."""
return self._type_enum > 100
@property
def _as_ref(self):
"""Returns a reference `DType` based on this `DType`."""
if self._is_ref_dtype:
return self
else:
return _INTERN_TABLE[self._type_enum + 100]
@property
def base_dtype(self):
"""Returns a non-reference `DType` based on this `DType`."""
if self._is_ref_dtype:
return _INTERN_TABLE[self._type_enum - 100]
else:
return self
@property
def real_dtype(self):
"""Returns the dtype correspond to this dtype's real part."""
base = self.base_dtype
if base == complex64:
return float32
elif base == complex128:
return float64
else:
return self
@property
def is_numpy_compatible(self):
return self._type_enum not in _NUMPY_INCOMPATIBLE
@property
def as_numpy_dtype(self):
"""Returns a `numpy.dtype` based on this `DType`."""
return _TF_TO_NP[self._type_enum]
@property
def as_datatype_enum(self):
"""Returns a `types_pb2.DataType` enum value based on this `DType`."""
return self._type_enum
@property
def is_bool(self):
"""Returns whether this is a boolean data type"""
return self.base_dtype == bool
@property
def is_integer(self):
"""Returns whether this is a (non-quantized) integer type."""
return (self.is_numpy_compatible and not self.is_quantized and
np.issubdtype(self.as_numpy_dtype, np.integer))
@property
def is_floating(self):
"""Returns whether this is a (non-quantized, real) floating point type."""
return ((self.is_numpy_compatible and
np.issubdtype(self.as_numpy_dtype, np.floating)) or
self.base_dtype == bfloat16)
@property
def is_complex(self):
"""Returns whether this is a complex floating point type."""
return self.base_dtype in (complex64, complex128)
@property
def is_quantized(self):
"""Returns whether this is a quantized data type."""
return self.base_dtype in _QUANTIZED_DTYPES_NO_REF
@property
def is_unsigned(self):
"""Returns whether this type is unsigned.
Non-numeric, unordered, and quantized types are not considered unsigned, and
this function returns `False`.
Returns:
Whether a `DType` is unsigned.
"""
try:
return self.min == 0
except TypeError:
return False
@property
def min(self):
"""Returns the minimum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or
self.base_dtype in (bool, string, complex64, complex128)):
raise TypeError("Cannot find minimum value of %s." % self)
# there is no simple way to get the min value of a dtype, we have to check
# float and int types separately
try:
return np.finfo(self.as_numpy_dtype()).min
    except:  # bare except, since the exceptions finfo may raise are not documented
try:
return np.iinfo(self.as_numpy_dtype()).min
except:
if self.base_dtype == bfloat16:
return _np_bfloat16(float.fromhex("-0x1.FEp127"))
raise TypeError("Cannot find minimum value of %s." % self)
@property
def max(self):
"""Returns the maximum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or
self.base_dtype in (bool, string, complex64, complex128)):
raise TypeError("Cannot find maximum value of %s." % self)
# there is no simple way to get the max value of a dtype, we have to check
# float and int types separately
try:
return np.finfo(self.as_numpy_dtype()).max
    except:  # bare except, since the exceptions finfo may raise are not documented
try:
return np.iinfo(self.as_numpy_dtype()).max
except:
if self.base_dtype == bfloat16:
return _np_bfloat16(float.fromhex("0x1.FEp127"))
raise TypeError("Cannot find maximum value of %s." % self)
@property
def limits(self, clip_negative=True):
"""Return intensity limits, i.e.
(min, max) tuple, of the dtype.
Args:
clip_negative : bool, optional If True, clip the negative range (i.e.
return 0 for min intensity) even if the image dtype allows negative
values. Returns
min, max : tuple Lower and upper intensity limits.
"""
min, max = dtype_range[self.as_numpy_dtype] # pylint: disable=redefined-builtin
if clip_negative:
min = 0 # pylint: disable=redefined-builtin
return min, max
def is_compatible_with(self, other):
"""Returns True if the `other` DType will be converted to this DType.
The conversion rules are as follows:
```python
DType(T) .is_compatible_with(DType(T)) == True
```
Args:
other: A `DType` (or object that may be converted to a `DType`).
Returns:
True if a Tensor of the `other` `DType` will be implicitly converted to
this `DType`.
"""
other = as_dtype(other)
return self._type_enum in (other.as_datatype_enum,
other.base_dtype.as_datatype_enum)
def __eq__(self, other):
"""Returns True iff this DType refers to the same type as `other`."""
if other is None:
return False
try:
dtype = as_dtype(other).as_datatype_enum
return self._type_enum == dtype # pylint: disable=protected-access
except TypeError:
return False
def __ne__(self, other):
"""Returns True iff self != other."""
return not self.__eq__(other)
@property
def name(self):
"""Returns the string name for this `DType`."""
return _TYPE_TO_STRING[self._type_enum]
def __str__(self):
return "<dtype: %r>" % self.name
def __repr__(self):
return "tf." + self.name
def __hash__(self):
return self._type_enum
def __reduce__(self):
return as_dtype, (self.name,)
@property
def size(self):
if (self._type_enum == types_pb2.DT_VARIANT or
self._type_enum == types_pb2.DT_RESOURCE):
return 1
return np.dtype(self.as_numpy_dtype).itemsize
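# Illustrative sketch (not part of the original module): a few of the DType
# properties above, exercised with the interned constants defined below.
# Wrapped in a function so nothing runs at import time.
def _dtype_properties_example():
  assert float32_ref.base_dtype is float32
  assert complex64.real_dtype is float32
  assert int32.is_integer and not int32.is_floating
  assert uint8.min == 0 and uint8.max == 255
  # A non-reference dtype accepts its reference twin, but not vice versa.
  assert float32.is_compatible_with(float32_ref)
  assert not float32_ref.is_compatible_with(float32)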
# Define data type range of numpy dtype
dtype_range = {
np.bool_: (False, True),
np.bool8: (False, True),
np.uint8: (0, 255),
np.uint16: (0, 65535),
np.int8: (-128, 127),
np.int16: (-32768, 32767),
np.int64: (-2**63, 2**63 - 1),
np.uint64: (0, 2**64 - 1),
np.int32: (-2**31, 2**31 - 1),
np.uint32: (0, 2**32 - 1),
np.float32: (-1, 1),
np.float64: (-1, 1)
}
# Define standard wrappers for the types_pb2.DataType enum.
resource = DType(types_pb2.DT_RESOURCE)
tf_export("dtypes.resource", "resource").export_constant(__name__, "resource")
variant = DType(types_pb2.DT_VARIANT)
tf_export("dtypes.variant", "variant").export_constant(__name__, "variant")
float16 = DType(types_pb2.DT_HALF)
tf_export("dtypes.float16", "float16").export_constant(__name__, "float16")
half = float16
tf_export("dtypes.half", "half").export_constant(__name__, "half")
float32 = DType(types_pb2.DT_FLOAT)
tf_export("dtypes.float32", "float32").export_constant(__name__, "float32")
float64 = DType(types_pb2.DT_DOUBLE)
tf_export("dtypes.float64", "float64").export_constant(__name__, "float64")
double = float64
tf_export("dtypes.double", "double").export_constant(__name__, "double")
int32 = DType(types_pb2.DT_INT32)
tf_export("dtypes.int32", "int32").export_constant(__name__, "int32")
uint8 = DType(types_pb2.DT_UINT8)
tf_export("dtypes.uint8", "uint8").export_constant(__name__, "uint8")
uint16 = DType(types_pb2.DT_UINT16)
tf_export("dtypes.uint16", "uint16").export_constant(__name__, "uint16")
uint32 = DType(types_pb2.DT_UINT32)
tf_export("dtypes.uint32", "uint32").export_constant(__name__, "uint32")
uint64 = DType(types_pb2.DT_UINT64)
tf_export("dtypes.uint64", "uint64").export_constant(__name__, "uint64")
int16 = DType(types_pb2.DT_INT16)
tf_export("dtypes.int16", "int16").export_constant(__name__, "int16")
int8 = DType(types_pb2.DT_INT8)
tf_export("dtypes.int8", "int8").export_constant(__name__, "int8")
string = DType(types_pb2.DT_STRING)
tf_export("dtypes.string", "string").export_constant(__name__, "string")
complex64 = DType(types_pb2.DT_COMPLEX64)
tf_export("dtypes.complex64",
"complex64").export_constant(__name__, "complex64")
complex128 = DType(types_pb2.DT_COMPLEX128)
tf_export("dtypes.complex128",
"complex128").export_constant(__name__, "complex128")
int64 = DType(types_pb2.DT_INT64)
tf_export("dtypes.int64", "int64").export_constant(__name__, "int64")
bool = DType(types_pb2.DT_BOOL) # pylint: disable=redefined-builtin
tf_export("dtypes.bool", "bool").export_constant(__name__, "bool")
qint8 = DType(types_pb2.DT_QINT8)
tf_export("dtypes.qint8", "qint8").export_constant(__name__, "qint8")
quint8 = DType(types_pb2.DT_QUINT8)
tf_export("dtypes.quint8", "quint8").export_constant(__name__, "quint8")
qint16 = DType(types_pb2.DT_QINT16)
tf_export("dtypes.qint16", "qint16").export_constant(__name__, "qint16")
quint16 = DType(types_pb2.DT_QUINT16)
tf_export("dtypes.quint16", "quint16").export_constant(__name__, "quint16")
qint32 = DType(types_pb2.DT_QINT32)
tf_export("dtypes.qint32", "qint32").export_constant(__name__, "qint32")
resource_ref = DType(types_pb2.DT_RESOURCE_REF)
variant_ref = DType(types_pb2.DT_VARIANT_REF)
bfloat16 = DType(types_pb2.DT_BFLOAT16)
tf_export("dtypes.bfloat16", "bfloat16").export_constant(__name__, "bfloat16")
float16_ref = DType(types_pb2.DT_HALF_REF)
half_ref = float16_ref
float32_ref = DType(types_pb2.DT_FLOAT_REF)
float64_ref = DType(types_pb2.DT_DOUBLE_REF)
double_ref = float64_ref
int32_ref = DType(types_pb2.DT_INT32_REF)
uint32_ref = DType(types_pb2.DT_UINT32_REF)
uint8_ref = DType(types_pb2.DT_UINT8_REF)
uint16_ref = DType(types_pb2.DT_UINT16_REF)
int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
uint64_ref = DType(types_pb2.DT_UINT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
quint8_ref = DType(types_pb2.DT_QUINT8_REF)
qint16_ref = DType(types_pb2.DT_QINT16_REF)
quint16_ref = DType(types_pb2.DT_QUINT16_REF)
qint32_ref = DType(types_pb2.DT_QINT32_REF)
bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)
_NUMPY_INCOMPATIBLE = frozenset([
types_pb2.DT_VARIANT, types_pb2.DT_VARIANT_REF, types_pb2.DT_RESOURCE,
types_pb2.DT_RESOURCE_REF
])
# Maintain an intern table so that we don't have to create a large
# number of small objects.
_INTERN_TABLE = {
types_pb2.DT_HALF: float16,
types_pb2.DT_FLOAT: float32,
types_pb2.DT_DOUBLE: float64,
types_pb2.DT_INT32: int32,
types_pb2.DT_UINT8: uint8,
types_pb2.DT_UINT16: uint16,
types_pb2.DT_UINT32: uint32,
types_pb2.DT_UINT64: uint64,
types_pb2.DT_INT16: int16,
types_pb2.DT_INT8: int8,
types_pb2.DT_STRING: string,
types_pb2.DT_COMPLEX64: complex64,
types_pb2.DT_COMPLEX128: complex128,
types_pb2.DT_INT64: int64,
types_pb2.DT_BOOL: bool,
types_pb2.DT_QINT8: qint8,
types_pb2.DT_QUINT8: quint8,
types_pb2.DT_QINT16: qint16,
types_pb2.DT_QUINT16: quint16,
types_pb2.DT_QINT32: qint32,
types_pb2.DT_BFLOAT16: bfloat16,
types_pb2.DT_RESOURCE: resource,
types_pb2.DT_VARIANT: variant,
types_pb2.DT_HALF_REF: float16_ref,
types_pb2.DT_FLOAT_REF: float32_ref,
types_pb2.DT_DOUBLE_REF: float64_ref,
types_pb2.DT_INT32_REF: int32_ref,
types_pb2.DT_UINT32_REF: uint32_ref,
types_pb2.DT_UINT8_REF: uint8_ref,
types_pb2.DT_UINT16_REF: uint16_ref,
types_pb2.DT_INT16_REF: int16_ref,
types_pb2.DT_INT8_REF: int8_ref,
types_pb2.DT_STRING_REF: string_ref,
types_pb2.DT_COMPLEX64_REF: complex64_ref,
types_pb2.DT_COMPLEX128_REF: complex128_ref,
types_pb2.DT_INT64_REF: int64_ref,
types_pb2.DT_UINT64_REF: uint64_ref,
types_pb2.DT_BOOL_REF: bool_ref,
types_pb2.DT_QINT8_REF: qint8_ref,
types_pb2.DT_QUINT8_REF: quint8_ref,
types_pb2.DT_QINT16_REF: qint16_ref,
types_pb2.DT_QUINT16_REF: quint16_ref,
types_pb2.DT_QINT32_REF: qint32_ref,
types_pb2.DT_BFLOAT16_REF: bfloat16_ref,
types_pb2.DT_RESOURCE_REF: resource_ref,
types_pb2.DT_VARIANT_REF: variant_ref,
}
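# Illustrative sketch (not executed; an assumption for documentation): because
# enum values are interned above, repeated conversions return the same DType
# object, so identity comparisons are cheap:
#
#   as_dtype(types_pb2.DT_FLOAT) is float32                       # True
#   as_dtype(types_pb2.DT_FLOAT) is as_dtype(types_pb2.DT_FLOAT)  # True
#
# (`as_dtype` is defined later in this module.)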
# Standard mappings between types_pb2.DataType values and string names.
_TYPE_TO_STRING = {
types_pb2.DT_HALF: "float16",
types_pb2.DT_FLOAT: "float32",
types_pb2.DT_DOUBLE: "float64",
types_pb2.DT_INT32: "int32",
types_pb2.DT_UINT8: "uint8",
types_pb2.DT_UINT16: "uint16",
types_pb2.DT_UINT32: "uint32",
types_pb2.DT_UINT64: "uint64",
types_pb2.DT_INT16: "int16",
types_pb2.DT_INT8: "int8",
types_pb2.DT_STRING: "string",
types_pb2.DT_COMPLEX64: "complex64",
types_pb2.DT_COMPLEX128: "complex128",
types_pb2.DT_INT64: "int64",
types_pb2.DT_BOOL: "bool",
types_pb2.DT_QINT8: "qint8",
types_pb2.DT_QUINT8: "quint8",
types_pb2.DT_QINT16: "qint16",
types_pb2.DT_QUINT16: "quint16",
types_pb2.DT_QINT32: "qint32",
types_pb2.DT_BFLOAT16: "bfloat16",
types_pb2.DT_RESOURCE: "resource",
types_pb2.DT_VARIANT: "variant",
types_pb2.DT_HALF_REF: "float16_ref",
types_pb2.DT_FLOAT_REF: "float32_ref",
types_pb2.DT_DOUBLE_REF: "float64_ref",
types_pb2.DT_INT32_REF: "int32_ref",
types_pb2.DT_UINT32_REF: "uint32_ref",
types_pb2.DT_UINT8_REF: "uint8_ref",
types_pb2.DT_UINT16_REF: "uint16_ref",
types_pb2.DT_INT16_REF: "int16_ref",
types_pb2.DT_INT8_REF: "int8_ref",
types_pb2.DT_STRING_REF: "string_ref",
types_pb2.DT_COMPLEX64_REF: "complex64_ref",
types_pb2.DT_COMPLEX128_REF: "complex128_ref",
types_pb2.DT_INT64_REF: "int64_ref",
types_pb2.DT_UINT64_REF: "uint64_ref",
types_pb2.DT_BOOL_REF: "bool_ref",
types_pb2.DT_QINT8_REF: "qint8_ref",
types_pb2.DT_QUINT8_REF: "quint8_ref",
types_pb2.DT_QINT16_REF: "qint16_ref",
types_pb2.DT_QUINT16_REF: "quint16_ref",
types_pb2.DT_QINT32_REF: "qint32_ref",
types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
types_pb2.DT_RESOURCE_REF: "resource_ref",
types_pb2.DT_VARIANT_REF: "variant_ref",
}
_STRING_TO_TF = {
value: _INTERN_TABLE[key] for key, value in _TYPE_TO_STRING.items()
}
# Add non-canonical aliases.
_STRING_TO_TF["half"] = float16
_STRING_TO_TF["half_ref"] = float16_ref
_STRING_TO_TF["float"] = float32
_STRING_TO_TF["float_ref"] = float32_ref
_STRING_TO_TF["double"] = float64
_STRING_TO_TF["double_ref"] = float64_ref
# Numpy representation for quantized dtypes.
#
# These are magic strings that are used in the swig wrapper to identify
# quantized types.
# TODO(mrry,keveman): Investigate Numpy type registration to replace this
# hard-coding of names.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
# _np_bfloat16 is defined by a module import.
# Custom struct dtype for directly-fed ResourceHandles of supported type(s).
np_resource = np.dtype([("resource", np.ubyte, 1)])
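# Illustrative sketch (an assumption, not part of the module): the structured
# dtypes above expose the raw quantized values through a named field, e.g.:
#
#   a = np.zeros(3, dtype=_np_qint8)
#   a["qint8"]   # int8 view of the underlying quantized values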
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
_NP_TO_TF = {
np.float16: float16,
np.float32: float32,
np.float64: float64,
np.int32: int32,
np.int64: int64,
np.uint8: uint8,
np.uint16: uint16,
np.uint32: uint32,
np.uint64: uint64,
np.int16: int16,
np.int8: int8,
np.complex64: complex64,
np.complex128: complex128,
np.object_: string,
np.string_: string,
np.unicode_: string,
np.bool_: bool,
_np_qint8: qint8,
_np_quint8: quint8,
_np_qint16: qint16,
_np_quint16: quint16,
_np_qint32: qint32,
_np_bfloat16: bfloat16,
}
# Map (some) NumPy platform dtypes to TF ones using their fixed-width
# synonyms. Note that platform dtypes are not always simple aliases,
# i.e. reference equality is not guaranteed. See e.g. numpy/numpy#9799.
for pdt in [
np.intc,
np.uintc,
np.int_,
np.uint,
np.longlong,
np.ulonglong,
]:
if pdt not in _NP_TO_TF:
_NP_TO_TF[pdt] = next(
_NP_TO_TF[dt] for dt in _NP_TO_TF if dt == pdt().dtype)
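# Illustrative note (platform-dependent; stated as an assumption): on a typical
# 64-bit Linux build the loop above resolves np.int_ to int64, because
#
#   np.int_().dtype == np.dtype(np.int64)   # True on such builds
#   _NP_TO_TF[np.int_] is int64             # follows from the loop above
#
# whereas on 64-bit Windows np.int_ matches np.int32 and maps to int32 instead.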
_TF_TO_NP = {
    types_pb2.DT_HALF: np.float16,
    types_pb2.DT_FLOAT: np.float32,
    types_pb2.DT_DOUBLE: np.float64,
    types_pb2.DT_INT32: np.int32,
    types_pb2.DT_UINT8: np.uint8,
    types_pb2.DT_UINT16: np.uint16,
    types_pb2.DT_UINT32: np.uint32,
    types_pb2.DT_UINT64: np.uint64,
    types_pb2.DT_INT16: np.int16,
    types_pb2.DT_INT8: np.int8,
    # NOTE(touts): For strings we use np.object as it supports variable length
    # strings.
    types_pb2.DT_STRING: np.object,
    types_pb2.DT_COMPLEX64: np.complex64,
    types_pb2.DT_COMPLEX128: np.complex128,
    types_pb2.DT_INT64: np.int64,
    types_pb2.DT_BOOL: np.bool,
    types_pb2.DT_QINT8: _np_qint8,
    types_pb2.DT_QUINT8: _np_quint8,
    types_pb2.DT_QINT16: _np_qint16,
    types_pb2.DT_QUINT16: _np_quint16,
    types_pb2.DT_QINT32: _np_qint32,
    types_pb2.DT_BFLOAT16: _np_bfloat16,
    # Ref types
    types_pb2.DT_HALF_REF: np.float16,
    types_pb2.DT_FLOAT_REF: np.float32,
    types_pb2.DT_DOUBLE_REF: np.float64,
    types_pb2.DT_INT32_REF: np.int32,
    types_pb2.DT_UINT32_REF: np.uint32,
    types_pb2.DT_UINT8_REF: np.uint8,
    types_pb2.DT_UINT16_REF: np.uint16,
    types_pb2.DT_INT16_REF: np.int16,
    types_pb2.DT_INT8_REF: np.int8,
    types_pb2.DT_STRING_REF: np.object,
    types_pb2.DT_COMPLEX64_REF: np.complex64,
    types_pb2.DT_COMPLEX128_REF: np.complex128,
    types_pb2.DT_INT64_REF: np.int64,
    types_pb2.DT_UINT64_REF: np.uint64,
    types_pb2.DT_BOOL_REF: np.bool,
    types_pb2.DT_QINT8_REF: _np_qint8,
    types_pb2.DT_QUINT8_REF: _np_quint8,
    types_pb2.DT_QINT16_REF: _np_qint16,
    types_pb2.DT_QUINT16_REF: _np_quint16,
    types_pb2.DT_QINT32_REF: _np_qint32,
    types_pb2.DT_BFLOAT16_REF: _np_bfloat16,
}
_QUANTIZED_DTYPES_NO_REF = frozenset([qint8, quint8, qint16, quint16, qint32])
_QUANTIZED_DTYPES_REF = frozenset(
[qint8_ref, quint8_ref, qint16_ref, quint16_ref, qint32_ref])
QUANTIZED_DTYPES = _QUANTIZED_DTYPES_REF.union(_QUANTIZED_DTYPES_NO_REF)
tf_export(
"dtypes.QUANTIZED_DTYPES",
v1=["dtypes.QUANTIZED_DTYPES",
"QUANTIZED_DTYPES"]).export_constant(__name__, "QUANTIZED_DTYPES")
_PYTHON_TO_TF = {
builtins.float: float32,
builtins.bool: bool,
builtins.object: string
}
_ANY_TO_TF = {}
_ANY_TO_TF.update(_INTERN_TABLE)
_ANY_TO_TF.update(_STRING_TO_TF)
_ANY_TO_TF.update(_PYTHON_TO_TF)
_ANY_TO_TF.update(_NP_TO_TF)
# Ensure no collisions.
assert len(_ANY_TO_TF) == sum(
len(d) for d in [_INTERN_TABLE, _STRING_TO_TF, _PYTHON_TO_TF, _NP_TO_TF])
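# Illustrative sketch (an assumption for documentation): merging the tables
# lets a single dictionary lookup resolve enums, strings, Python types, and
# NumPy types to the same interned DType objects, e.g.:
#
#   _ANY_TO_TF["float32"] is float32        # string name
#   _ANY_TO_TF[builtins.float] is float32   # Python type
#   _ANY_TO_TF[np.float32] is float32       # NumPy type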
@tf_export("dtypes.as_dtype", "as_dtype")
def as_dtype(type_value):
"""Converts the given `type_value` to a `DType`.
Args:
type_value: A value that can be converted to a `tf.DType` object. This may
currently be a `tf.DType` object, a [`DataType`
enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
a string type name, or a `numpy.dtype`.
Returns:
A `DType` corresponding to `type_value`.
Raises:
TypeError: If `type_value` cannot be converted to a `DType`.
"""
if isinstance(type_value, DType):
return type_value
if isinstance(type_value, np.dtype):
try:
return _NP_TO_TF[type_value.type]
except KeyError:
pass
try:
return _ANY_TO_TF[type_value]
except KeyError:
pass
raise TypeError("Cannot convert value %r to a TensorFlow DType." %
(type_value,))
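# Illustrative usage sketch (not part of the module; `tf` below denotes an
# importing client and is an assumption for the example):
#
#   tf.as_dtype("int32")        # -> tf.int32 (string name)
#   tf.as_dtype(np.float64)     # -> tf.float64 (NumPy type)
#   tf.as_dtype(tf.bool)        # -> tf.bool (DType passes through unchanged)
#   tf.as_dtype(np.dtype("S"))  # -> tf.string (via np.dtype.type lookup)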