# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import functools
import itertools
import json
import threading

import numpy as np
from six.moves import zip  # pylint: disable=redefined-builtin

from google.protobuf import json_format
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import values as distribute_values
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import function
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.engine import node as node_module
from tensorflow.python.keras.mixed_precision.experimental import autocast_variable
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.saving.saved_model import save as saved_model
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
# Modules that only depend on `keras.layers` import these from here.
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.generic_utils import to_snake_case  # pylint: disable=unused-import
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list  # pylint: disable=unused-import
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import serialization
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls

# Prefix that is added to the TF op layer names.
_TF_OP_LAYER_NAME_PREFIX = 'tf_op_layer_'

_keras_layers_gauge = monitoring.BoolGauge('/tensorflow/api/keras/layers',
                                           'keras layers usage', 'method')


@keras_export('keras.layers.Layer')
class Layer(module.Module):
  """Base layer class.

  This is the class from which all layers inherit.

  A layer is a class implementing common neural network operations, such as
  convolution, batch norm, etc. These operations require managing weights,
  losses, updates, and inter-layer connectivity.

  Users will just instantiate a layer and then treat it as a callable.

  We recommend that descendants of `Layer` implement the following methods:

  * `__init__()`: Save configuration in member variables.
  * `build()`: Called once from `__call__`, when we know the shapes of inputs
    and `dtype`. Should have the calls to `add_weight()`, and then
    call the super's `build()` (which sets `self.built = True`, which is
    nice in case the user wants to call `build()` manually before the
    first `__call__`).
  * `call()`: Called in `__call__` after making sure `build()` has been called
    once. Should actually perform the logic of applying the layer to the
    input tensors (which should be passed in as the first argument).

  Arguments:
    trainable: Boolean, whether the layer's variables should be trainable.
    name: String name of the layer.
    dtype: The dtype of the layer's computations and weights (default of
      `None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type
      of the first input in TensorFlow 1).
    dynamic: Set this to `True` if your layer should only be run eagerly, and
      should not be used to generate a static computation graph.
      This would be the case for a Tree-RNN or a recursive network,
      for example, or generally for any layer that manipulates tensors
      using Python control flow. If `False`, we assume that the layer can
      safely be used to generate a static computation graph.

  Read-only properties:
    name: The name of the layer (string).
    dtype: The dtype of the layer's computations and weights. If mixed
      precision is used with a `tf.keras.mixed_precision.experimental.Policy`,
      this is instead just the dtype of the layer's weights, as the
      computations are done in a different dtype.
    updates: List of update ops of this layer.
    losses: List of losses added by this layer.
    trainable_weights: List of variables to be included in backprop.
    non_trainable_weights: List of variables that should not be
      included in backprop.
    weights: The concatenation of the lists trainable_weights and
      non_trainable_weights (in this order).

  Mutable properties:
    trainable: Whether the layer should be trained (boolean).
    input_spec: Optional (list of) `InputSpec` object(s) specifying the
      constraints on inputs that can be accepted by the layer.

  ### Dtypes and casting

  Each layer has a dtype, which is typically the dtype of the layer's
  computations and variables. A layer's dtype can be queried via the
  `Layer.dtype` property. The dtype is specified with the `dtype` constructor
  argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()`
  if no dtype is passed. `floatx()` itself defaults to "float32".
  Additionally, layers will cast their inputs to the layer's dtype in
  TensorFlow 2. For example:

  ```
  x = tf.ones((4, 4, 4, 4), dtype='float64')
  layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
  print(layer.dtype)  # float32

  # `layer` casts its inputs to layer.dtype, which is float32, and does
  # computations in float32.
  y = layer(x)
  ```

  Currently, only tensors in the first argument to the layer's `call` method
  are casted. For example:

  ```
  class MyLayer(tf.keras.layers.Layer):
    # Bug! `b` will not be casted.
    def call(self, a, b):
      return a + 1., b + 1.

  a = tf.constant(1., dtype="float32")
  b = tf.constant(1., dtype="float32")

  layer = MyLayer(dtype="float64")
  x, y = layer(a, b)
  print(x.dtype)  # float64
  print(y.dtype)  # float32. Not casted since `b` was not passed to first input
  ```

  It is recommended to accept tensors only in the first argument. This way,
  all tensors are casted to the layer's dtype. `MyLayer` should therefore be
  written as:

  ```
  class MyLayer(tf.keras.layers.Layer):
    # Now, all tensor inputs will be casted.
    def call(self, inputs):
      a, b = inputs
      return a + 1., b + 1.

  a = tf.constant(1., dtype="float32")
  b = tf.constant(1., dtype="float32")

  layer = MyLayer(dtype="float64")
  x, y = layer((a, b))
  print(x.dtype)  # float64
  print(y.dtype)  # float64.
  ```

  Currently, other arguments are not automatically casted for technical
  reasons, but tensors in other arguments may be casted as well in a future
  minor release.

  A layer subclass can prevent its inputs from being autocasted by passing
  `autocast=False` to the layer constructor. For example:

  ```
  class MyLayer(tf.keras.layers.Layer):

    def __init__(self, **kwargs):
      kwargs['autocast'] = False
      super(MyLayer, self).__init__(**kwargs)

    def call(self, inp):
      return inp

  x = tf.ones((4, 4, 4, 4), dtype='float64')
  layer = MyLayer()
  print(layer.dtype)  # float32.
  y = layer(x)  # MyLayer will not cast inputs to its dtype of float32
  print(y.dtype)  # float64
  ```

  #### Running models in float64 in TensorFlow 2

  If you want to run a Model in float64, you can set floatx to be float64 by
  calling `tf.keras.backend.set_floatx('float64')`. This will cause all layers
  to default to float64 instead of float32:

  ```
  tf.keras.backend.set_floatx('float64')
  layer1 = tf.keras.layers.Dense(4)
  layer2 = tf.keras.layers.Dense(4)

  x = tf.ones((4, 4))
  y = layer2(layer1(x))  # Both layers run in float64
  ```

  Alternatively, you can pass `dtype='float64'` to each individual layer.
  Note that if you have any layers which contain other layers as members, you
  must ensure each sublayer gets `dtype='float64'` passed to its constructor
  as well:

  ```
  layer1 = tf.keras.layers.Dense(4, dtype='float64')
  layer2 = tf.keras.layers.Dense(4, dtype='float64')

  x = tf.ones((4, 4))
  y = layer2(layer1(x))  # Both layers run in float64

  class NestedLayer(tf.keras.layers.Layer):

    def __init__(self, **kwargs):
      super(NestedLayer, self).__init__(**kwargs)
      self.dense = tf.keras.layers.Dense(4, dtype=kwargs.get('dtype'))

    def call(self, inp):
      return self.dense(inp)

  layer3 = NestedLayer(dtype='float64')
  z = layer3(x)  # layer3's dense layer runs in float64, since NestedLayer
                 # correctly passed its dtype to its dense layer
  ```
  """

  # See tf.Module for the usage of this property.
  # The key for _obj_reference_counts_dict is a Trackable, which could be a
  # variable or layer etc. tf.Module._flatten will fail to flatten the key
  # since it is trying to convert Trackable to a string. This attribute can be
  # ignored even after the fix of nest lib, since the trackable object should
  # already be available as individual attributes. _obj_reference_counts_dict
  # just contains a copy of them.
  _TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
      ('_obj_reference_counts_dict',),
      module.Module._TF_MODULE_IGNORED_PROPERTIES
  ))

  @trackable.no_automatic_dependency_tracking
  def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,
               **kwargs):
    # These properties should be set by the user via keyword arguments.
    # Note that 'dtype', 'input_shape' and 'batch_input_shape'
    # are only applicable to input layers: do not pass these keywords
    # to non-input layers.
    allowed_kwargs = {
        'input_shape', 'batch_input_shape', 'batch_size', 'weights',
        'activity_regularizer', 'autocast'
    }
    # Validate optional keyword arguments.
    generic_utils.validate_kwargs(kwargs, allowed_kwargs)

    # Mutable properties
    # Indicates whether the layer's weights are updated during training
    # and whether the layer's updates are run during training.
    self._trainable = trainable
    # A stateful layer is a layer whose updates are run during inference too,
    # for instance stateful RNNs.
    self.stateful = False
    # Indicates whether `build` needs to be called upon layer call, to create
    # the layer's weights.
    self.built = False
    # Provides information about which inputs are compatible with the layer.
    self.input_spec = None
    self.supports_masking = False

    self._init_set_name(name)
    self._activity_regularizer = kwargs.pop('activity_regularizer', None)
    self._maybe_create_attribute('_trainable_weights', [])
    self._maybe_create_attribute('_non_trainable_weights', [])
    self._updates = []
    # Object to store all thread local layer properties.
    self._thread_local = threading.local()
    # A list of zero-argument lambdas which return Tensors, used for variable
    # regularizers.
    self._callable_losses = []
    # A list of symbolic Tensors containing activity regularizers and losses
    # manually added through `add_loss` in graph-building mode.
    self._losses = []
    # A list of metric instances corresponding to the symbolic metric tensors
    # added using the `add_metric` API.
    self._metrics = []

    self._set_dtype_policy(dtype)
    # Boolean indicating whether the layer automatically casts its inputs to
    # the layer's compute_dtype.
    self._autocast = kwargs.get('autocast',
                                base_layer_utils.v2_dtype_behavior_enabled())

    # Dependencies tracked via attribute assignment.
    self._maybe_create_attribute('_layers', [])

    # These lists will be filled via successive calls
    # to self._add_inbound_node().
    self._inbound_nodes = []
    self._outbound_nodes = []

    self._init_call_fn_args()

    # Whether the `call` method can be used to build a TF graph without issues.
    self._dynamic = dynamic

    # Manage input shape information if passed.
    if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
      # In this case we will later create an input layer
      # to insert before the current layer.
      if 'batch_input_shape' in kwargs:
        batch_input_shape = tuple(kwargs['batch_input_shape'])
      elif 'input_shape' in kwargs:
        if 'batch_size' in kwargs:
          batch_size = kwargs['batch_size']
        else:
          batch_size = None
        batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
      self._batch_input_shape = batch_input_shape

    # Manage initial weight values if passed.
    if 'weights' in kwargs:
      self._initial_weights = kwargs['weights']
    else:
      self._initial_weights = None

  def build(self, input_shape):
    """Creates the variables of the layer (optional, for subclass implementers).

    This is a method that implementers of subclasses of `Layer` or `Model`
    can override if they need a state-creation step in-between
    layer instantiation and layer call.

    This is typically used to create the weights of `Layer` subclasses.

    Arguments:
      input_shape: Instance of `TensorShape`, or list of instances of
        `TensorShape` if the layer expects a list of inputs
        (one instance per input).
    """
    self.built = True

  @doc_controls.for_subclass_implementers
  def call(self, inputs, **kwargs):  # pylint: disable=unused-argument
    """This is where the layer's logic lives.

    Arguments:
      inputs: Input tensor, or list/tuple of input tensors.
      **kwargs: Additional keyword arguments.

    Returns:
      A tensor or list/tuple of tensors.
    """
    return inputs

  @doc_controls.for_subclass_implementers
  def add_weight(self,
                 name=None,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 constraint=None,
                 partitioner=None,
                 use_resource=None,
                 synchronization=tf_variables.VariableSynchronization.AUTO,
                 aggregation=tf_variables.VariableAggregation.NONE,
                 **kwargs):
    """Adds a new variable to the layer.

    Arguments:
      name: Variable name.
      shape: Variable shape. Defaults to scalar if unspecified.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      initializer: Initializer instance (callable).
      regularizer: Regularizer instance (callable).
      trainable: Boolean, whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean and variance).
        Note that `trainable` cannot be `True` if `synchronization`
        is set to `ON_READ`.
      constraint: Constraint instance (callable).
      partitioner: Partitioner to be passed to the `Trackable` API.
      use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize. If `synchronization` is set to `ON_READ`, `trainable`
        must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      **kwargs: Additional keyword arguments. Accepted values are `getter` and
        `collections`.

    Returns:
      The created variable. Usually either a `Variable` or `ResourceVariable`
      instance. If `partitioner` is not `None`, a `PartitionedVariable`
      instance is returned.
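
    Example: a minimal sketch of a custom layer that creates its variables in
    `build` via `add_weight`. The `Linear` layer and the weight names, shapes
    and regularizer below are purely illustrative:

    ```python
    class Linear(tf.keras.layers.Layer):

      def __init__(self, units=32, **kwargs):
        super(Linear, self).__init__(**kwargs)
        self.units = units

      def build(self, input_shape):
        # Variables are created once the input shape is known.
        self.kernel = self.add_weight(
            name='kernel',
            shape=(int(input_shape[-1]), self.units),
            initializer='glorot_uniform',
            regularizer=tf.keras.regularizers.l2(1e-4),
            trainable=True)
        self.bias = self.add_weight(
            name='bias',
            shape=(self.units,),
            initializer='zeros',
            trainable=True)
        super(Linear, self).build(input_shape)

      def call(self, inputs):
        return tf.matmul(inputs, self.kernel) + self.bias
    ```

    Calling the layer on an input, e.g. `Linear(4)(tf.ones((2, 8)))`, triggers
    `build` and creates the weights through this method.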
    Raises:
      RuntimeError: If called with partitioned variable regularization and
        eager execution is enabled.
      ValueError: When giving unsupported dtype and no initializer or when
        trainable has been set to True with synchronization set as `ON_READ`.
    """
    if shape is None:
      shape = ()
    # Validate optional keyword arguments.
    for kwarg in kwargs:
      if kwarg not in ['getter', 'collections', 'experimental_autocast']:
        raise TypeError('Unknown keyword argument:', kwarg)
    getter = kwargs.pop('getter', base_layer_utils.make_variable)
    collections_arg = kwargs.pop('collections', None)
    # 'experimental_autocast' can be set to False by the caller to indicate an
    # AutoCastVariable should never be created.
    autocast = kwargs.pop('experimental_autocast', True)

    if dtype is None:
      dtype = self.dtype or backend.floatx()
    dtype = dtypes.as_dtype(dtype)
    if self._dtype_policy.variable_dtype is None:
      # The policy is "infer", so we infer the policy from the variable dtype.
      self._dtype_policy = policy.Policy(dtype.base_dtype.name)
    initializer = initializers.get(initializer)
    regularizer = regularizers.get(regularizer)
    constraint = constraints.get(constraint)

    if synchronization == tf_variables.VariableSynchronization.ON_READ:
      if trainable:
        raise ValueError(
            'Synchronization value can be set to '
            'VariableSynchronization.ON_READ only for non-trainable variables. '
            'You have specified trainable=True and '
            'synchronization=VariableSynchronization.ON_READ.')
      else:
        # Set trainable to be false when variable is to be synced on read.
        trainable = False
    elif trainable is None:
      trainable = True

    # Initialize variable when no initializer provided.
    if initializer is None:
      # If dtype is DT_FLOAT, provide a uniform unit scaling initializer.
      if dtype.is_floating:
        initializer = initializers.glorot_uniform()
      # If dtype is DT_INT/DT_UINT, provide a default value `zero`.
      # If dtype is DT_BOOL, provide a default value `FALSE`.
      elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
        initializer = initializers.zeros()
      # NOTES: Do we need to support handling DT_STRING and DT_COMPLEX here?
      else:
        raise ValueError('An initializer for variable %s of type %s is '
                         'required for layer %s' %
                         (name, dtype.base_dtype, self.name))

    if autocast and self._dtype_policy.should_cast_variables:
      # Wrap 'getter' with a version that returns an AutoCastVariable.
      old_getter = getter

      def getter(*args, **kwargs):  # pylint: disable=function-redefined
        variable = old_getter(*args, **kwargs)
        if isinstance(variable, distribute_values.DistributedVariable):
          return autocast_variable.AutoCastDistributedVariable(variable)
        else:
          return autocast_variable.AutoCastVariable(variable)

    variable = self._add_variable_with_custom_getter(
        name=name,
        shape=shape,
        # TODO(allenl): a `make_variable` equivalent should be added as a
        # `Trackable` method.
        getter=getter,
        # Manage errors in Layer rather than Trackable.
        overwrite=True,
        initializer=initializer,
        dtype=dtype,
        constraint=constraint,
        trainable=trainable,
        partitioner=partitioner,
        use_resource=use_resource,
        collections=collections_arg,
        synchronization=synchronization,
        aggregation=aggregation)
    backend.track_variable(variable)

    if regularizer is not None:
      # TODO(fchollet): in the future, this should be handled at the
      # level of variable creation, and weight regularization losses
      # should be variable attributes.
name_in_scope = variable.name[:variable.name.find(':')] self._handle_weight_regularization(name_in_scope, variable, regularizer) if trainable: self._trainable_weights.append(variable) else: self._non_trainable_weights.append(variable) return variable @base_layer_utils.default def get_config(self): """Returns the config of the layer. A layer config is a Python dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration. The config of a layer does not include connectivity information, nor the layer class name. These are handled by `Network` (one layer of abstraction above). Returns: Python dictionary. """ all_args = tf_inspect.getfullargspec(self.__init__).args config = {'name': self.name, 'trainable': self.trainable} if hasattr(self, '_batch_input_shape'): config['batch_input_shape'] = self._batch_input_shape if hasattr(self, 'dtype'): config['dtype'] = self.dtype if hasattr(self, 'dynamic'): # Only include `dynamic` in the `config` if it is `True` if self.dynamic: config['dynamic'] = self.dynamic elif 'dynamic' in all_args: all_args.remove('dynamic') expected_args = config.keys() # Finds all arguments in the `__init__` that are not in the config: extra_args = [arg for arg in all_args if arg not in expected_args] # Check that either the only argument in the `__init__` is `self`, # or that `get_config` has been overridden: if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'): raise NotImplementedError('Layers with arguments in `__init__` must ' 'override `get_config`.') # TODO(reedwm): Handle serializing self._dtype_policy. return config @classmethod def from_config(cls, config): """Creates a layer from its config. This method is the reverse of `get_config`, capable of instantiating the same layer from the config dictionary. It does not handle layer connectivity (handled by Network), nor weights (handled by `set_weights`). Arguments: config: A Python dictionary, typically the output of get_config. Returns: A layer instance. """ return cls(**config) def compute_output_shape(self, input_shape): """Computes the output shape of the layer. If the layer has not been built, this method will call `build` on the layer. This assumes that the layer will later be used with inputs that match the input shape provided here. Arguments: input_shape: Shape tuple (tuple of integers) or list of shape tuples (one per output tensor of the layer). Shape tuples can include None for free dimensions, instead of an integer. Returns: An input shape tuple. """ if context.executing_eagerly(): # In this case we build the model first in order to do shape inference. # This is acceptable because the framework only calls # `compute_output_shape` on shape values that the layer would later be # built for. It would however cause issues in case a user attempts to # use `compute_output_shape` manually with shapes that are incompatible # with the shape the Layer will be called on (these users will have to # implement `compute_output_shape` themselves). 
self._maybe_build(input_shape) with context.graph_mode(): graph = func_graph.FuncGraph('graph') with graph.as_default(): input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False) inputs = nest.map_structure( base_layer_utils.generate_placeholders_from_shape, input_shape) try: if self._expects_training_arg: outputs = self(inputs, training=False) else: outputs = self(inputs) except TypeError: raise NotImplementedError('We could not automatically infer ' 'the static shape of the layer\'s output.' ' Please implement the ' '`compute_output_shape` method on your ' 'layer (%s).' % self.__class__.__name__) return nest.map_structure(lambda t: t.shape, outputs) raise NotImplementedError @doc_controls.for_subclass_implementers def compute_output_signature(self, input_signature): """Compute the output tensor signature of the layer based on the inputs. Unlike a TensorShape object, a TensorSpec object contains both shape and dtype information for a tensor. This method allows layers to provide output dtype information if it is different from the input dtype. For any layer that doesn't implement this function, the framework will fall back to use `compute_output_shape`, and will assume that the output dtype matches the input dtype. Args: input_signature: Single TensorSpec or nested structure of TensorSpec objects, describing a candidate input for the layer. Returns: Single TensorSpec or nested structure of TensorSpec objects, describing how the layer would transform the provided input. Raises: TypeError: If input_signature contains a non-TensorSpec object. """ def check_type_return_shape(s): if not isinstance(s, tensor_spec.TensorSpec): raise TypeError( 'Only TensorSpec signature types are supported, ' 'but saw signature signature entry: {}.'.format(s)) return s.shape input_shape = nest.map_structure(check_type_return_shape, input_signature) output_shape = self.compute_output_shape(input_shape) dtype = self._compute_dtype if dtype is None: input_dtypes = [s.dtype for s in nest.flatten(input_signature)] # Default behavior when self.dtype is None, is to use the first input's # dtype. dtype = input_dtypes[0] return nest.map_structure( lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s), output_shape) @base_layer_utils.default def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument """Computes an output mask tensor. Arguments: inputs: Tensor or list of tensors. mask: Tensor or list of tensors. Returns: None or a tensor (or list of tensors, one per output tensor of the layer). """ if not self.supports_masking: if any(m is not None for m in nest.flatten(mask)): raise TypeError('Layer ' + self.name + ' does not support masking, ' 'but was passed an input_mask: ' + str(mask)) # masking not explicitly supported: return None as mask. return None # if masking is explicitly supported, by default # carry over the input mask return mask def __call__(self, inputs, *args, **kwargs): """Wraps `call`, applying pre- and post-processing steps. Arguments: inputs: input tensor(s). *args: additional positional arguments to be passed to `self.call`. **kwargs: additional keyword arguments to be passed to `self.call`. Returns: Output tensor(s). Note: - The following optional keyword arguments are reserved for specific uses: * `training`: Boolean scalar tensor of Python boolean indicating whether the `call` is meant for training or inference. * `mask`: Boolean input mask. 
- If the layer's `call` method takes a `mask` argument (as some Keras layers do), its default value will be set to the mask generated for `inputs` by the previous layer (if `input` did come from a layer that generated a corresponding mask, i.e. if it came from a Keras layer with masking support. Raises: ValueError: if the layer's `call` method returns None (an invalid value). """ call_context = base_layer_utils.call_context() input_list = nest.flatten(inputs) # We will attempt to build a TF graph if & only if all inputs are symbolic. # This is always the case in graph mode. It can also be the case in eager # mode when all inputs can be traced back to `keras.Input()` (when building # models using the functional API). build_graph = tf_utils.are_all_symbolic_tensors(input_list) # Accept NumPy and scalar inputs by converting to Tensors. if any(isinstance(x, (np.ndarray, float, int)) for x in input_list): def _convert_non_tensor(x): # Don't call `ops.convert_to_tensor` on all `inputs` because # `SparseTensors` can't be converted to `Tensor`. if isinstance(x, (np.ndarray, float, int)): return ops.convert_to_tensor(x) return x inputs = nest.map_structure(_convert_non_tensor, inputs) input_list = nest.flatten(inputs) # Handle `mask` propagation from previous layer to current layer. Masks can # be propagated explicitly via the `mask` argument, or implicitly via # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed # explicitly take priority. mask_arg_passed_by_framework = False input_masks = self._collect_input_masks(inputs, args, kwargs) if (self._expects_mask_arg and input_masks is not None and not self._call_arg_was_passed('mask', args, kwargs)): mask_arg_passed_by_framework = True kwargs['mask'] = input_masks # If `training` argument was not explicitly passed, propagate `training` # value from this layer's calling layer. training_arg_passed_by_framework = False # Priority 1: `training` was explicitly passed. if self._call_arg_was_passed('training', args, kwargs): training_value = self._get_call_arg_value('training', args, kwargs) if not self._expects_training_arg: kwargs.pop('training') else: training_value = None # Priority 2: `training` was passed to a parent layer. if call_context.training is not None: training_value = call_context.training # Priority 3a: `learning_phase()` has been set. elif backend.global_learning_phase_is_set(): training_value = backend.learning_phase() # Priority 3b: Pass the `learning_phase()` if in the Keras FuncGraph. elif build_graph: with backend.get_graph().as_default(): if base_layer_utils.is_in_keras_graph(): training_value = backend.learning_phase() if self._expects_training_arg and training_value is not None: # Force the training_value to be bool type which matches to the contract # for layer/model call args. if tensor_util.is_tensor(training_value): training_value = math_ops.cast(training_value, dtypes.bool) else: training_value = bool(training_value) kwargs['training'] = training_value training_arg_passed_by_framework = True # Only create Keras history if at least one tensor originates from a # `keras.Input`. Otherwise this Layer may be being used outside the Keras # framework. if build_graph and base_layer_utils.needs_keras_history(inputs): base_layer_utils.create_keras_history(inputs) # Clear eager losses on top level model call. # We are clearing the losses only on the top level model call and not on # every layer/model call because layer/model may be reused. 
if (base_layer_utils.is_in_eager_or_tf_function() and not call_context.in_call): self._clear_losses() with call_context.enter(self, inputs, build_graph, training_value): # Check input assumptions set after layer building, e.g. input shape. if build_graph: # Symbolic execution on symbolic tensors. We will attempt to build # the corresponding TF subgraph inside `backend.get_graph()` # TODO(reedwm): We should assert input compatibility after the inputs # are casted, not before. input_spec.assert_input_compatibility(self.input_spec, inputs, self.name) graph = backend.get_graph() with graph.as_default(), backend.name_scope(self._name_scope()): # Build layer if applicable (if the `build` method has been # overridden). self._maybe_build(inputs) cast_inputs = self._maybe_cast_inputs(inputs) # Wrapping `call` function in autograph to allow for dynamic control # flow and control dependencies in call. We are limiting this to # subclassed layers as autograph is strictly needed only for # subclassed layers and models. # tf_convert will respect the value of autograph setting in the # enclosing tf.function, if any. if (base_layer_utils.is_subclassed(self) and not base_layer_utils.from_saved_model(self)): call_fn = autograph.tf_convert( self.call, ag_ctx.control_status_ctx()) else: call_fn = self.call if not self.dynamic: try: with base_layer_utils.autocast_context_manager( self._compute_dtype): # Add auto_control_deps in V2 when they are not already added by # a `tf.function`. if (ops.executing_eagerly_outside_functions() and not base_layer_utils.is_in_eager_or_tf_function()): with auto_control_deps.AutomaticControlDependencies() as acd: outputs = call_fn(cast_inputs, *args, **kwargs) # Wrap Tensors in `outputs` in `tf.identity` to avoid # circular dependencies. outputs = base_layer_utils.mark_as_return(outputs, acd) else: outputs = call_fn(cast_inputs, *args, **kwargs) except errors.OperatorNotAllowedInGraphError as e: raise TypeError('You are attempting to use Python control ' 'flow in a layer that was not declared to be ' 'dynamic. Pass `dynamic=True` to the class ' 'constructor.\nEncountered error:\n"""\n' + str(e) + '\n"""') else: # We will use static shape inference to return symbolic tensors # matching the specifications of the layer outputs. # Since `self.dynamic` is True, we will never attempt to # run the underlying TF graph (which is disconnected). # TODO(fchollet): consider py_func as an alternative, which # would enable us to run the underlying graph if needed. outputs = self._symbolic_call(inputs) if outputs is None: raise ValueError('A layer\'s `call` method should return a ' 'Tensor or a list of Tensors, not None ' '(layer: ' + self.name + ').') if base_layer_utils.have_all_keras_metadata(inputs): if training_arg_passed_by_framework: kwargs.pop('training') if mask_arg_passed_by_framework: kwargs.pop('mask') inputs, outputs = self._set_connectivity_metadata_( inputs, outputs, args, kwargs) self._handle_activity_regularization(inputs, outputs) self._set_mask_metadata(inputs, outputs, input_masks) if hasattr(self, '_set_inputs') and not self.inputs: # Subclassed network: explicitly set metadata normally set by # a call to self._set_inputs(). # TODO(b/120997007): This should be done in Eager as well, but # causes garbage collection issues because of the placeholders # created on the default Keras graph. self._set_inputs(inputs, outputs) else: # Eager execution on data tensors. 
with backend.name_scope(self._name_scope()): self._maybe_build(inputs) cast_inputs = self._maybe_cast_inputs(inputs) with base_layer_utils.autocast_context_manager( self._compute_dtype): outputs = self.call(cast_inputs, *args, **kwargs) self._handle_activity_regularization(inputs, outputs) self._set_mask_metadata(inputs, outputs, input_masks) return outputs @property def dtype(self): return self._dtype_policy.variable_dtype @property def name(self): return self._name @property def dynamic(self): return self._dynamic @property def trainable(self): return self._trainable @trainable.setter def trainable(self, value): self._trainable = value for layer in getattr(self, '_layers', []): layer.trainable = value @property def activity_regularizer(self): """Optional regularizer function for the output of this layer.""" return self._activity_regularizer @activity_regularizer.setter def activity_regularizer(self, regularizer): """Optional regularizer function for the output of this layer.""" self._activity_regularizer = regularizer @property def input_spec(self): return self._input_spec @input_spec.setter # Must be decorated to prevent tracking, since the input_spec can be nested # InputSpec objects. @trackable.no_automatic_dependency_tracking def input_spec(self, value): for v in nest.flatten(value): if v is not None and not isinstance(v, InputSpec): raise TypeError('Layer input_spec must be an instance of InputSpec. ' 'Got: {}'.format(v)) self._input_spec = value @property def trainable_weights(self): if self.trainable: nested = self._gather_children_attribute('trainable_weights') return self._trainable_weights + nested else: return [] @property def non_trainable_weights(self): if self.trainable: nested = self._gather_children_attribute('non_trainable_weights') return self._non_trainable_weights + nested else: nested = self._gather_children_attribute('weights') return self._trainable_weights + self._non_trainable_weights + nested @property def weights(self): """Returns the list of all layer variables/weights. Returns: A list of variables. """ return self.trainable_weights + self.non_trainable_weights @property def updates(self): if not self.trainable and not self.stateful: return [] with backend.get_graph().as_default(): updates = [] for u in self._updates: if callable(u): try: u = u() except errors.InaccessibleTensorError: base_layer_utils.check_graph_consistency( method='add_update', force_raise=True) raise # check_graph_consistency may not always raise. base_layer_utils.check_graph_consistency(u, method='add_update') updates.append(u) return updates + self._gather_children_attribute('updates') @property def losses(self): """Losses which are associated with this `Layer`. Variable regularization tensors are created when this property is accessed, so it is eager safe: accessing `losses` under a `tf.GradientTape` will propagate gradients back to the corresponding variables. Returns: A list of tensors. """ collected_losses = [] # If any eager losses are present, we assume the model to be part of an # eager training loop (either a custom one or the one used when # `run_eagerly=True`), and so we always return just the eager losses in that # case. 
if self._eager_losses: collected_losses.extend(self._eager_losses) else: collected_losses.extend(self._losses) for regularizer in self._callable_losses: loss_tensor = regularizer() if loss_tensor is not None: collected_losses.append(loss_tensor) return collected_losses + self._gather_children_attribute('losses') @doc_controls.for_subclass_implementers def add_loss(self, losses, inputs=None): """Add loss tensor(s), potentially dependent on layer inputs. Some losses (for instance, activity regularization losses) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs `a` and `b`, some entries in `layer.losses` may be dependent on `a` and some on `b`. This method automatically keeps track of dependencies. This method can be used inside a subclassed layer or model's `call` function, in which case `losses` should be a Tensor or list of Tensors. Example: ```python class MyLayer(tf.keras.layers.Layer): def call(inputs, self): self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True) return inputs ``` This method can also be called directly on a Functional Model during construction. In this case, any loss Tensors passed to this Model must be symbolic and be able to be traced back to the model's `Input`s. These losses become part of the model's topology and are tracked in `get_config`. Example: ```python inputs = tf.keras.Input(shape=(10,)) x = tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) model = tf.keras.Model(inputs, outputs) # Actvity regularization. model.add_loss(tf.abs(tf.reduce_mean(x))) ``` If this is not the case for your loss (if, for example, your loss references a `Variable` of one of the model's layers), you can wrap your loss in a zero-argument lambda. These losses are not tracked as part of the model's topology since they can't be serialized. Example: ```python inputs = tf.keras.Input(shape=(10,)) x = tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) model = tf.keras.Model(inputs, outputs) # Weight regularization. model.add_loss(lambda: tf.reduce_mean(x.kernel)) ``` The `get_losses_for` method allows to retrieve the losses relevant to a specific set of inputs. Arguments: losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses may also be zero-argument callables which create a loss tensor. inputs: Ignored when executing eagerly. If anything other than None is passed, it signals the losses are conditional on some of the layer's inputs, and thus they should only be run where these inputs are available. This is the case for activity regularization losses, for instance. If `None` is passed, the losses are assumed to be unconditional, and will apply across all dataflows of the layer (e.g. weight regularization losses). """ def _tag_unconditional(loss): if callable(loss): loss = loss() if loss is None: return None # Will be filtered out when computing the .losses property if not tensor_util.is_tensor(loss): loss = ops.convert_to_tensor(loss, dtype=backend.floatx()) loss._unconditional_loss = (inputs is None) # pylint: disable=protected-access return loss losses = nest.flatten(losses) callable_losses = [] eager_losses = [] symbolic_losses = [] for loss in losses: if callable(loss): callable_losses.append(functools.partial(_tag_unconditional, loss)) continue if loss is None: continue if not tensor_util.is_tensor(loss): loss = ops.convert_to_tensor(loss, dtype=backend.floatx()) # TF Functions should take the eager path. 
if (tf_utils.is_symbolic_tensor(loss) and not base_layer_utils.is_in_tf_function()): symbolic_losses.append(_tag_unconditional(loss)) base_layer_utils.check_graph_consistency(loss, method='add_loss') elif tensor_util.is_tensor(loss): eager_losses.append(_tag_unconditional(loss)) self._callable_losses += callable_losses in_call_context = base_layer_utils.call_context().in_call if eager_losses and not in_call_context: raise ValueError( 'Expected a symbolic Tensors or a callable for the loss value. ' 'Please wrap your loss computation in a zero argument `lambda`.') self._eager_losses += eager_losses if in_call_context: for symbolic_loss in symbolic_losses: self._losses.append(symbolic_loss) else: for symbolic_loss in symbolic_losses: if getattr(self, '_is_graph_network', False): self._graph_network_add_loss(symbolic_loss) else: # Possible a loss was added in a Layer's `build`. self._losses.append(symbolic_loss) @trackable.no_automatic_dependency_tracking def _clear_losses(self): """Used every step in eager to reset losses.""" self._eager_losses = [] if hasattr(self, '_layers'): for layer in trackable_layer_utils.filter_empty_layer_containers( self._layers): layer._clear_losses() @property def metrics(self): return self._metrics + self._gather_children_attribute('metrics') @doc_controls.for_subclass_implementers def add_metric(self, value, aggregation=None, name=None): """Adds metric tensor to the layer. Args: value: Metric tensor. aggregation: Sample-wise metric reduction function. If `aggregation=None`, it indicates that the metric tensor provided has been aggregated already. eg, `bin_acc = BinaryAccuracy(name='acc')` followed by `model.add_metric(bin_acc(y_true, y_pred))`. If aggregation='mean', the given metric tensor will be sample-wise reduced using `mean` function. eg, `model.add_metric(tf.reduce_sum(outputs), name='output_mean', aggregation='mean')`. name: String metric name. Raises: ValueError: If `aggregation` is anything other than None or `mean`. """ if aggregation is not None and aggregation != 'mean': raise ValueError( 'We currently support only `mean` sample-wise metric aggregation. ' 'You provided aggregation=`%s`' % aggregation) from_metric_obj = hasattr(value, '_metric_obj') is_symbolic = tf_utils.is_symbolic_tensor(value) in_call_context = base_layer_utils.call_context().in_call if name is None and not from_metric_obj: # Eg. `self.add_metric(math_ops.reduce_sum(x), aggregation='mean')` # In eager mode, we use metric name to lookup a metric. Without a name, # a new Mean metric wrapper will be created on every model/layer call. # So, we raise an error when no name is provided. # We will do the same for symbolic mode for consistency although a name # will be generated if no name is provided. # We will not raise this error in the foll use case for the sake of # consistency as name in provided in the metric constructor. # mean = metrics.Mean(name='my_metric') # model.add_metric(mean(outputs)) raise ValueError('Please provide a name for your metric like ' '`self.add_metric(tf.reduce_sum(inputs), ' 'name=\'mean_activation\', aggregation=\'mean\')`') elif from_metric_obj: name = value._metric_obj.name if in_call_context: # TF Function path should take the eager path. 
if is_symbolic and not base_layer_utils.is_in_tf_function(): self._symbolic_add_metric(value, aggregation, name) else: self._eager_add_metric(value, aggregation, name) else: if not is_symbolic: raise ValueError('Expected a symbolic Tensor for the metric value, ' 'received: ' + str(value)) # Possible a metric was added in a Layer's `build`. if not getattr(self, '_is_graph_network', False): with backend.get_graph().as_default(): self._symbolic_add_metric(value, aggregation, name) return if from_metric_obj: raise ValueError('Using the result of calling a `Metric` object ' 'when calling `add_metric` on a Functional ' 'Model is not supported. Please pass the ' 'Tensor to monitor directly.') # Insert layers into the Keras Graph Network. self._graph_network_add_metric(value, aggregation, name) @deprecation.deprecated_args(None, '`inputs` is now automatically inferred', 'inputs') @doc_controls.for_subclass_implementers def add_update(self, updates, inputs=None): """Add update op(s), potentially dependent on layer inputs. Weight updates (for instance, the updates of the moving mean and variance in a BatchNormalization layer) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs `a` and `b`, some entries in `layer.updates` may be dependent on `a` and some on `b`. This method automatically keeps track of dependencies. The `get_updates_for` method allows to retrieve the updates relevant to a specific set of inputs. This call is ignored when eager execution is enabled (in that case, variable updates are run on the fly and thus do not need to be tracked for later execution). Arguments: updates: Update op, or list/tuple of update ops, or zero-arg callable that returns an update op. A zero-arg callable should be passed in order to disable running the updates by setting `trainable=False` on this Layer, when executing in Eager mode. inputs: Deprecated, will be automatically inferred. """ if ds_context.has_strategy() and ds_context.in_cross_replica_context(): # Updates don't need to be run in a cross-replica context. if (ops.executing_eagerly_outside_functions() and not base_layer_utils.is_in_keras_graph()): raise RuntimeError( # pylint: disable=g-doc-exception '`add_update` was called in a cross-replica context. This is not ' 'expected. If you require this feature, please file an issue.') return updates = generic_utils.to_list(updates) call_context = base_layer_utils.call_context() # All updates can be run immediately in Eager or in a tf.function. if base_layer_utils.is_in_eager_or_tf_function(): if not call_context.frozen: for update in updates: if callable(update): update() return if call_context.in_call: relevant_inputs = call_context.inputs else: inbound_nodes = getattr(self, '_inbound_nodes', []) relevant_inputs = [node.input_tensors for node in inbound_nodes] def process_update(x): """Standardize update ops. Arguments: x: Tensor, op, or callable. Returns: An update op. """ if callable(x): update = lambda: process_update(x()) if not ops.executing_eagerly_outside_functions(): # In V1 mode, call the callable right away and process. This is needed # for TPU strategy. 
return update() elif isinstance(x, ops.Operation): update = x elif hasattr(x, 'op'): update = x.op else: update = ops.convert_to_tensor(x) reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update]) update._unconditional_update = update not in reachable return update updates = [process_update(x) for x in updates] # Non-callable Updates are run automatically inside `call` in V2, so # they do not need to be tracked later. if ops.executing_eagerly_outside_functions() and call_context.in_call: updates = [u for u in updates if callable(u)] self._updates += updates def set_weights(self, weights): """Sets the weights of the layer, from Numpy arrays. Arguments: weights: a list of Numpy arrays. The number of arrays and their shape must match number of the dimensions of the weights of the layer (i.e. it should match the output of `get_weights`). Raises: ValueError: If the provided weights list does not match the layer's specifications. """ params = self.weights if len(params) != len(weights): raise ValueError('You called `set_weights(weights)` on layer "' + self.name + '" with a weight list of length ' + str(len(weights)) + ', but the layer was expecting ' + str(len(params)) + ' weights. Provided weights: ' + str(weights)[:50] + '...') if not params: return weight_value_tuples = [] for p, w in zip(params, weights): ref_shape = p.shape if not ref_shape.is_compatible_with(w.shape): raise ValueError('Layer weight shape ' + str(ref_shape) + ' not compatible with ' 'provided weight shape ' + str(w.shape)) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) def get_weights(self): """Returns the current weights of the layer. Returns: Weights values as a list of numpy arrays. """ params = self.weights return backend.batch_get_value(params) def get_updates_for(self, inputs): """Retrieves updates relevant to a specific set of inputs. Arguments: inputs: Input tensor or list/tuple of input tensors. Returns: List of update ops of the layer that depend on `inputs`. """ if inputs is None: # Requesting unconditional updates. return [u for u in self.updates if u._unconditional_update] # Requesting input-conditional updates. updates = [u for u in self.updates if not u._unconditional_update] inputs = nest.flatten(inputs) reachable = tf_utils.get_reachable_from_inputs(inputs, updates) return [u for u in updates if u in reachable] def get_losses_for(self, inputs): """Retrieves losses relevant to a specific set of inputs. Arguments: inputs: Input tensor or list/tuple of input tensors. Returns: List of loss tensors of the layer that depend on `inputs`. """ if inputs is None: # Requesting unconditional losses. return [l for l in self.losses if l._unconditional_loss] # Requesting input-conditional losses. losses = [l for l in self.losses if not l._unconditional_loss] inputs = nest.flatten(inputs) reachable = tf_utils.get_reachable_from_inputs(inputs, losses) return [l for l in losses if l in reachable] def get_input_mask_at(self, node_index): """Retrieves the input mask tensor(s) of a layer at a given node. Arguments: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple inputs). 
""" inputs = self.get_input_at(node_index) if isinstance(inputs, list): return [getattr(x, '_keras_mask', None) for x in inputs] else: return getattr(inputs, '_keras_mask', None) def get_output_mask_at(self, node_index): """Retrieves the output mask tensor(s) of a layer at a given node. Arguments: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple outputs). """ output = self.get_output_at(node_index) if isinstance(output, list): return [getattr(x, '_keras_mask', None) for x in output] else: return getattr(output, '_keras_mask', None) @property def input_mask(self): """Retrieves the input mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer. Returns: Input mask tensor (potentially None) or list of input mask tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. """ inputs = self.input if isinstance(inputs, list): return [getattr(x, '_keras_mask', None) for x in inputs] else: return getattr(inputs, '_keras_mask', None) @property def output_mask(self): """Retrieves the output mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer. Returns: Output mask tensor (potentially None) or list of output mask tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. """ output = self.output if isinstance(output, list): return [getattr(x, '_keras_mask', None) for x in output] else: return getattr(output, '_keras_mask', None) def get_input_shape_at(self, node_index): """Retrieves the input shape(s) of a layer at a given node. Arguments: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A shape tuple (or list of shape tuples if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode. """ return self._get_node_attribute_at_index(node_index, 'input_shapes', 'input shape') def get_output_shape_at(self, node_index): """Retrieves the output shape(s) of a layer at a given node. Arguments: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A shape tuple (or list of shape tuples if the layer has multiple outputs). Raises: RuntimeError: If called in Eager mode. """ return self._get_node_attribute_at_index(node_index, 'output_shapes', 'output shape') def get_input_at(self, node_index): """Retrieves the input tensor(s) of a layer at a given node. Arguments: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A tensor (or list of tensors if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode. """ return self._get_node_attribute_at_index(node_index, 'input_tensors', 'input') def get_output_at(self, node_index): """Retrieves the output tensor(s) of a layer at a given node. Arguments: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A tensor (or list of tensors if the layer has multiple outputs). Raises: RuntimeError: If called in Eager mode. 
""" return self._get_node_attribute_at_index(node_index, 'output_tensors', 'output') @property def input(self): """Retrieves the input tensor(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer. Returns: Input tensor or list of input tensors. Raises: RuntimeError: If called in Eager mode. AttributeError: If no inbound nodes are found. """ if not self._inbound_nodes: raise AttributeError('Layer ' + self.name + ' is not connected, no input to return.') return self._get_node_attribute_at_index(0, 'input_tensors', 'input') @property def output(self): """Retrieves the output tensor(s) of a layer. Only applicable if the layer has exactly one output, i.e. if it is connected to one incoming layer. Returns: Output tensor or list of output tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. RuntimeError: if called in Eager mode. """ if not self._inbound_nodes: raise AttributeError('Layer ' + self.name + ' has no inbound nodes.') return self._get_node_attribute_at_index(0, 'output_tensors', 'output') @property def input_shape(self): """Retrieves the input shape(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer, or if all inputs have the same shape. Returns: Input shape, as an integer shape tuple (or list of shape tuples, one tuple per input tensor). Raises: AttributeError: if the layer has no defined input_shape. RuntimeError: if called in Eager mode. """ if not self._inbound_nodes: raise AttributeError('The layer has never been called ' 'and thus has no defined input shape.') all_input_shapes = set( [str(node.input_shapes) for node in self._inbound_nodes]) if len(all_input_shapes) == 1: return self._inbound_nodes[0].input_shapes else: raise AttributeError('The layer "' + str(self.name) + ' has multiple inbound nodes, ' 'with different input shapes. Hence ' 'the notion of "input shape" is ' 'ill-defined for the layer. ' 'Use `get_input_shape_at(node_index)` ' 'instead.') def count_params(self): """Count the total number of scalars composing the weights. Returns: An integer count. Raises: ValueError: if the layer isn't yet built (in which case its weights aren't yet defined). """ if not self.built: if getattr(self, '_is_graph_network', False): with tf_utils.maybe_init_scope(self): self._maybe_build(self.inputs) else: raise ValueError('You tried to call `count_params` on ' + self.name + ', but the layer isn\'t built. ' 'You can build it manually via: `' + self.name + '.build(batch_input_shape)`.') return int(sum(np.prod(w.shape.as_list()) for w in self.weights)) @property def output_shape(self): """Retrieves the output shape(s) of a layer. Only applicable if the layer has one output, or if all outputs have the same shape. Returns: Output shape, as an integer shape tuple (or list of shape tuples, one tuple per output tensor). Raises: AttributeError: if the layer has no defined output shape. RuntimeError: if called in Eager mode. """ if not self._inbound_nodes: raise AttributeError('The layer has never been called ' 'and thus has no defined output shape.') all_output_shapes = set( [str(node.output_shapes) for node in self._inbound_nodes]) if len(all_output_shapes) == 1: return self._inbound_nodes[0].output_shapes else: raise AttributeError('The layer "%s"' ' has multiple inbound nodes, ' 'with different output shapes. Hence ' 'the notion of "output shape" is ' 'ill-defined for the layer. ' 'Use `get_output_shape_at(node_index)` ' 'instead.' 
% self.name) @property @doc_controls.do_not_doc_inheritable def inbound_nodes(self): """Deprecated, do NOT use! Only for compatibility with external Keras.""" return self._inbound_nodes @property @doc_controls.do_not_doc_inheritable def outbound_nodes(self): """Deprecated, do NOT use! Only for compatibility with external Keras.""" return self._outbound_nodes ############################################################################## # Methods & attributes below are public aliases of other methods. # ############################################################################## @deprecation.deprecated( date=None, instructions='Please use `layer.__call__` method instead.') @doc_controls.do_not_doc_inheritable def apply(self, inputs, *args, **kwargs): """Deprecated, do NOT use! This is an alias of `self.__call__`. Arguments: inputs: Input tensor(s). *args: additional positional arguments to be passed to `self.call`. **kwargs: additional keyword arguments to be passed to `self.call`. Returns: Output tensor(s). """ return self.__call__(inputs, *args, **kwargs) @deprecation.deprecated( date=None, instructions='Please use `layer.add_weight` method instead.') @doc_controls.do_not_doc_inheritable def add_variable(self, *args, **kwargs): """Deprecated, do NOT use! Alias for `add_weight`.""" return self.add_weight(*args, **kwargs) @property def variables(self): """Returns the list of all layer variables/weights. Alias of `self.weights`. Returns: A list of variables. """ return self.weights @property def trainable_variables(self): return self.trainable_weights @property def non_trainable_variables(self): return self.non_trainable_weights ############################################################################## # Methods & attributes below are all private and only used by the framework. # ############################################################################## def _set_dtype_policy(self, dtype): """Sets self._dtype_policy.""" if isinstance(dtype, policy.Policy): self._dtype_policy = dtype elif dtype: self._dtype_policy = policy.Policy(dtypes.as_dtype(dtype).name) else: self._dtype_policy = policy.global_policy() if self._dtype_policy.should_cast_variables and backend.is_tpu_strategy( ds_context.get_strategy()): # TODO(b/137859335): Supoprt this. AutoCastVariables currently do not work # properly when wrapping TPUMirroredVariables. raise ValueError('DType Policies ending in "_with_float32_vars" are ' 'not yet supported with TPUStrategy. Got policy: %s' % self._dtype_policy.name) # This has no impact on the layer behavior, and is only used for printing # warnings. self._dtype_defaulted_to_floatx = (not dtype and policy.policy_defaults_to_floatx()) # TODO(reedwm): Expose this property? @property def _compute_dtype(self): """The layer's compute dtype. Unless mixed-precision is used, this is the same as `Layer.dtype`. If self._autocast is True, layer's will cast floating-point inputs to this. Returns: The layer's compute dtype. """ return self._dtype_policy.compute_dtype def _maybe_cast_inputs(self, inputs): """Maybe casts the inputs to the compute dtype. If self._compute_dtype is floating-point, and self_autocast is True, floating-point inputs are casted to self._compute_dtype. Args: inputs: Input tensor, or structure of input tensors. 
Returns: `inputs`, but tensors may have been casted to self._compute_dtype """ compute_dtype = self._compute_dtype if (self._autocast and compute_dtype and dtypes.as_dtype(compute_dtype).is_floating): def f(x): cast_types = (ops.Tensor, sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor) if (isinstance(x, cast_types) and x.dtype.is_floating and x.dtype.base_dtype.name != compute_dtype): if self._dtype_defaulted_to_floatx: self._warn_about_input_casting(x.dtype.base_dtype) return math_ops.cast(x, compute_dtype) else: return x return nest.map_structure(f, inputs) else: return inputs def _warn_about_input_casting(self, input_dtype): # self._already_warned_about_input_casting is only retrieved or set in this # function. already_warned = getattr(self, '_already_warned_about_input_casting', False) if not already_warned: tf_logging.warn( "Layer {self.name} is casting an input tensor from dtype " "{input_dtype} to the layer's dtype of {layer_dtype}, which is new " "behavior in TensorFlow 2. The layer has dtype {layer_dtype} " "because it's dtype defaults to floatx.\n\n" "" "If you intended to run this layer in {layer_dtype}, you can safely " "ignore this warning. If in doubt, this warning is likely only an " "issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\n" "" "To change all layers to have dtype {input_dtype} by default, call " "`tf.keras.backend.set_floatx('{input_dtype}')`. To change just this " "layer, pass dtype='{input_dtype}' to the layer constructor. If you " "are the author of this layer, you can disable autocasting by " "passing autocast=False to the base Layer constructor.\n".format( self=self, input_dtype=input_dtype.name, layer_dtype=self._compute_dtype)) self._already_warned_about_input_casting = True # _dtype used to be an attribute set in the constructor. We still expose it # because some clients still use it. # TODO(reedwm): Deprecate, then remove the _dtype property. @property def _dtype(self): # This is equivalent to returning self.dtype . We do not return self.dtype # as it would cause infinite recursion in a few subclasses, which override # "dtype" to return self._dtype. return self._dtype_policy.variable_dtype @_dtype.setter def _dtype(self, value): value = dtypes.as_dtype(value).name self._dtype_policy = policy.Policy(value) def _name_scope(self): return self.name def _init_set_name(self, name, zero_based=True): if not name: self._name = backend.unique_object_name( generic_utils.to_snake_case(self.__class__.__name__), zero_based=zero_based) else: self._name = name def _get_existing_metric(self, name=None): match = [m for m in self._metrics if m.name == name] if not match: return if len(match) > 1: raise ValueError( 'Please provide different names for the metrics you have added. ' 'We found {} metrics with the name: "{}"'.format(len(match), name)) return match[0] def _eager_add_metric(self, value, aggregation=None, name=None): # If the given metric is available in `metrics` list we just update state # on it, otherwise we create a new metric instance and # add it to the `metrics` list. metric_obj = getattr(value, '_metric_obj', None) if metric_obj: name = metric_obj.name match = self._get_existing_metric(name) if match: # Tensors that come from a Metric object already updated the Metric state. 
if not metric_obj: match(value) return if not metric_obj: assert aggregation is not None metric_obj, _ = base_layer_utils.create_mean_metric(value, name) self._metrics.append(metric_obj) def _symbolic_add_metric(self, value, aggregation=None, name=None): base_layer_utils.check_graph_consistency(value, method='add_metric') match = self._get_existing_metric(name) if aggregation is None: # Iterate over the metrics and check if the given metric exists already. # This can happen when a metric instance is created in subclassed model # layer `__init__` and we have tracked that instance already in # model.__setattr__. if match: result_tensor = value metric_obj = match elif hasattr(value, '_metric_obj'): # We track the instance using the metadata on the result tensor. result_tensor = value metric_obj = result_tensor._metric_obj self._metrics.append(metric_obj) else: raise ValueError( 'We do not support adding an aggregated metric result tensor that ' 'is not the output of a `tf.keras.metrics.Metric` metric instance. ' 'Without having access to the metric instance we cannot reset the ' 'state of a metric after every epoch during training. You can ' 'create a `tf.keras.metrics.Metric` instance and pass the result ' 'here or pass an un-aggregated result with `aggregation` parameter ' 'set as `mean`. For example: `self.add_metric(tf.reduce_sum(inputs)' ', name=\'mean_activation\', aggregation=\'mean\')`') else: # If a non-aggregated tensor is given as input (ie. `aggregation` is # explicitly set to `mean`), we wrap the tensor in `Mean` metric. if match: result_tensor = match(value) metric_obj = match else: metric_obj, result_tensor = base_layer_utils.create_mean_metric( value, name) self._metrics.append(metric_obj) def _handle_weight_regularization(self, name, variable, regularizer): """Create lambdas which compute regularization losses.""" def _loss_for_variable(v): """Creates a regularization loss `Tensor` for variable `v`.""" with backend.name_scope(name + '/Regularizer'): regularization = regularizer(v) return regularization if isinstance(variable, tf_variables.PartitionedVariable): for v in variable: self.add_loss(functools.partial(_loss_for_variable, v)) else: self.add_loss(functools.partial(_loss_for_variable, variable)) def _handle_activity_regularization(self, inputs, outputs): # Apply activity regularization. # Note that it should be applied every time the layer creates a new # output, since it is output-specific. if self._activity_regularizer: output_list = nest.flatten(outputs) with backend.name_scope('ActivityRegularizer'): for output in output_list: activity_loss = self._activity_regularizer(output) batch_size = math_ops.cast( array_ops.shape(output)[0], activity_loss.dtype) # Make activity regularization strength batch-agnostic. mean_activity_loss = activity_loss / batch_size base_layer_utils.check_graph_consistency( mean_activity_loss, method='activity_regularizer') self.add_loss(mean_activity_loss, inputs=inputs) def _set_mask_metadata(self, inputs, outputs, previous_mask): flat_outputs = nest.flatten(outputs) mask_already_computed = ( getattr(self, '_compute_output_and_mask_jointly', False) or all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs)) # Only compute the mask if the Layer explicitly supports masking or has # overridden `compute_mask`. 
should_compute_mask = ( hasattr(self, 'compute_mask') and (self.supports_masking or not getattr(self.compute_mask, '_is_default', False))) if mask_already_computed: flat_masks = [getattr(x, '_keras_mask', None) for x in flat_outputs] elif not should_compute_mask: flat_masks = [None for _ in flat_outputs] else: output_masks = self.compute_mask(inputs, previous_mask) # `compute_mask` can return a single `None` even when a Layer # has multiple outputs. if output_masks is None: flat_masks = [None for _ in flat_outputs] else: flat_masks = nest.flatten(output_masks) for output, mask in zip(flat_outputs, flat_masks): try: output._keras_mask = mask except AttributeError: # C Type such as np.ndarray. pass if tf_utils.are_all_symbolic_tensors(flat_outputs): for output in flat_outputs: if getattr(output, '_keras_mask', None) is not None: # Do not track masks for `TensorFlowOpLayer` construction. output._keras_mask._keras_history_checked = True def _collect_input_masks(self, inputs, args, kwargs): """Checks if `mask` argument was passed, else gathers mask from inputs.""" if self._call_arg_was_passed('mask', args, kwargs): return self._get_call_arg_value('mask', args, kwargs) if not self._should_compute_mask: return None input_masks = nest.map_structure(lambda t: getattr(t, '_keras_mask', None), inputs) if generic_utils.is_all_none(input_masks): return None return input_masks def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False): if arg_name in kwargs: return True call_fn_args = self._call_fn_args if not inputs_in_args: # Ignore `inputs` arg. call_fn_args = call_fn_args[1:] if arg_name in dict(zip(call_fn_args, args)): return True return False def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False): if arg_name in kwargs: return kwargs[arg_name] call_fn_args = self._call_fn_args if not inputs_in_args: # Ignore `inputs` arg. call_fn_args = call_fn_args[1:] args_dict = dict(zip(call_fn_args, args)) return args_dict[arg_name] def _set_connectivity_metadata_(self, inputs, outputs, args, kwargs): # If the layer returns tensors from its inputs, unmodified, # we copy them to avoid loss of tensor metadata. output_ls = nest.flatten(outputs) inputs_ls = object_identity.ObjectIdentitySet(nest.flatten(inputs)) output_ls_copy = [] for x in output_ls: if x in inputs_ls: with backend.name_scope(self.name): x = array_ops.identity(x) output_ls_copy.append(x) outputs = nest.pack_sequence_as(outputs, output_ls_copy) # Ignore `inputs` arg. arguments = dict(zip(self._call_fn_args[1:], args)) arguments.update(kwargs) # Add an inbound node to the layer, so it can keep track of this call. # This updates the layer history of the output tensor(s). self._add_inbound_node( input_tensors=inputs, output_tensors=outputs, arguments=arguments) return inputs, outputs def _add_inbound_node(self, input_tensors, output_tensors, arguments=None): """Internal method to create an inbound node for the layer. Arguments: input_tensors: list of input tensors. output_tensors: list of output tensors. arguments: dictionary of keyword arguments that were passed to the `call` method of the layer at the call that created the node. """ inbound_layers = nest.map_structure(lambda t: t._keras_history.layer, input_tensors) node_indices = nest.map_structure(lambda t: t._keras_history.node_index, input_tensors) tensor_indices = nest.map_structure(lambda t: t._keras_history.tensor_index, input_tensors) # Create node, add it to inbound nodes. 
node_module.Node( self, inbound_layers=inbound_layers, node_indices=node_indices, tensor_indices=tensor_indices, input_tensors=input_tensors, output_tensors=output_tensors, arguments=arguments) # Update tensor history metadata. # The metadata attribute consists of # 1) a layer instance # 2) a node index for the layer # 3) a tensor index for the node. # The allows layer reuse (multiple nodes per layer) and multi-output # or multi-input layers (e.g. a layer can return multiple tensors, # and each can be sent to a different layer). for i, tensor in enumerate(nest.flatten(output_tensors)): tensor._keras_history = KerasHistory(self, len(self._inbound_nodes) - 1, i) # pylint: disable=protected-access def _get_node_attribute_at_index(self, node_index, attr, attr_name): """Private utility to retrieves an attribute (e.g. inputs) from a node. This is used to implement the methods: - get_input_shape_at - get_output_shape_at - get_input_at etc... Arguments: node_index: Integer index of the node from which to retrieve the attribute. attr: Exact node attribute name. attr_name: Human-readable attribute name, for error messages. Returns: The layer's attribute `attr` at the node of index `node_index`. Raises: RuntimeError: If the layer has no inbound nodes, or if called in Eager mode. ValueError: If the index provided does not match any node. """ if not self._inbound_nodes: raise RuntimeError('The layer has never been called ' 'and thus has no defined ' + attr_name + '.') if not len(self._inbound_nodes) > node_index: raise ValueError('Asked to get ' + attr_name + ' at node ' + str(node_index) + ', but the layer has only ' + str(len(self._inbound_nodes)) + ' inbound nodes.') values = getattr(self._inbound_nodes[node_index], attr) if isinstance(values, list) and len(values) == 1: return values[0] else: return values def _maybe_build(self, inputs): # Check input assumptions set before layer building, e.g. input rank. if not self.built: input_spec.assert_input_compatibility( self.input_spec, inputs, self.name) input_list = nest.flatten(inputs) if input_list and self._dtype_policy.compute_dtype is None: try: dtype = input_list[0].dtype.base_dtype.name except AttributeError: pass else: self._dtype_policy = policy.with_input_dtype(self._dtype_policy, dtype) input_shapes = None if all(hasattr(x, 'shape') for x in input_list): input_shapes = nest.map_structure(lambda x: x.shape, inputs) # Only call `build` if the user has manually overridden the build method. if not hasattr(self.build, '_is_default'): # Any setup work performed only once should happen in an `init_scope` # to avoid creating symbolic Tensors that will later pollute any eager # operations. with tf_utils.maybe_init_scope(self): self.build(input_shapes) # We must set self.built since user defined build functions are not # constrained to set self.built. self.built = True # Optionally load weight values specified at layer instantiation. if getattr(self, '_initial_weights', None) is not None: self.set_weights(self._initial_weights) self._initial_weights = None def _symbolic_call(self, inputs): input_shapes = nest.map_structure(lambda x: x.shape, inputs) output_shapes = self.compute_output_shape(input_shapes) def _make_placeholder_like(shape): ph = backend.placeholder(shape=shape, dtype=self.dtype) ph._keras_mask = None return ph return nest.map_structure(_make_placeholder_like, output_shapes) def _get_trainable_state(self): """Get the `trainable` state of each sublayer. Returns: A dict mapping all sublayers to their `trainable` value. 
""" layers = trackable_layer_utils.filter_empty_layer_containers(self._layers) # Keep track of each top-level layers' `trainable` as well as the # state of all of its sublayers. trainable_state = {self: self.trainable} for layer in layers: trainable_state.update(layer._get_trainable_state()) return trainable_state def _set_trainable_state(self, trainable_state): """Set `trainable` state for each sublayer.""" layers = trackable_layer_utils.filter_empty_layer_containers(self._layers) if self in trainable_state: self.trainable = trainable_state[self] for layer in layers: layer._set_trainable_state(trainable_state) @property def _obj_reference_counts(self): """A dictionary counting the number of attributes referencing an object.""" self._maybe_create_attribute('_obj_reference_counts_dict', object_identity.ObjectIdentityDictionary()) return self._obj_reference_counts_dict def _maybe_create_attribute(self, name, default_value): """Create the attribute with the default value if it hasn't been created. This is useful for fields that is used for tracking purpose, _trainable_weights, or _layers. Note that user could create a layer subclass and assign an internal field before invoking the Layer.__init__(), the __setattr__() need to create the tracking fields and __init__() need to not override them. Args: name: String, the name of the attribute. default_value: Object, the default value of the attribute. """ if not hasattr(self, name): super(Layer, self).__setattr__(name, default_value) def __delattr__(self, name): # For any super.__delattr__() call, we will directly use the implementation # in Trackable and skip the behavior in AutoTrackable. The Layer was # originally use Trackable as base class, the change of using Module as base # class forced us to have AutoTrackable in the class hierarchy. Skipping # the __delattr__ and __setattr__ in AutoTrackable will keep the status quo. existing_value = getattr(self, name, None) # If this value is replacing an existing object assigned to an attribute, we # should clean it out to avoid leaking memory. First we check if there are # other attributes referencing it. reference_counts = self._obj_reference_counts if existing_value not in reference_counts: super(tracking.AutoTrackable, self).__delattr__(name) return reference_count = reference_counts[existing_value] if reference_count > 1: # There are other remaining references. We can't remove this object from # _layers etc. reference_counts[existing_value] = reference_count - 1 super(tracking.AutoTrackable, self).__delattr__(name) return else: # This is the last remaining reference. 
del reference_counts[existing_value] super(tracking.AutoTrackable, self).__delattr__(name) if (isinstance(existing_value, Layer) or trackable_layer_utils.has_weights(existing_value)): super(tracking.AutoTrackable, self).__setattr__( '_layers', [l for l in self._layers if l is not existing_value]) if isinstance(existing_value, tf_variables.Variable): super(tracking.AutoTrackable, self).__setattr__( '_trainable_weights', [w for w in self._trainable_weights if w is not existing_value]) super(tracking.AutoTrackable, self).__setattr__( '_non_trainable_weights', [w for w in self._non_trainable_weights if w is not existing_value]) def __setattr__(self, name, value): if (name == '_self_setattr_tracking' or not getattr(self, '_self_setattr_tracking', True) or getattr(self, '_is_graph_network', False) or # Exclude @property.setters from tracking hasattr(self.__class__, name)): try: super(tracking.AutoTrackable, self).__setattr__(name, value) except AttributeError: raise AttributeError( ('Can\'t set the attribute "{}", likely because it conflicts with ' 'an existing read-only @property of the object. Please choose a ' 'different name.').format(name)) return # Keep track of trackable objects, for the needs of `Network.save_weights`. value = data_structures.sticky_attribute_assignment( trackable=self, value=value, name=name) reference_counts = self._obj_reference_counts reference_counts[value] = reference_counts.get(value, 0) + 1 # Clean out the old attribute, which clears _layers and _trainable_weights # if necessary. try: self.__delattr__(name) except AttributeError: pass # TODO(scottzhu): Need to track Module object as well for weight tracking. # Be careful about metric if it becomes a Module in future. # Append value to self._layers if relevant # Sequential models use a separate layer tracking mechanism, so skip the # logic defined here for tracking layers. if (self.__class__.__name__ != 'Sequential' and (isinstance(value, Layer) or trackable_layer_utils.has_weights(value))): self._maybe_create_attribute('_layers', []) # We need to check object identity to avoid de-duplicating empty # container types which compare equal. if not any((layer is value for layer in self._layers)): self._layers.append(value) if hasattr(value, '_use_resource_variables'): # Legacy layers (V1 tf.layers) must always use # resource variables. value._use_resource_variables = True # Append value to list of trainable / non-trainable weights if relevant # TODO(b/125122625): This won't pick up on any variables added to a # list/dict after creation. for val in nest.flatten(value): # TODO(b/126450014): Remove `_UnreadVariable` check here when assign ops # no longer return True for isinstance Variable checks. if not isinstance(val, tf_variables.Variable): continue if isinstance(val, resource_variable_ops._UnreadVariable): # pylint: disable=protected-access continue # Users may add extra weights/variables # simply by assigning them to attributes (invalid for graph networks) self._maybe_create_attribute('_trainable_weights', []) self._maybe_create_attribute('_non_trainable_weights', []) if val.trainable: if any(val is w for w in self._trainable_weights): continue self._trainable_weights.append(val) else: if any(val is w for w in self._non_trainable_weights): continue self._non_trainable_weights.append(val) backend.track_variable(val) # Skip the auto trackable from tf.Module to keep status quo. See the comment # at __delattr__. 
super(tracking.AutoTrackable, self).__setattr__(name, value) def _gather_children_attribute(self, attribute): assert attribute in { 'weights', 'trainable_weights', 'non_trainable_weights', 'updates', 'losses', 'metrics' } if hasattr(self, '_layers'): nested_layers = trackable_layer_utils.filter_empty_layer_containers( self._layers) return list( itertools.chain.from_iterable( getattr(layer, attribute) for layer in nested_layers)) return [] # This is a hack so that the is_layer (within # training/trackable/layer_utils.py) check doesn't get the weights attr. # TODO(b/110718070): Remove when fixed. def _is_layer(self): return True def _init_call_fn_args(self): # Clear cached call function arguments. self.__class__._call_fn_args.fget.cache.pop(self, None) self.__class__._call_accepts_kwargs.fget.cache.pop(self, None) call_fn_args = self._call_fn_args self._expects_training_arg = ('training' in call_fn_args or self._call_accepts_kwargs) self._expects_mask_arg = ('mask' in call_fn_args or self._call_accepts_kwargs) @property @tracking.cached_per_instance def _call_fn_args(self): all_args = tf_inspect.getfullargspec(self.call).args # Scrub `self` that appears if a decorator was applied. if all_args and all_args[0] == 'self': return all_args[1:] return all_args @property @tracking.cached_per_instance def _call_accepts_kwargs(self): return tf_inspect.getfullargspec(self.call).varkw is not None @property @tracking.cached_per_instance def _should_compute_mask(self): return ('mask' in self._call_fn_args or getattr(self, 'compute_mask', None) is not None) @property def _object_identifier(self): """String stored in object identifier field in the SavedModel proto. Returns: A string with the object identifier, which is used at load time. """ return '_tf_keras_layer' @property def _eager_losses(self): # A list of loss values containing activity regularizers and losses # manually added through `add_loss` during eager execution. It is cleared # after every batch. # Because we plan on eventually allowing a same model instance to be trained # in eager mode or graph mode alternatively, we need to keep track of # eager losses and symbolic losses via separate attributes. if not hasattr(self._thread_local, '_eager_losses'): self._thread_local._eager_losses = [] return self._thread_local._eager_losses @_eager_losses.setter def _eager_losses(self, losses): self._thread_local._eager_losses = losses @property def _tracking_metadata(self): """String stored in metadata field in the SavedModel proto. Returns: A serialized JSON storing information necessary for recreating this layer. """ # TODO(kathywu): Add support for metrics serialization. # TODO(kathywu): Synchronize with the keras spec (go/keras-json-spec) once # the python config serialization has caught up. # Create a dictionary containing python layer state attributes. Any new # attribute that impacts the layer execution in some way should be added to # this dict. # Unlike a model's JSON configuration, which only # contains class_name and each layer's get_config() object, this stores more # information to accurately recreate the layer. # For backwards compatibility, any changes to this list should be additive. # Modifying or removing attributes may only be done with a sufficient # explanation. 
metadata = dict( class_name=type(self).__name__, name=self.name, trainable=self.trainable, expects_training_arg=self._expects_training_arg, dtype=self.dtype, batch_input_shape=getattr(self, '_batch_input_shape', None)) try: # Store the config dictionary, which is only used by the revived object # to return the original config when revived_obj.get_config() is called. # It is not important for recreating the revived object. metadata['config'] = self.get_config() except NotImplementedError: # in the case of a subclassed model, the get_config() method will throw # a NotImplementedError. pass if self.input_spec is not None: # Layer's input_spec has already been type-checked in the property setter. metadata['input_spec'] = nest.map_structure( lambda x: None if x is None else serialize_keras_object(x), self.input_spec) else: metadata['input_spec'] = None if (self.activity_regularizer is not None and hasattr(self.activity_regularizer, 'get_config')): metadata['activity_regularizer'] = serialize_keras_object( self.activity_regularizer) else: metadata['activity_regularizer'] = None return json.dumps(metadata, default=serialization.get_json_type) def _list_extra_dependencies_for_serialization(self, serialization_cache): """Lists extra dependencies to serialize to SavedModel. By overriding this method, extra dependencies can be attached to the serialized Layer. For example, this is used to save the list of `variables` and `trainable_variables`, which are python properties in a Layer object, but are represented as a static list in the SavedModel. Args: serialization_cache: A dictionary shared between all objects in the same object graph. This object is passed to both `_list_extra_dependencies_for_serialization` and `_list_functions_for_serialization`. Returns: A dictionary mapping attribute names to trackable objects. The entire list of attributes are listed in the `saved_model._LayerAttributes` class. """ return (saved_model.serialize_all_attributes(self, serialization_cache) .objects_to_serialize) def _list_functions_for_serialization(self, serialization_cache): """Lists the functions to include when serializing a Layer. Args: serialization_cache: Dictionary passed to all objects in the same object graph during serialization. Returns: A dictionary mapping attribute names to `Function` or `ConcreteFunction`. The entire list of attributes are listed in the `saved_model._LayerAttributes` class. """ # Create a dictionary containing the layer's call and loss functions. fns = (saved_model.serialize_all_attributes(self, serialization_cache) .functions_to_serialize) # The parent Autotrackable class saves all user-defined tf.functions, and # returns them in _list_functions_for_serialization(). Add these functions # to the dict. fns.update(super(Layer, self)._list_functions_for_serialization( serialization_cache)) return fns @property def _unique_trainable_weights(self): """Dedupe trainable weights while maintaining order as much as possible.""" trainable_weights = self.trainable_weights output, seen_weights = [], object_identity.ObjectIdentitySet() for w in trainable_weights: if w not in seen_weights: output.append(w) seen_weights.add(w) return output class TensorFlowOpLayer(Layer): """Wraps a TensorFlow Operation in a Layer. This class is used internally by the Functional API. When a user uses a raw TensorFlow Operation on symbolic tensors originating from an `Input` Layer, the resultant operation will be wrapped with this Layer object in order to make the operation compatible with the Keras API. 
This Layer will create a new, identical operation (except for inputs and outputs) every time it is called. If `run_eagerly` is `True`, the op creation and calculation will happen inside an Eager function. Instances of this Layer are created when `autolambda` is called, which is whenever a Layer's `__call__` encounters symbolic inputs that do not have Keras metadata, or when a Network's `__init__` encounters outputs that do not have Keras metadata. Attributes: node_def: String, the serialized NodeDef of the Op this layer will wrap. name: String, the name of the Layer. constants: Dict of NumPy arrays, the values of any Tensors needed for this Operation that do not originate from a Keras `Input` Layer. Since all placeholders must come from Keras `Input` Layers, these Tensors must be treated as constant in the Functional API. trainable: Bool, whether this Layer is trainable. Currently Variables are not supported, and so this parameter has no effect. dtype: The default dtype of this Layer. Inherited from `Layer` and has no effect on this class, however is used in `get_config`. """ def __init__(self, node_def, name, constants=None, trainable=True, dtype=None): # Pass autocast=False, as if inputs are cast, input types might not match # Operation type. super(TensorFlowOpLayer, self).__init__( name=_TF_OP_LAYER_NAME_PREFIX + name, trainable=trainable, dtype=dtype, autocast=False) _keras_layers_gauge.get_cell('TensorflowOpLayer').set(True) if isinstance(node_def, dict): self.node_def = json_format.ParseDict(node_def, node_def_pb2.NodeDef()) else: if not isinstance(node_def, bytes): node_def = node_def.encode('utf-8') self.node_def = node_def_pb2.NodeDef.FromString(node_def) # JSON serialization stringifies keys which are integer input indices. self.constants = ({ int(index): constant for index, constant in constants.items() } if constants is not None else {}) # Layer uses original op unless it is called on new inputs. # This means `built` is not set in `__call__`. self.built = True def call(self, inputs): if context.executing_eagerly(): return self._defun_call(inputs) return self._make_op(inputs) def _make_node_def(self, graph): node_def = node_def_pb2.NodeDef() node_def.CopyFrom(self.node_def) node_def.name = graph.unique_name(node_def.name) return node_def def _make_op(self, inputs): inputs = nest.flatten(inputs) graph = inputs[0].graph node_def = self._make_node_def(graph) with graph.as_default(): for index, constant in self.constants.items(): # Recreate constant in graph to add distribution context. value = tensor_util.constant_value(constant) if value is not None: constant = constant_op.constant(value, name=node_def.input[index]) inputs.insert(index, constant) # Check for case where first input should be a list of Tensors. 
if 'N' in node_def.attr: num_tensors = node_def.attr['N'].i inputs = [inputs[:num_tensors]] + inputs[num_tensors:] c_op = ops._create_c_op(graph, node_def, inputs, control_inputs=[]) op = graph._create_op_from_tf_operation(c_op) op._control_flow_post_processing() # Record the gradient because custom-made ops don't go through the # code-gen'd eager call path op_type = compat.as_str(op.op_def.name) attr_names = [compat.as_str(attr.name) for attr in op.op_def.attr] attrs = [] for attr_name in attr_names: attrs.append(attr_name) attrs.append(op.get_attr(attr_name)) attrs = tuple(attrs) execute.record_gradient(op_type, op.inputs, attrs, op.outputs, op.name) if len(op.outputs) == 1: return op.outputs[0] return op.outputs @function.defun def _defun_call(self, inputs): """Wraps the op creation method in an Eager function for `run_eagerly`.""" return self._make_op(inputs) def get_config(self): config = super(TensorFlowOpLayer, self).get_config() config.update({ # `__init__` prefixes the name. Revert to the constructor argument. 'name': config['name'][len(_TF_OP_LAYER_NAME_PREFIX):], 'node_def': json_format.MessageToDict(self.node_def), 'constants': { i: backend.get_value(c) for i, c in self.constants.items() } }) return config class AddLoss(Layer): """Adds its inputs as a loss. Attributes: unconditional: Whether or not the loss should be conditioned on the inputs. """ def __init__(self, unconditional, **kwargs): # Pass autocast=False, as there is no reason to cast loss to a different # dtype. kwargs['autocast'] = False super(AddLoss, self).__init__(**kwargs) self.unconditional = unconditional def call(self, inputs): self.add_loss(inputs, inputs=(not self.unconditional)) return inputs def get_config(self): config = super(AddLoss, self).get_config() config.update({'unconditional': self.unconditional}) return config class AddMetric(Layer): """Adds its inputs as a metric. Attributes: aggregation: 'mean' or None. How the inputs should be aggregated. metric_name: The name to use for this metric. """ def __init__(self, aggregation=None, metric_name=None, **kwargs): super(AddMetric, self).__init__(**kwargs) self.aggregation = aggregation self.metric_name = metric_name def call(self, inputs): self.add_metric(inputs, self.aggregation, self.metric_name) return inputs def get_config(self): config = super(AddMetric, self).get_config() config.update({ 'aggregation': self.aggregation, 'metric_name': self.metric_name }) return config class KerasHistory( collections.namedtuple('KerasHistory', ['layer', 'node_index', 'tensor_index'])): """Tracks the Layer call that created a Tensor, for Keras Graph Networks. During construction of Keras Graph Networks, this metadata is added to each Tensor produced as the output of a Layer, starting with an `InputLayer`. This allows Keras to track how each Tensor was produced, and this information is later retraced by the `keras.engine.Network` class to reconstruct the Keras Graph Network. Attributes: layer: The Layer that produced the Tensor. node_index: The specific call to the Layer that produced this Tensor. Layers can be called multiple times in order to share weights. A new node is created every time a Tensor is called. tensor_index: The output index for this Tensor. Always zero if the Layer that produced this Tensor only has one output. Nested structures of Tensors are deterministically assigned an index via `nest.flatten`. """ # Added to maintain memory and performance characteristics of `namedtuple` # while subclassing. 
__slots__ = () # Avoid breaking users who directly import this symbol from this file. # TODO(fchollet): remove this. InputSpec = input_spec.InputSpec # pylint:disable=invalid-name
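##############################################################################
# Illustrative sketch (not part of the original base_layer.py): what the
# KerasHistory metadata defined above looks like on a functional-API tensor.
# The layer name 'probe' and the input shape are assumptions made only for
# this example; `_keras_history` is a private attribute and is inspected here
# purely for demonstration.
##############################################################################
if __name__ == '__main__':
  import tensorflow as tf  # Imported here so the demo stays self-contained.

  x = tf.keras.Input(shape=(3,))
  y = tf.keras.layers.Dense(2, name='probe')(x)

  # `_keras_history` is the (layer, node_index, tensor_index) namedtuple
  # attached by `_add_inbound_node` above.
  layer, node_index, tensor_index = y._keras_history
  print(layer.name)     # 'probe'
  print(node_index)     # 0: first call of this layer
  print(tensor_index)   # 0: the layer has a single output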
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/base_layer.py
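# Illustrative sketch (standalone, not part of the library sources above or
# below): how the node-indexed accessors defined in `Layer` behave when a
# layer is reused. The names `shared_dense`, `a`, and `b` are assumptions made
# only for this example.
import tensorflow as tf

shared_dense = tf.keras.layers.Dense(4, name='shared_dense')
a = tf.keras.Input(shape=(8,))
b = tf.keras.Input(shape=(8,))
ya = shared_dense(a)  # creates inbound node 0
yb = shared_dense(b)  # creates inbound node 1

# Each call created one inbound node, so the per-node accessors disambiguate
# the two uses of the layer.
print(shared_dense.get_output_at(1) is yb)   # True: the tensor from node 1
print(shared_dense.get_input_shape_at(0))    # e.g. (None, 8)

# `input_shape` is only well-defined here because both nodes agree on the
# shape; with differing shapes it raises and `get_input_shape_at(node_index)`
# must be used instead.
print(shared_dense.input_shape)              # e.g. (None, 8)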
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training related logic for Keras model in TF 2.0 context. Note that all the code under this module is under active development, please DO NOT use it unless you are really sure what you are doing. """ # pylint: disable=protected-access from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.framework import errors from tensorflow.python.keras import callbacks as cbks from tensorflow.python.keras.distribute import distributed_training_utils as dist_utils from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.engine import training_v2_utils from tensorflow.python.keras.utils.mode_keys import ModeKeys from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest from tensorflow.python.util import tf_contextlib # The list of DataAdapter that support validation_split, only numpy and data # tensor support validation_split for now. _ADAPTER_FOR_VALIDATION_SPLIT = [data_adapter.TensorLikeDataAdapter] # The list of DataAdapter that support model._standardize_user_data. Currently # keras.sequence/python generator will cause error when calling # model._standardize_user_data, this should be updated in future cl, eg, the # dataset/generate/sequence input will be peeked and processed by # model._standardize_user_data() _ADAPTER_FOR_STANDARDIZE_USER_DATA = [ data_adapter.TensorLikeDataAdapter, data_adapter.DatasetAdapter, data_adapter.CompositeTensorDataAdapter ] def run_one_epoch(model, iterator, execution_function, dataset_size=None, batch_size=None, strategy=None, steps_per_epoch=None, num_samples=None, mode=ModeKeys.TRAIN, training_context=None, total_epochs=None): """Run the execution function with the data from iterator. Given the dataset iterator and execution function, get the data from iterator and call it with the execution function to get the result (metric/loss). It will run for steps_per_epoch or until to the iterator is fully consumed. Args: model: The keras model to run. iterator: the dataset iterator to fetch the data. execution_function: a tf.function that can be called with data. dataset_size: the size of iterator, None when unknown. batch_size: The size of the current batch. strategy: the distribution strategy instance from the model. steps_per_epoch: the number of steps to run for the epoch. num_samples: the number of samples for the whole epoch if known. This can be used to calculate the final partial batch, and scale the loss. mode: the mode for the current epoch. training_context: the context that contains callbacks and progress bar. total_epochs: the total number of epochs that will be run. 
Used when throw error when the iterator unexpectedly reaches its end. Returns: The loss and metric value from the model. """ # Only use the sample to count if there is a partial batch at the end. use_steps = num_samples is None if mode == ModeKeys.PREDICT: aggregator = training_utils.OutputsAggregator( use_steps=use_steps, steps=steps_per_epoch, num_samples=num_samples, batch_size=batch_size) else: aggregator = training_utils.MetricsAggregator( use_steps=use_steps, steps=steps_per_epoch, num_samples=num_samples) callbacks = training_context.callbacks progbar = training_context.progbar if callbacks.model.stop_training: return target_steps = steps_per_epoch or np.inf step = 0 while step < target_steps: if use_steps: current_batch_size = 1 elif step < target_steps - 1: current_batch_size = batch_size else: current_batch_size = num_samples - step * batch_size with training_context.on_batch( step=step, mode=mode, size=current_batch_size) as batch_logs: try: batch_outs = execution_function(iterator) except (StopIteration, errors.OutOfRangeError): # TODO(kaftan): File bug about tf function and errors.OutOfRangeError? # Are there any other C++ errors tf function should recapture? # The only acceptable case here is that the input has a unknown # length, and configured to fully consume it. if (dataset_size is None and steps_per_epoch is None and step > 0): # The input passed by the user ran out of batches. # Now we know the cardinality of the input(dataset or generator). steps_per_epoch = step aggregator.steps = steps_per_epoch progbar.params['steps'] = steps_per_epoch progbar.progbar.target = steps_per_epoch else: callbacks.model.stop_training = True logging.warning( 'Your input ran out of data; interrupting training. ' 'Make sure that your dataset or generator can generate at ' 'least `steps_per_epoch * epochs` batches (in this case, ' '{} batches). You may need to use the repeat() function ' 'when building your dataset.'.format( total_epochs * steps_per_epoch)) # In either case, break out the loop for training batch. # Also note the training_context that data inputs are exhausted, so all # the post batch hooks can be skipped. batch_logs['data_exhausted'] = True break if mode != ModeKeys.PREDICT: data_batch_size = batch_outs['batch_size'] batch_outs = (batch_outs['total_loss'] + batch_outs['output_losses'] + batch_outs['metrics']) if current_batch_size != data_batch_size: batch_logs['size'] = data_batch_size current_batch_size = data_batch_size else: batch_outs = _aggregate_predict_results(strategy, batch_outs, model) if step == 0: aggregator.create(batch_outs) if use_steps: aggregator.aggregate(batch_outs) else: aggregator.aggregate( batch_outs, batch_start=step * batch_size, batch_end=step * batch_size + current_batch_size) cbks.make_logs(model, batch_logs, batch_outs, mode) step += 1 if callbacks.model.stop_training: break # End of an epoch. aggregator.finalize() return aggregator.results class Loop(training_utils.TrainingLoop): """The training loop for the TF 2.0. This class has some existing assumption for runtime, eg eager by default, have distribution strategy, etc. 
""" def fit( self, model, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0., validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs): batch_size = model._validate_or_infer_batch_size( batch_size, steps_per_epoch, x) strategy = _get_distribution_strategy(model) batch_size, steps_per_epoch = dist_utils.process_batch_and_step_size( strategy, x, batch_size, steps_per_epoch, ModeKeys.TRAIN, validation_split=validation_split) dist_utils.validate_callbacks(input_callbacks=callbacks, optimizer=model.optimizer) # Enter tf.distribute.Strategy scope. with strategy.scope(): training_data_adapter, validation_adapter = _process_training_inputs( model, x, y, batch_size=batch_size, epochs=epochs, sample_weights=sample_weight, class_weights=class_weight, validation_split=validation_split, steps_per_epoch=steps_per_epoch, shuffle=shuffle, validation_data=validation_data, validation_steps=validation_steps, distribution_strategy=strategy, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing) total_samples = _get_total_number_of_samples(training_data_adapter) use_sample = total_samples is not None do_validation = (validation_adapter is not None) recreate_training_iterator = ( training_data_adapter.should_recreate_iterator(steps_per_epoch)) if not steps_per_epoch: # TODO(b/139762795): Add step inference for when steps is None to # prevent end of sequence warning message. steps_per_epoch = training_data_adapter.get_size() # tf.print('{} on {} steps.'.format(ModeKeys.TRAIN, steps_per_epoch)) training_context = TrainingContext() initial_epoch = model._maybe_load_initial_epoch_from_ckpt( initial_epoch, ModeKeys.TRAIN) training_dataset = training_data_adapter.get_dataset() # Raise an error if steps_per_epoch isn't specified but the dataset # is infinite. # TODO(scottzhu): This check should probably happen in the adapter training_utils.infer_steps_for_dataset( model, training_dataset, steps_per_epoch, steps_name='steps_per_epoch', epochs=0) training_dataset = strategy.experimental_distribute_dataset( training_dataset) training_function = training_v2_utils._get_or_make_execution_function( model, ModeKeys.TRAIN) training_data_iter = None if do_validation: validation_dataset = validation_adapter.get_dataset() if not validation_steps: # Raise an error if validation_steps isn't specified but the # validation dataset is infinite. validation_steps = ( validation_adapter.get_size() or training_utils.infer_steps_for_dataset( model, validation_dataset, validation_steps, steps_name='validation_steps')) eval_function = training_v2_utils._get_or_make_execution_function( model, ModeKeys.TEST) eval_data_iter = None validation_dataset = strategy.experimental_distribute_dataset( validation_dataset) val_total_samples = _get_total_number_of_samples(validation_adapter) else: val_total_samples = None if verbose and (total_samples or steps_per_epoch): _print_train_info(total_samples, steps_per_epoch, val_total_samples, validation_steps) training_callbacks = cbks.configure_callbacks( callbacks, model, do_validation=do_validation, batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, samples=total_samples or steps_per_epoch, count_mode='samples' if use_sample else 'steps', verbose=0, # Handle ProgBarLogger separately in this loop. 
mode=ModeKeys.TRAIN) with training_context.on_start(model, training_callbacks, use_sample, verbose, ModeKeys.TRAIN): for epoch in range(initial_epoch, epochs): if training_context.callbacks.model.stop_training: break # Training with training_context.on_epoch(epoch, ModeKeys.TRAIN) as epoch_logs: model.reset_metrics() if training_data_iter is None or recreate_training_iterator: if (training_data_iter is not None and distribution_strategy_context.has_strategy()): # TODO(kaftan): remove this when MultiDeviceIterator is a ## compositetensor (unless this is more efficient) training_data_iter._initializer # pylint: disable=pointless-statement else: training_data_iter = iter(training_dataset) training_result = run_one_epoch( model, training_data_iter, training_function, dataset_size=training_data_adapter.get_size(), batch_size=training_data_adapter.batch_size(), strategy=strategy, steps_per_epoch=steps_per_epoch, num_samples=total_samples, mode=ModeKeys.TRAIN, training_context=training_context, total_epochs=epochs) cbks.make_logs(model, epoch_logs, training_result, ModeKeys.TRAIN) # Evaluation if (do_validation and training_utils.should_run_validation(validation_freq, epoch) and not training_callbacks.model.stop_training): if (eval_data_iter is not None and distribution_strategy_context.has_strategy()): # TODO(kaftan): remove this when MultiDeviceIterator is a ## compositetensor (unless this is more efficient) eval_data_iter._initializer # pylint: disable=pointless-statement else: eval_data_iter = iter(validation_dataset) validation_callbacks = cbks.configure_callbacks( training_callbacks, model, batch_size=batch_size, epochs=1, steps_per_epoch=validation_steps, samples=val_total_samples or validation_steps, count_mode='samples' if use_sample else 'steps', verbose=0, # Handle ProgBarLogger separately in this loop. mode=ModeKeys.TEST) eval_context = TrainingContext() with eval_context.on_start( model, validation_callbacks, use_sample, verbose=0, mode=ModeKeys.TEST): with eval_context.on_epoch(epoch, ModeKeys.TEST): model.reset_metrics() eval_result = run_one_epoch( model, eval_data_iter, eval_function, dataset_size=validation_adapter.get_size(), batch_size=validation_adapter.batch_size(), strategy=strategy, steps_per_epoch=validation_steps, num_samples=val_total_samples, mode=ModeKeys.TEST, training_context=eval_context, total_epochs=1) cbks.make_logs(model, epoch_logs, eval_result, ModeKeys.TEST, prefix='val_') return model.history def _model_iteration( self, model, mode, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs): batch_size = model._validate_or_infer_batch_size( batch_size, steps, x) strategy = _get_distribution_strategy(model) batch_size, steps = dist_utils.process_batch_and_step_size( strategy, x, batch_size, steps, mode) dist_utils.validate_callbacks(input_callbacks=callbacks, optimizer=model.optimizer) # Enter tf.distribute.Strategy scope. with strategy.scope(): adapter = _process_inputs( model, x, y, batch_size=batch_size, sample_weights=sample_weight, steps=steps, distribution_strategy=strategy, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing) total_samples = _get_total_number_of_samples(adapter) use_sample = total_samples is not None dataset = adapter.get_dataset() if not steps: # Raise an error if `steps` isn't specified but the dataset # is infinite. 
steps = adapter.get_size() or training_utils.infer_steps_for_dataset( model, dataset, steps, steps_name='steps') # tf.print('{} on {} steps.'.format(ModeKeys.TRAIN, steps_per_epoch)) training_context = TrainingContext() dataset = strategy.experimental_distribute_dataset(dataset) execution_function = training_v2_utils._get_or_make_execution_function( model, mode) data_iterator = iter(dataset) callbacks = cbks.configure_callbacks( callbacks, model, do_validation=False, batch_size=batch_size, epochs=1, steps_per_epoch=steps, samples=use_sample, count_mode='samples' if use_sample else 'steps', verbose=0, # Handle ProgBarLogger separately in this loop. mode=mode) with training_context.on_start( model, callbacks, use_sample, verbose, mode): with training_context.on_epoch(0, mode) as epoch_logs: model.reset_metrics() result = run_one_epoch( model, data_iterator, execution_function, dataset_size=adapter.get_size(), batch_size=adapter.batch_size(), strategy=strategy, steps_per_epoch=steps, num_samples=total_samples, mode=mode, training_context=training_context, total_epochs=1) cbks.make_logs(model, epoch_logs, result, mode) if len(result) == 1: result = result[0] return result def evaluate( self, model, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs): return self._model_iteration( model, ModeKeys.TEST, x=x, y=y, batch_size=batch_size, verbose=verbose, sample_weight=sample_weight, steps=steps, callbacks=callbacks, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, **kwargs) def predict(self, model, x, batch_size=None, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs): return self._model_iteration( model, ModeKeys.PREDICT, x=x, batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, **kwargs) def _get_distribution_strategy(model): """Get the model's distribution strategy.""" if model._compile_time_distribution_strategy: strategy = model._compile_time_distribution_strategy else: # Grab the active strategy if the model was never compiled # but it is now predicting. strategy = distribution_strategy_context.get_strategy() return strategy def _process_training_inputs(model, x, y, batch_size=None, epochs=1, sample_weights=None, class_weights=None, steps_per_epoch=None, validation_split=0., validation_data=None, validation_steps=None, shuffle=True, distribution_strategy=None, max_queue_size=10, workers=1, use_multiprocessing=False): """Process the data input for fit() with respect to validation_split.""" if validation_split and 0. < validation_split < 1. and validation_data: raise ValueError('validation_data and validation_split cannot be used ' 'at same time.') adapter_cls = data_adapter.select_data_adapter(x, y) # Handle validation_split, we want to split the data and get the training # section before we give it to data adapter. if validation_split and 0. < validation_split < 1.: if adapter_cls not in _ADAPTER_FOR_VALIDATION_SPLIT: raise ValueError( '`validation_split` argument is not supported when ' 'data adapter is {}. Received: x={}, validation_split={}'.format( adapter_cls, x, validation_split)) # Retrieve the training section from x and y, and then construct dataset # from it. 
x, y, sample_weights = model._standardize_user_data( x, y, sample_weight=sample_weights, class_weight=class_weights, batch_size=batch_size, check_steps=False, steps=steps_per_epoch) (x, y, sample_weights, val_x, val_y, val_sample_weights) = training_utils.split_training_and_validation_data( x, y, sample_weights, validation_split) train_adapter = adapter_cls( x, y, batch_size=batch_size, epochs=epochs, sample_weights=sample_weights, shuffle=shuffle, distribution_strategy=distribution_strategy) val_adapter = adapter_cls(val_x, val_y, sample_weights=val_sample_weights, batch_size=batch_size, distribution_strategy=distribution_strategy) else: train_adapter = _process_inputs( model, x, y, sample_weights=sample_weights, batch_size=batch_size, epochs=epochs, class_weights=class_weights, shuffle=shuffle, steps=steps_per_epoch, distribution_strategy=distribution_strategy, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing) val_adapter = None if validation_data: (val_x, val_y, val_sample_weights) = training_utils.unpack_validation_data( validation_data) # For eval data, we use the training data batch_size it was unknown. # This is useful for generator/sequence training data input with numpy # validation data input. if not batch_size: batch_size = train_adapter.batch_size() val_adapter = _process_inputs(model, val_x, val_y, sample_weights=val_sample_weights, batch_size=batch_size, class_weights=class_weights, steps=validation_steps, distribution_strategy=distribution_strategy) elif validation_steps: raise ValueError('`validation_steps` should not be specified if ' '`validation_data` is None.') return train_adapter, val_adapter def _process_inputs(model, x, y, batch_size=None, epochs=1, sample_weights=None, class_weights=None, shuffle=False, steps=None, distribution_strategy=None, max_queue_size=10, workers=1, use_multiprocessing=False): """Process the inputs for fit/eval/predict().""" adapter_cls = data_adapter.select_data_adapter(x, y) if adapter_cls in _ADAPTER_FOR_STANDARDIZE_USER_DATA: x, y, sample_weights = model._standardize_user_data( x, y, sample_weight=sample_weights, class_weight=class_weights, batch_size=batch_size, check_steps=False, steps=steps) adapter = adapter_cls( x, y, batch_size=batch_size, epochs=epochs, steps=steps, sample_weights=sample_weights, shuffle=shuffle, distribution_strategy=distribution_strategy, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing) # As a fallback for the data type that does not work with # _standardize_user_data, use the _prepare_model_with_inputs. 
if adapter_cls not in _ADAPTER_FOR_STANDARDIZE_USER_DATA: training_v2_utils._prepare_model_with_inputs(model, adapter.get_dataset()) return adapter def _get_total_number_of_samples(adapter): if not adapter.get_size() or not adapter.batch_size(): return None total_sample = adapter.get_size() * adapter.batch_size() if adapter.has_partial_batch(): total_sample -= (adapter.batch_size() - adapter.partial_batch_size()) return total_sample def _aggregate_predict_results(strategy, batch_outs, model): if not isinstance(batch_outs, list): batch_outs = [batch_outs] total_batch_outs = [] for i in range(len(model.outputs)): num_replicas = strategy.num_replicas_in_sync nested_outs = batch_outs[i * num_replicas:i * num_replicas + num_replicas] total_batch_outs.append( dist_utils.concat_along_batch_dimension(nest.flatten(nested_outs))) return total_batch_outs def _print_train_info(total_samples, steps, val_total_samples, val_steps): increment = 'samples' if total_samples else 'steps' conjunction = 'on' if total_samples else 'for' msg = 'Train {} {} {}'.format(conjunction, total_samples or steps, increment) if val_total_samples or val_steps: increment = 'samples' if val_total_samples else 'steps' conjunction = 'on' if val_total_samples else 'for' msg += ', validate {} {} {}'.format(conjunction, val_total_samples or val_steps, increment) print(msg) class TrainingContext(object): """Utility object that wrap around callbacks and progress bars.""" @tf_contextlib.contextmanager def on_start(self, model, callbacks=None, use_samples=False, verbose=0, mode=ModeKeys.TRAIN): """Provide a scope for the whole training process.""" # TODO(omalleyt): Handle ProgBar as part of Callbacks once hooks are ready. progbar = training_utils.get_progbar( model, 'samples' if use_samples else 'steps') progbar.params = callbacks.params progbar.params['verbose'] = verbose callbacks.model.stop_training = False callbacks._call_begin_hook(mode) progbar.on_train_begin() # Cache those two instance so that it can be used in other functions. self.callbacks = callbacks self.progbar = progbar try: yield finally: # End of all epochs self.callbacks._call_end_hook(mode) @tf_contextlib.contextmanager def on_epoch(self, epoch=0, mode=ModeKeys.TRAIN): """Provide a scope for running one epoch.""" epoch_logs = {} if mode == ModeKeys.TRAIN: self.callbacks.on_epoch_begin(epoch, epoch_logs) self.progbar.on_epoch_begin(epoch, epoch_logs) try: yield epoch_logs finally: if mode == ModeKeys.TRAIN: # Epochs only apply to `fit`. self.callbacks.on_epoch_end(epoch, epoch_logs) self.progbar.on_epoch_end(epoch, epoch_logs) @tf_contextlib.contextmanager def on_batch(self, step=0, mode=ModeKeys.TRAIN, size=1): """Provide a scope for running one batch.""" batch_logs = {'batch': step, 'size': size} self.callbacks._call_batch_hook( mode, 'begin', step, batch_logs) self.progbar.on_batch_begin(step, batch_logs) try: yield batch_logs finally: if not batch_logs.pop('data_exhausted', False): self.callbacks._call_batch_hook( mode, 'end', step, batch_logs) self.progbar.on_batch_end(step, batch_logs)
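# Illustrative sketch (not part of the original module): the per-step batch
# sizing used by `run_one_epoch` when `num_samples` is known (use_steps is
# False). The helper name `_demo_step_sizes` is an assumption made only for
# this example.
def _demo_step_sizes(num_samples, batch_size, target_steps):
  """Mirrors the `current_batch_size` logic in `run_one_epoch`."""
  sizes = []
  for step in range(target_steps):
    if step < target_steps - 1:
      sizes.append(batch_size)                       # full batches first
    else:
      sizes.append(num_samples - step * batch_size)  # trailing partial batch
  return sizes


if __name__ == '__main__':
  # 106 samples at batch size 32 -> per-step sizes of [32, 32, 32, 10].
  print(_demo_step_sizes(num_samples=106, batch_size=32, target_steps=4))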
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/training_v2.py
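The `TrainingContext` class defined in training_v2.py above wraps callbacks and the progress bar in three nested scopes (whole run, epoch, batch). Below is a minimal sketch, not taken from the file, of how those scopes are intended to nest inside a training loop; `model` and `callbacks` (a configured Keras callback list) are assumed to exist, and the per-step work is left as a placeholder.

from tensorflow.python.keras.engine import training_v2
from tensorflow.python.keras.utils.mode_keys import ModeKeys

training_context = training_v2.TrainingContext()
# `model` and `callbacks` are assumed to be a compiled Keras model and a
# configured callback list; both are placeholders in this sketch.
with training_context.on_start(model, callbacks, use_samples=False,
                               mode=ModeKeys.TRAIN):
  for epoch in range(num_epochs):          # num_epochs is assumed
    with training_context.on_epoch(epoch, ModeKeys.TRAIN) as epoch_logs:
      for step in range(steps_per_epoch):  # steps_per_epoch is assumed
        with training_context.on_batch(step, ModeKeys.TRAIN) as batch_logs:
          pass  # run one training step here and fill `batch_logs` with metrics
      # Aggregated metrics would be written into `epoch_logs` before exiting.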
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility object to handler partial batches for TPUStrategy.""" # pylint: disable=protected-access from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import six from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend as K from tensorflow.python.ops import array_ops from tensorflow.python.util import nest class PartialBatchPaddingHandler(object): """A container that holds info about partial batches for `predict()`.""" def __init__(self, output_shape): self.padded_batch_size = 0 self.padding_mask = array_ops.zeros(0) self.output_shape = output_shape def get_real_batch_size(self, dataset_batch): """Returns the number of elements in a potentially partial batch.""" if isinstance(dataset_batch, (tuple, list)): dataset_batch = dataset_batch[0] assert nest.flatten(dataset_batch) def _find_any_tensor(batch_features): tensors = [ x for x in nest.flatten(batch_features) if tensor_util.is_tensor(x) ] if not tensors: raise ValueError('Cannot find any Tensor in features dict.') return tensors[0] return K.cast(K.shape(_find_any_tensor(dataset_batch))[0], dtype='int64') def update_mask(self, padding_mask, dataset_batch): """Calculate and cache the amount of padding required for a batch.""" original_batch_size = self.get_real_batch_size(dataset_batch) missing_count = self.padded_batch_size - original_batch_size mask = K.concatenate([array_ops.ones(original_batch_size), array_ops.zeros(missing_count)], axis=0) return K.concatenate([padding_mask, mask], axis=0) def pad_batch(self, *dataset_batch_elements): """Pads out the batch dimension of a tensor to the complete batch size.""" def _pad(batch): """Helper function to pad nested data within each batch elements.""" padded_dict_batch = {} if isinstance(batch, dict): for key, value in six.iteritems(batch): padded_dict_batch[key] = _pad(value) return padded_dict_batch rank = len(batch.shape) assert rank > 0 missing_count = (self.padded_batch_size - self.get_real_batch_size(batch)) padding = K.stack([[0, missing_count]] + [[0, 0]] * (rank - 1)) return array_ops.pad(batch, padding, 'constant') if len(dataset_batch_elements) == 1: return _pad(dataset_batch_elements[0]) batch_elements = [] for batch_element in dataset_batch_elements: batch_elements.append(_pad(batch_element)) return tuple(batch_elements) def apply_mask(self, prediction_result): """Removes prediction output that corresponds to padded input.""" padding_mask = K.get_value(self.padding_mask) assert len(padding_mask.shape) == 1 if len(self.output_shape) == 1: prediction = np.take(prediction_result, np.nonzero( padding_mask[:len(prediction_result)]), axis=0) if prediction.shape[0] == 1: prediction = np.squeeze(prediction, axis=0) return prediction else: predictions = [] for i in range(len(self.output_shape)): 
prediction = prediction_result[i] prediction = np.take(prediction, np.nonzero( padding_mask[:len(prediction)]), axis=0) predictions.append(np.squeeze(prediction)) return predictions
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/partial_batch_padding_handler.py
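To make the padding arithmetic in `PartialBatchPaddingHandler` concrete, the NumPy sketch below uses made-up sizes (a padded batch of 8 holding 3 real samples) to show how the mask built by `update_mask` lets `apply_mask` strip the padded rows from a prediction.

import numpy as np

padded_batch_size = 8   # assumed: the size every batch is padded up to
real_batch_size = 3     # assumed: samples actually present in the last batch
missing_count = padded_batch_size - real_batch_size

# Mask with 1.0 for real rows and 0.0 for padded rows, as in update_mask
# (which builds the same thing with backend ops).
mask = np.concatenate([np.ones(real_batch_size), np.zeros(missing_count)])

# Stand-in for the model's prediction over the padded batch.
prediction = np.arange(padded_batch_size, dtype=np.float32)

# apply_mask keeps only the rows whose mask entry is non-zero.
kept = np.take(prediction, np.nonzero(mask[:len(prediction)]), axis=0)
print(np.squeeze(kept))  # -> [0. 1. 2.]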
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Model saving utilities. Everything has been moved to keras/saving/. This file will be deleted soon. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras.saving import * # pylint: disable=wildcard-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/saving.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests specific to `Sequential` model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class TestSequential(keras_parameterized.TestCase): """Most Sequential model API tests are covered in `training_test.py`. """ @keras_parameterized.run_all_keras_modes def test_basic_methods(self): model = keras.models.Sequential() model.add(keras.layers.Dense(1, input_dim=2)) model.add(keras.layers.Dropout(0.3, name='dp')) model.add(keras.layers.Dense(2, kernel_regularizer='l2', kernel_constraint='max_norm')) self.assertEqual(len(model.layers), 3) self.assertEqual(len(model.weights), 2 * 2) self.assertEqual(model.get_layer(name='dp').name, 'dp') @keras_parameterized.run_all_keras_modes def test_input_defined_first_layer(self): model = keras.models.Sequential() model.add(keras.Input(shape=(2,), name='input_layer')) model.add(keras.layers.Dense(1)) model.add(keras.layers.Dropout(0.3, name='dp')) model.add(keras.layers.Dense(2, kernel_regularizer='l2', kernel_constraint='max_norm')) self.assertLen(model.layers, 3) self.assertLen(model.weights, 2 * 2) self.assertEqual(model.get_layer(name='dp').name, 'dp') @keras_parameterized.run_all_keras_modes def test_single_layer_in_init(self): model = keras.models.Sequential(keras.layers.Dense(1)) self.assertLen(model.layers, 1) @keras_parameterized.run_all_keras_modes def test_sequential_pop(self): num_hidden = 5 input_dim = 3 batch_size = 5 num_classes = 2 model = testing_utils.get_small_sequential_mlp( num_hidden, num_classes, input_dim) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) model.fit(x, y, epochs=1) model.pop() self.assertEqual(len(model.layers), 1) self.assertEqual(model.output_shape, (None, num_hidden)) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) y = np.random.random((batch_size, num_hidden)) model.fit(x, y, epochs=1) # Test popping single-layer model model = keras.models.Sequential() model.add(keras.layers.Dense(num_hidden, input_dim=input_dim)) model.pop() 
self.assertEqual(model.layers, []) self.assertEqual(model.outputs, None) # Invalid use case model = keras.models.Sequential() with self.assertRaises(TypeError): model.pop() @keras_parameterized.run_all_keras_modes def test_sequential_deferred_build_with_np_arrays(self): num_hidden = 5 input_dim = 3 batch_size = 5 num_classes = 2 model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes) model.compile( loss='mse', optimizer='rmsprop', metrics=[keras.metrics.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(len(model.layers), 2) with self.assertRaisesRegexp( ValueError, 'Weights for model .* have not yet been created'): len(model.weights) self.assertFalse(model.built) x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) model.fit(x, y, epochs=1) self.assertTrue(model.built) self.assertFalse(model._is_graph_network) self.assertEqual(len(model.weights), 2 * 2) @keras_parameterized.run_all_keras_modes def test_sequential_deferred_build_with_dataset_iterators(self): num_hidden = 5 input_dim = 3 num_classes = 2 num_samples = 50 steps_per_epoch = 10 model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes) model.compile( loss='mse', optimizer='rmsprop', metrics=[keras.metrics.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(len(model.layers), 2) with self.assertRaisesRegexp( ValueError, 'Weights for model .* have not yet been created'): len(model.weights) self.assertFalse(model.built) x = array_ops.ones((num_samples, input_dim)) y = array_ops.zeros((num_samples, num_classes)) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=steps_per_epoch) self.assertTrue(model.built) self.assertEqual(len(model.weights), 2 * 2) self.assertFalse(model._is_graph_network) # TODO(kaftan) This test fails w/ run_with_all_keras_modes. 
File ticket @parameterized.parameters((True,), (False,)) @tf_test_util.run_deprecated_v1 def test_training_and_eval_methods_on_symbolic_tensors(self, deferred): with self.cached_session(): def get_model(): if deferred: model = testing_utils.get_small_sequential_mlp(10, 4) else: model = testing_utils.get_small_sequential_mlp(10, 4, input_dim=3) model.compile( optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) return model inputs = keras.backend.zeros(shape=(10, 3)) targets = keras.backend.zeros(shape=(10, 4)) model = get_model() model.fit(inputs, targets, epochs=10, steps_per_epoch=30) model = get_model() model.evaluate(inputs, targets, steps=2, verbose=0) model = get_model() model.predict(inputs, steps=2) model = get_model() model.train_on_batch(inputs, targets) model = get_model() model.test_on_batch(inputs, targets) model = get_model() model.fit( inputs, targets, epochs=1, steps_per_epoch=2, verbose=0, validation_data=(inputs, targets), validation_steps=2) @keras_parameterized.run_all_keras_modes def test_invalid_use_cases(self): # Added objects must be layer instances with self.assertRaises(TypeError): model = keras.models.Sequential() model.add(None) # Added layers cannot have multiple outputs class MyLayer(keras.layers.Layer): def call(self, inputs): return [3 * inputs, 2 * inputs] def compute_output_shape(self, input_shape): return [input_shape, input_shape] with self.assertRaises(ValueError): model = keras.models.Sequential() model.add(MyLayer(input_shape=(3,))) with self.assertRaises(TypeError): model = keras.models.Sequential() model.add(keras.layers.Dense(1, input_dim=1)) model.add(MyLayer()) @keras_parameterized.run_all_keras_modes def test_nested_sequential_trainability(self): input_dim = 20 num_units = 10 num_classes = 2 inner_model = keras.models.Sequential() inner_model.add(keras.layers.Dense(num_units, input_shape=(input_dim,))) model = keras.models.Sequential() model.add(inner_model) model.add(keras.layers.Dense(num_classes)) self.assertEqual(len(model.layers), 2) self.assertEqual(len(model.trainable_weights), 4) inner_model.trainable = False self.assertEqual(len(model.trainable_weights), 2) inner_model.trainable = True self.assertEqual(len(model.trainable_weights), 4) def test_sequential_update_disabling(self): val_a = np.random.random((10, 4)) val_out = np.random.random((10, 4)) with self.cached_session(): model = keras.models.Sequential() model.add(keras.layers.BatchNormalization(input_shape=(4,))) assert model.updates model.trainable = False assert not model.updates model.compile('sgd', 'mse') assert not model.updates x1 = model.predict(val_a) model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) self.assertAllClose(x1, x2, atol=1e-7) model.trainable = True model.compile('sgd', 'mse') assert model.updates model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) assert np.abs(np.sum(x1 - x2)) > 1e-5 @keras_parameterized.run_all_keras_modes def test_sequential_deferred_build_serialization(self): num_hidden = 5 input_dim = 3 batch_size = 5 num_classes = 2 model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes) model.compile( loss='mse', optimizer='rmsprop', metrics=[keras.metrics.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertFalse(model.built) x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) model.train_on_batch(x, y) self.assertTrue(model.built) config = 
model.get_config() self.assertIn('build_input_shape', config) new_model = keras.models.Sequential.from_config(config) self.assertEqual(len(new_model.layers), 2) self.assertEqual(len(new_model.weights), 4) @keras_parameterized.run_all_keras_modes def test_sequential_shape_inference_deferred(self): model = testing_utils.get_small_sequential_mlp(4, 5) output_shape = model.compute_output_shape((None, 7)) self.assertEqual(tuple(output_shape.as_list()), (None, 5)) @keras_parameterized.run_all_keras_modes def test_sequential_build_deferred(self): model = testing_utils.get_small_sequential_mlp(4, 5) model.build((None, 10)) self.assertTrue(model.built) self.assertEqual(len(model.weights), 4) # Test with nested model model = testing_utils.get_small_sequential_mlp(4, 3) inner_model = testing_utils.get_small_sequential_mlp(4, 5) model.add(inner_model) model.build((None, 10)) self.assertTrue(model.built) self.assertEqual(len(model.weights), 8) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_sequential_deferred_manual_build(self): model = testing_utils.get_small_sequential_mlp(4, 5) self.assertFalse(model.built) model(array_ops.zeros([1, 2])) self.assertTrue(model.built) self.assertEqual(len(model.outputs), 0) model.compile( 'rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(len(model.outputs), 0) model.train_on_batch(np.zeros((1, 2)), np.zeros((1, 5))) self.assertEqual(len(model.outputs), 1) @keras_parameterized.run_all_keras_modes def test_sequential_nesting(self): model = testing_utils.get_small_sequential_mlp(4, 3) inner_model = testing_utils.get_small_sequential_mlp(4, 5) model.add(inner_model) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.random.random((2, 6)) y = np.random.random((2, 5)) model.fit(x, y, epochs=1) @keras_parameterized.run_all_keras_modes def test_variable_names(self): model = keras.models.Sequential([keras.layers.Dense(3)]) model.add(keras.layers.Dense(2)) model(array_ops.ones([2, 4])) self.assertEqual( ['sequential/dense/kernel:0', 'sequential/dense/bias:0', 'sequential/dense_1/kernel:0', 'sequential/dense_1/bias:0'], [v.name for v in model.variables]) @keras_parameterized.run_all_keras_modes def test_input_assumptions_propagation(self): model = keras.models.Sequential() model.add(keras.layers.Dense(1)) if context.executing_eagerly(): with self.assertRaisesRegexp(ValueError, 'expected min_ndim=2, found ndim=0'): model(1.0) @keras_parameterized.run_all_keras_modes def test_string_input(self): seq = keras.Sequential([ keras.layers.InputLayer(input_shape=(1,), dtype=dtypes.string), keras.layers.Lambda(lambda x: x[0]) ]) seq.run_eagerly = testing_utils.should_run_eagerly() seq._experimental_run_tf_function = testing_utils.should_run_tf_function() preds = seq.predict([['tensorflow eager']]) self.assertEqual(preds.shape, (1,)) @keras_parameterized.run_all_keras_modes def test_multi_output_layer_not_accepted(self): class MultiOutputLayer(keras.layers.Layer): def call(self, inputs): return inputs, inputs with self.assertRaisesRegexp( ValueError, 'should have a single output tensor'): keras.Sequential([MultiOutputLayer(input_shape=(3,))]) @keras_parameterized.run_all_keras_modes def test_layer_add_after_compile_deferred(self): model = keras.Sequential([keras.layers.Dense(3)]) self.assertFalse(model.built) self.assertFalse(model.inputs) 
self.assertFalse(model.outputs) model.compile('adam', loss='mse') model.fit(np.random.random((1, 3)), np.random.random((1, 3))) self.assertTrue(model.built) self.assertTrue(model.inputs) self.assertTrue(model.outputs) model.add(keras.layers.Dense(3)) self.assertTrue(model.built) self.assertTrue(model.inputs) self.assertTrue(model.outputs) model.compile('adam', loss='mse') model.fit(np.random.random((1, 3)), np.random.random((1, 3))) def test_sequential_layer_tracking(self): """Test that Sequential only tracks layers added in init or `.add`.""" layer = keras.layers.Dense(1) model = keras.Sequential([layer]) self.assertEqual(model._layers[-1], layer) model.a = [keras.layers.Dense(3)] # should not be added to the layers list. self.assertEqual(model._layers[-1], layer) layer2 = keras.layers.Dense(2) model.add(layer2) self.assertEqual(model._layers[-1], layer2) model.a = [keras.layers.Dense(3)] # should not be added to the layers list. self.assertEqual(model._layers[-1], layer2) model.pop() self.assertEqual(model._layers[-1], layer) class TestSequentialEagerIntegration(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes def test_defun_on_call(self): # Check that one can subclass Sequential and place the `call` in a `defun`. class MySequential(keras.Sequential): def __init__(self, name=None): super(MySequential, self).__init__(name=name) self.call = function.defun(self.call) model = MySequential() model.add(keras.layers.Dense(4, activation='relu')) model.add(keras.layers.Dense(5, activation='softmax')) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.random.random((2, 6)) y = np.random.random((2, 5)) model.fit(x, y, epochs=1) @keras_parameterized.run_all_keras_modes def test_build_before_fit(self): # Fix for b/112433577 model = testing_utils.get_small_sequential_mlp(4, 5) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.build((None, 6)) x = np.random.random((2, 6)) y = np.random.random((2, 5)) model.fit(x, y, epochs=1) @keras_parameterized.run_all_keras_modes def test_sequential_model_fails_with_dict_inputs(self): num_classes = 5 model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=num_classes) model.compile( 'rmsprop', metrics=['acc'], weighted_metrics=['mae'], loss='categorical_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = {'dense_input': np.random.random((10, 1))} y = np.random.randint(num_classes, size=(10, 1)) with self.assertRaisesRegexp( ValueError, 'Passing a dictionary input to a Sequential Model which ' 'doesn\'t have FeatureLayer as the first layer is an error'): model.fit(x, y, batch_size=5, epochs=1) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/sequential_test.py
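The deferred-build behaviour exercised by several of the tests above (e.g. `test_sequential_build_deferred`) can be summarised in a few lines; this is a minimal sketch, not part of the test file:

from tensorflow.python import keras

# No input shape is given, so the model stays unbuilt and has no weights yet.
model = keras.Sequential([keras.layers.Dense(4), keras.layers.Dense(2)])
assert not model.built

# Supplying an input shape (or fitting/predicting on data) triggers building.
model.build((None, 3))
assert model.built
assert len(model.weights) == 4  # kernel + bias for each of the two Dense layers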
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Keras Engine: graph topology and training loop functionality. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # TODO(fchollet): Remove hourglass imports once external code is done importing # non-public APIs. from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_layer import Input from tensorflow.python.keras.engine.input_layer import InputLayer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.utils.layer_utils import get_source_inputs del absolute_import del division del print_function
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Adapter module that converts different input data objects into a tf.data.Dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import itertools import math import numpy as np import six from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import ops from tensorflow.python.framework.ops import composite_tensor from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.utils import data_utils from tensorflow.python.ops import array_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect @six.add_metaclass(abc.ABCMeta) class DataAdapter(object): """Base class for input data adapters. In TF 2.0, tf.data is the preferred API for users to feed in data. In order to simplify the training code path, all input data objects will be converted to `tf.data.Dataset` if possible. Note that since this class is mainly targeted at TF 2.0, it might have a lot of assumptions under the hood, e.g. eager context by default, distribution strategy, etc. In the meantime, some legacy feature support might be dropped, e.g. Iterator from the dataset API in v1, etc. Sample usage of this class: ``` x = tf.data.Dataset.range(100) adapter_cls = [NumpyArrayDataAdapter, ..., DatasetAdapter] applicable_adapters = [cls for cls in adapter_cls if cls.can_handle(x)] if len(applicable_adapters) != 1: raise ValueError("Expect only one adapter class to handle the input") dataset = applicable_adapters[0](x).get_dataset() for data in dataset: # training ``` """ @staticmethod def can_handle(x, y=None): """Whether the current DataAdapter can handle the input x and y. Structure-wise, x and y can be a single object, a list of objects if there are multiple inputs/outputs, or a dictionary of objects when the inputs/outputs are named. Args: x: input features. y: target labels. Note that y could be None in the case of prediction. Returns: boolean """ raise NotImplementedError @abc.abstractmethod def __init__(self, x, y=None, **kwargs): """Create a DataAdapter based on data inputs. The caller must make sure to call `can_handle()` first before invoking this method. Providing an unsupported data type will result in unexpected behavior. Args: x: input features. y: target labels. Note that y could be None in the case of prediction. **kwargs: Other keyword arguments for DataAdapter during the construction of the tf.data.Dataset. For example: - Numpy data might have `sample_weights` which will be used for weighting the loss function during training. - Numpy data might need to have a `batch_size` parameter when constructing the dataset and iterator. - Certain inputs might need to be distribution strategy aware. 
When `distribution_strategy` is passed, the created dataset needs to respect the strategy. DataAdapter might choose to ignore any keyword argument if it doesn't use it, or raise an exception if any required argument is not provided. """ if not self.can_handle(x, y): raise ValueError("{} Cannot handle input {}".format(self.__class__, x)) @abc.abstractmethod def get_dataset(self): """Get a dataset instance for the current DataAdapter. Note that the dataset returned does not repeat across epochs, so the caller might need to create a new iterator for the same dataset at the beginning of each epoch. This behavior might change in the future. Returns: A tf.data.Dataset. The caller might use the dataset in different contexts, e.g. iter(dataset) in eager mode to get the values directly, or in graph mode, providing the iterator tensor to the Keras model function. """ raise NotImplementedError @abc.abstractmethod def get_size(self): """Return the size (number of batches) for the dataset created. For certain types of data input, the number of batches is known, e.g. for Numpy data, the size is the same as (number_of_elements / batch_size). Whereas for a dataset or python generator, the size is unknown since it may or may not have an end state. Returns: int, the number of batches for the dataset, or None if it is unknown. The caller could use this to control the training loop, show a progress bar, or handle an unexpected StopIteration error. """ raise NotImplementedError @abc.abstractmethod def batch_size(self): """Return the batch size of the dataset created. For certain types of data input, the batch size is known, and even required, e.g. for a numpy array. Whereas for a dataset, the batch size is unknown unless we take a peek. Returns: int, the batch size of the dataset, or None if it is unknown. """ raise NotImplementedError @abc.abstractmethod def has_partial_batch(self): """Whether the dataset has a partial batch at the end.""" raise NotImplementedError @abc.abstractmethod def partial_batch_size(self): """The size of the final partial batch for the dataset. Will return None if has_partial_batch is False or batch_size is None. """ raise NotImplementedError def should_recreate_iterator(self, steps_per_epoch): """Returns whether a new iterator should be created every epoch.""" # Only recreate the iterator when the data has a fixed length, which will be # fully consumed every epoch, or has an unknown length (dataset, generator) # and will be fully consumed (steps_per_epoch is None). return self.get_size() is not None or steps_per_epoch is None class TensorLikeDataAdapter(DataAdapter): """Adapter that handles Tensor-like objects, e.g. EagerTensor and NumPy.""" @staticmethod def can_handle(x, y=None): # TODO(kaftan): Check performance implications of using a flatten # here for other types of inputs. flat_inputs = nest.flatten(x) if y is not None: flat_inputs += nest.flatten(y) def _is_tensor(v): if isinstance(v, (ops.Tensor, np.ndarray)): return True return False return all(_is_tensor(v) for v in flat_inputs) def __init__(self, x, y=None, sample_weights=None, batch_size=None, epochs=1, steps=None, shuffle=False, **kwargs): super(TensorLikeDataAdapter, self).__init__(x, y, **kwargs) x = _process_numpy_inputs(x) y = _process_numpy_inputs(y) sample_weights = _process_numpy_inputs(sample_weights) # If sample_weights are not specified for an output, use 1.0 as weights. 
if sample_weights is not None and any(w is None for w in sample_weights): weight = next(s for s in sample_weights if s is not None) sample_weights = training_utils.list_to_tuple([ array_ops.ones((weight.shape[0],)) if sw is None else sw for sw in sample_weights ]) if y is not None and sample_weights is not None: inputs = (x, y, sample_weights) elif y is not None: # Sample weight is only needed for training, so if y is None, then # sample_weight is ignored. inputs = (x, y) else: inputs = (x,) num_samples = int(nest.flatten(x)[0].shape[0]) # If batch_size is not passed but steps is, calculate from the input data. if steps and not batch_size: batch_size = int(math.ceil(num_samples / steps)) if not batch_size: raise ValueError( "`batch_size` or `steps` is required for `Tensor` or `NumPy`" " input data.") self._size = int(math.ceil(num_samples / batch_size)) self._batch_size = batch_size self._has_partial_batch = (self._size != (num_samples // batch_size)) self._partial_batch_size = None if self._has_partial_batch: self._partial_batch_size = ( num_samples - (self._size - 1) * self._batch_size) # Vectorized version of shuffle. # This is a performance improvement over using `from_tensor_slices`. # The indices of the data are shuffled and batched, and these indices # are then zipped with the data and used to extract a batch of the data # at each step. The performance improvements here come from: # 1. vectorized batch using gather # 2. parallelized map # 3. vectorized shuffle by using reshape and unbatch # 4. disabled static optimizations indices_list = [] for _ in range(epochs): indices = np.arange(num_samples) if shuffle: np.random.shuffle(indices) full_batch_indices = np.reshape( indices[:(num_samples // batch_size) * batch_size], [-1, batch_size]) partial_batch_indices = indices[(num_samples // batch_size) * batch_size:] epoch_indices_ds = dataset_ops.DatasetV2.from_tensors( full_batch_indices).unbatch() if partial_batch_indices.size: epoch_indices_ds = epoch_indices_ds.concatenate( dataset_ops.DatasetV2.from_tensors(partial_batch_indices)) indices_list.append(epoch_indices_ds) indices_ds = dataset_ops.DatasetV2.from_tensor_slices( indices_list).flat_map(lambda x: x) data_ds = dataset_ops.DatasetV2.from_tensors(inputs).repeat() dataset = dataset_ops.DatasetV2.zip((data_ds, indices_ds)) def _nested_grab_batch(data, indices): """Grabs batches of Tensors in `data` based on `indices`.""" def _grab_batch(x): """Grabs a batch of `x`.""" x_batch = array_ops.gather(x, indices) x_shape = x.shape.as_list() if not self._has_partial_batch: # Recover the batch shape info. x_shape[0] = self._batch_size x_batch.set_shape(x_shape) elif self._partial_batch_size >= num_samples: # Only one batch per epoch. 
x_shape[0] = self._partial_batch_size x_batch.set_shape(x_shape) return x_batch return nest.map_structure(_grab_batch, data) dataset = dataset.map( _nested_grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE) # Default optimizations are disabled to avoid the overhead of (unnecessary) # input pipeline graph serialization and deserialization options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False dataset = dataset.with_options(options) self._dataset = dataset def get_dataset(self): return self._dataset def get_size(self): return self._size def batch_size(self): return self._batch_size def has_partial_batch(self): return self._has_partial_batch def partial_batch_size(self): return self._partial_batch_size def should_recreate_iterator(self, _): # An infinite dataset is always created here. return False class CompositeTensorDataAdapter(DataAdapter): """Adapter that handles Tensor-like objects, e.g. EagerTensor and NumPy.""" @staticmethod def can_handle(x, y=None): flat_inputs = nest.flatten(x) if y is not None: flat_inputs += nest.flatten(y) def _is_composite(v): # Dataset inherits from CompositeTensor but shouldn't be handled here. if (isinstance(v, composite_tensor.CompositeTensor) and not isinstance(v, dataset_ops.DatasetV2)): return True return False def _is_tensor_or_composite(v): if isinstance(v, (ops.Tensor, np.ndarray)): return True return _is_composite(v) return (any(_is_composite(v) for v in flat_inputs) and all(_is_tensor_or_composite(v) for v in flat_inputs)) def __init__(self, x, y=None, sample_weights=None, batch_size=None, steps=None, shuffle=False, **kwargs): super(CompositeTensorDataAdapter, self).__init__(x, y, **kwargs) x = _process_numpy_inputs(x) y = _process_numpy_inputs(y) sample_weights = _process_numpy_inputs(sample_weights) # If sample_weights are not specified for an output use 1.0 as weights. if (sample_weights is not None and any([sw is None for sw in sample_weights])): weight = next(s for s in sample_weights if s is not None) sample_weights = training_utils.list_to_tuple([ array_ops.ones((weight.shape[0],)) if sw is None else sw for sw in sample_weights ]) if y is not None and sample_weights is not None: inputs = (x, y, sample_weights) elif y is not None: # Sample weight is only needed for training, so if y is None, then # sample_weight is ignored. inputs = (x, y) else: inputs = (x,) dataset = dataset_ops.DatasetV2.from_tensor_slices(inputs) num_samples = int(nest.flatten(x)[0].shape[0]) if shuffle: dataset = dataset.shuffle(num_samples) # If batch_size is not passed but steps is, calculate from the input data. 
if steps and not batch_size: batch_size = int(math.ceil(num_samples/steps)) if not batch_size: raise ValueError( "`batch_size` or `steps` is required for `Tensor` or `NumPy`" " input data.") dataset = dataset.batch(batch_size) self._size = int(math.ceil(num_samples / batch_size)) self._batch_size = batch_size self._has_partial_batch = (self._size != (num_samples // batch_size)) self._partial_batch_size = None if self._has_partial_batch: self._partial_batch_size = ( num_samples - (self._size - 1) * self._batch_size) self._dataset = dataset def get_dataset(self): return self._dataset def get_size(self): return self._size def batch_size(self): return self._batch_size def has_partial_batch(self): return self._has_partial_batch def partial_batch_size(self): return self._partial_batch_size class ListsOfScalarsDataAdapter(DataAdapter): """Adapter that handles lists of scalars and lists of lists of scalars.""" @staticmethod def can_handle(x, y=None): handles_x = ListsOfScalarsDataAdapter._is_list_of_scalars(x) handles_y = True if y is not None: handles_y = ListsOfScalarsDataAdapter._is_list_of_scalars(y) return handles_x and handles_y @staticmethod def _is_list_of_scalars(inp): if isinstance(inp, (float, int, str)): return True if isinstance(inp, (list, tuple)): return ListsOfScalarsDataAdapter._is_list_of_scalars(inp[0]) return False def __init__( self, x, y=None, sample_weights=None, batch_size=None, shuffle=False, **kwargs): super(ListsOfScalarsDataAdapter, self).__init__(x, y, **kwargs) x = np.asarray(x) if y is not None: y = np.asarray(y) if sample_weights is not None: sample_weights = np.asarray(sample_weights) self._internal_adapter = TensorLikeDataAdapter( x, y=y, sample_weights=sample_weights, batch_size=batch_size, shuffle=shuffle, **kwargs) def get_dataset(self): return self._internal_adapter.get_dataset() def get_size(self): return self._internal_adapter.get_size() def batch_size(self): return self._internal_adapter.batch_size() def has_partial_batch(self): return self._internal_adapter.has_partial_batch() def partial_batch_size(self): return self._internal_adapter.partial_batch_size() class DatasetAdapter(DataAdapter): """Adapter that handles `tf.data.Dataset`.""" @staticmethod def can_handle(x, y=None): return isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)) def __init__(self, x, y=None, sample_weights=None, **kwargs): super(DatasetAdapter, self).__init__(x, y, **kwargs) if not is_none_or_empty(y): raise ValueError("`y` argument is not supported when using " "dataset as input.") if not is_none_or_empty(sample_weights): raise ValueError("`sample_weight` argument is not supported when using " "dataset as input.") # Note that the dataset instance is immutable, its fine to reusing the user # provided dataset. self._dataset = x def get_dataset(self): return self._dataset def get_size(self): # The size of dataset is unknown, unless its fully consumed. 
return None def batch_size(self): return None def has_partial_batch(self): return False def partial_batch_size(self): return None class GeneratorDataAdapter(DataAdapter): """Adapter that handles python generator.""" @staticmethod def can_handle(x, y=None): return tf_inspect.isgenerator(x) def __init__(self, x, y=None, sample_weights=None, workers=1, use_multiprocessing=False, max_queue_size=10, **kwargs): super(GeneratorDataAdapter, self).__init__(x, y, **kwargs) if not is_none_or_empty(y): raise ValueError("`y` argument is not supported when using " "python generator as input.") if not is_none_or_empty(sample_weights): raise ValueError("`sample_weight` argument is not supported when using " "python generator as input.") # Since we have to know the dtype of the python generator when we build the # dataset, we have to take a peek for the python generator first. Since the # peeked data cannot be push back to generator, we create a new generator by # adding the peeked data at head. peek = next(x) nested_dtypes = nest.map_structure(lambda t: t.dtype, peek) nested_shape = nest.map_structure(lambda t: t.shape, peek) # Note that dataset API takes a callable that creates a generator object, # rather than generator itself, which is why we define a function here. if workers > 0: if use_multiprocessing: logging.warning( UserWarning("Using a generator with `use_multiprocessing=True` " "and multiple workers may duplicate your data. " "Please consider using the `tf.data.Dataset`.")) def generator_fn(): enqueuer = data_utils.GeneratorEnqueuer( itertools.chain([peek], x), use_multiprocessing=use_multiprocessing) enqueuer.start(workers=workers, max_queue_size=max_queue_size) return enqueuer.get() else: def generator_fn(): return itertools.chain([peek], x) self._batch_size = int(nest.flatten(peek)[0].shape[0]) self._dataset = dataset_ops.DatasetV2.from_generator( generator_fn, nested_dtypes, output_shapes=nested_shape) def get_dataset(self): return self._dataset def get_size(self): return None def batch_size(self): return self._batch_size def has_partial_batch(self): return False def partial_batch_size(self): return None class KerasSequenceAdapter(DataAdapter): """Adapter that handles `keras.utils.Sequence`.""" @staticmethod def can_handle(x, y=None): return isinstance(x, data_utils.Sequence) def __init__(self, x, y=None, sample_weights=None, shuffle=False, workers=1, use_multiprocessing=False, max_queue_size=10, **kwargs): super(KerasSequenceAdapter, self).__init__(x, y, **kwargs) if not is_none_or_empty(y): raise ValueError("`y` argument is not supported when using " "`keras.utils.Sequence` as input.") if not is_none_or_empty(sample_weights): raise ValueError("`sample_weight` argument is not supported when using " "`keras.utils.Sequence` as input.") peek = x[0] nested_dtypes = nest.map_structure(lambda t: t.dtype, peek) nested_shape = nest.map_structure(lambda t: t.shape, peek) if workers > 0: def generator_fn(): enqueuer = data_utils.OrderedEnqueuer( x, use_multiprocessing=use_multiprocessing) enqueuer.start(workers=workers, max_queue_size=max_queue_size) return enqueuer.get() else: def generator_fn(): for i in range(len(x)): yield x[i] dataset = dataset_ops.DatasetV2.from_generator(generator_fn, nested_dtypes, output_shapes=nested_shape) if shuffle: dataset = dataset.shuffle(len(x)) self._dataset = dataset self._size = len(x) self._batch_size = int(nest.flatten(peek)[0].shape[0]) def get_dataset(self): return self._dataset def get_size(self): return self._size def batch_size(self): return 
self._batch_size def has_partial_batch(self): return False def partial_batch_size(self): return None ALL_ADAPTER_CLS = [ ListsOfScalarsDataAdapter, TensorLikeDataAdapter, DatasetAdapter, GeneratorDataAdapter, KerasSequenceAdapter, CompositeTensorDataAdapter ] def select_data_adapter(x, y): """Selects a data adapter that can handle a given x and y.""" adapter_cls = [cls for cls in ALL_ADAPTER_CLS if cls.can_handle(x, y)] if not adapter_cls: # TODO(scottzhu): This should be a less implementation-specific error. raise ValueError( "Failed to find data adapter that can handle " "input: {}, {}".format( _type_name(x), _type_name(y))) elif len(adapter_cls) > 1: raise RuntimeError( "Data adapters should be mutually exclusive for " "handling inputs. Found multiple adapters {} to handle " "input: {}, {}".format( adapter_cls, _type_name(x), _type_name(y))) return adapter_cls[0] def _type_name(x): """Generates a description of the type of an object.""" if isinstance(x, dict): key_types = set(_type_name(key) for key in x.keys()) val_types = set(_type_name(key) for key in x.values()) return "({} containing {} keys and {} values)".format( type(x), key_types, val_types) if isinstance(x, (list, tuple)): types = set(_type_name(val) for val in x) return "({} containing values of types {})".format( type(x), types) return str(type(x)) def _process_numpy_inputs(inputs): """Process numpy array inputs. Numpy inputs can be a single array, or a list/dict of arrays. They could also have been preprocessed by another library to match the positional order expected by the model. The result here should be something that can be used to build a dataset. Args: inputs: single or list/tuple/dict of numpy arrays. Returns: numpy arrays that can be used to build a dataset. """ if is_none_or_empty(inputs): return None flat_inputs = nest.flatten(inputs) if len(flat_inputs) == 1: return flat_inputs[0] def _convert_non_tensor(x): # Don't call `ops.convert_to_tensor` on all `inputs` because # `SparseTensors` can't be converted to `Tensor`. if isinstance(x, np.ndarray): return ops.convert_to_tensor(x) return x inputs = nest.map_structure(_convert_non_tensor, inputs) # For more complicated structures, we only convert the outermost list to a # tuple, since the dataset will stack a list but treat elements of a tuple as # individual elements. return training_utils.list_to_tuple(inputs) def is_none_or_empty(inputs): # Util method to check whether the input is None or an empty list. # A plain Python "not" check would raise an error like the one below if the # input is a numpy array: # "The truth value of an array with more than one element is ambiguous. # Use a.any() or a.all()" return inputs is None or not nest.flatten(inputs)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/data_adapter.py
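As a minimal sketch of how the adapter selection above behaves for plain NumPy inputs (shapes and batch size here are arbitrary), `select_data_adapter` resolves to `TensorLikeDataAdapter`, which reports its batching layout and exposes a `tf.data.Dataset`:

import numpy as np
from tensorflow.python.keras.engine import data_adapter

x = np.random.random((10, 3)).astype('float32')
y = np.random.random((10, 1)).astype('float32')

adapter_cls = data_adapter.select_data_adapter(x, y)  # TensorLikeDataAdapter
adapter = adapter_cls(x, y, batch_size=4, epochs=1, shuffle=False)

print(adapter.batch_size())          # 4
print(adapter.get_size())            # 3 batches per epoch (4 + 4 + 2)
print(adapter.has_partial_batch())   # True
print(adapter.partial_batch_size())  # 2
dataset = adapter.get_dataset()      # tf.data.Dataset of (x_batch, y_batch) pairs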
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #,============================================================================ """Tests for layer graphs construction & handling.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import input_layer as input_layer_lib from tensorflow.python.keras.engine import network as network_lib from tensorflow.python.keras.engine import training from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.platform import test class NetworkConstructionTest(keras_parameterized.TestCase): @test_util.run_deprecated_v1 def test_get_updates(self): class MyLayer(keras.layers.Layer): def build(self, input_shape): self.a = self.add_variable('a', (1, 1), 'float32', trainable=False) self.b = self.add_variable('b', (1, 1), 'float32', trainable=False) self.add_update(state_ops.assign_add(self.a, [[1.]], name='unconditional_update')) self.built = True def call(self, inputs): self.add_update(state_ops.assign_add(self.b, inputs, name='conditional_update'), inputs=True) return inputs + 1 x1 = input_layer_lib.Input(shape=(1,)) layer = MyLayer() _ = layer(x1) self.assertEqual(len(layer.updates), 2) self.assertEqual(len(layer.get_updates_for(x1)), 1) self.assertEqual(len(layer.get_updates_for(None)), 1) x2 = input_layer_lib.Input(shape=(1,)) y2 = layer(x2) self.assertEqual(len(layer.updates), 3) self.assertEqual(len(layer.get_updates_for(x1)), 1) self.assertEqual(len(layer.get_updates_for(x2)), 1) self.assertEqual(len(layer.get_updates_for(None)), 1) network = network_lib.Network(x2, y2) self.assertEqual(len(network.updates), 3) self.assertEqual(len(network.get_updates_for(x2)), 1) self.assertEqual(len(network.get_updates_for(None)), 1) x3 = input_layer_lib.Input(shape=(1,)) _ = layer(x3) self.assertEqual(len(network.updates), 4) x4 = input_layer_lib.Input(shape=(1,)) _ = network(x4) self.assertEqual(len(network.updates), 5) self.assertEqual(len(network.get_updates_for(x2)), 1) self.assertEqual(len(network.get_updates_for(x4)), 1) self.assertEqual(len(network.get_updates_for(None)), 1) network.add_update(state_ops.assign_add(layer.a, [[1]])) self.assertEqual(len(network.updates), 6) self.assertEqual(len(network.get_updates_for(None)), 2) network.add_update(state_ops.assign_add(layer.b, x4), inputs=True) self.assertEqual(len(network.updates), 7) self.assertEqual(len(network.get_updates_for(x4)), 2) @test_util.run_in_graph_and_eager_modes() def 
test_get_updates_bn(self): x1 = input_layer_lib.Input(shape=(1,)) layer = keras.layers.BatchNormalization() _ = layer(x1) self.assertEqual(len(layer.updates), 2) self.assertEqual(len(layer.get_updates_for(x1)), 2) self.assertEqual(len(layer.get_updates_for(None)), 0) @test_util.run_deprecated_v1 def test_get_losses(self): class MyLayer(keras.layers.Layer): def build(self, input_shape): self.a = self.add_variable('a', (1, 1), 'float32', trainable=False) self.b = self.add_variable('b', (1, 1), 'float32', trainable=False) self.add_loss(math_ops.reduce_sum(self.a)) self.built = True def call(self, inputs): self.add_loss(math_ops.reduce_sum(inputs), inputs=True) return inputs + 1 x1 = input_layer_lib.Input(shape=(1,)) layer = MyLayer() _ = layer(x1) self.assertEqual(len(layer.losses), 2) self.assertEqual(len(layer.get_losses_for(x1)), 1) self.assertEqual(len(layer.get_losses_for(None)), 1) x2 = input_layer_lib.Input(shape=(1,)) y2 = layer(x2) self.assertEqual(len(layer.losses), 3) self.assertEqual(len(layer.get_losses_for(x1)), 1) self.assertEqual(len(layer.get_losses_for(x2)), 1) self.assertEqual(len(layer.get_losses_for(None)), 1) network = network_lib.Network(x2, y2) self.assertEqual(len(network.losses), 3) self.assertEqual(len(network.get_losses_for(x1)), 1) self.assertEqual(len(network.get_losses_for(x2)), 1) self.assertEqual(len(network.get_losses_for(None)), 1) x3 = input_layer_lib.Input(shape=(1,)) _ = layer(x3) self.assertEqual(len(network.losses), 4) x4 = input_layer_lib.Input(shape=(1,)) _ = network(x4) self.assertEqual(len(network.losses), 5) self.assertEqual(len(network.get_losses_for(x2)), 1) self.assertEqual(len(network.get_losses_for(x4)), 1) self.assertEqual(len(network.get_losses_for(None)), 1) @test_util.run_in_graph_and_eager_modes() def testTopologicalAttributes(self): # test layer attributes / methods related to cross-layer connectivity. a = input_layer_lib.Input(shape=(32,), name='input_a') b = input_layer_lib.Input(shape=(32,), name='input_b') # test input, output, input_shape, output_shape test_layer = keras.layers.Dense(16, name='test_layer') a_test = test_layer(a) self.assertIs(test_layer.input, a) self.assertIs(test_layer.output, a_test) self.assertEqual(test_layer.input_shape, (None, 32)) self.assertEqual(test_layer.output_shape, (None, 16)) # test `get_*_at` methods dense = keras.layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) self.assertIs(dense.get_input_at(0), a) self.assertIs(dense.get_input_at(1), b) self.assertIs(dense.get_output_at(0), a_2) self.assertIs(dense.get_output_at(1), b_2) self.assertEqual(dense.get_input_shape_at(0), (None, 32)) self.assertEqual(dense.get_input_shape_at(1), (None, 32)) self.assertEqual(dense.get_output_shape_at(0), (None, 16)) self.assertEqual(dense.get_output_shape_at(1), (None, 16)) # Test invalid value for attribute retrieval. 
with self.assertRaises(ValueError): dense.get_input_at(2) with self.assertRaises(AttributeError): new_dense = keras.layers.Dense(16) _ = new_dense.input with self.assertRaises(AttributeError): new_dense = keras.layers.Dense(16) _ = new_dense.output with self.assertRaises(AttributeError): new_dense = keras.layers.Dense(16) _ = new_dense.output_shape with self.assertRaises(AttributeError): new_dense = keras.layers.Dense(16) _ = new_dense.input_shape with self.assertRaises(AttributeError): new_dense = keras.layers.Dense(16) a = input_layer_lib.Input(shape=(3, 32)) a = input_layer_lib.Input(shape=(5, 32)) a_2 = dense(a) b_2 = dense(b) _ = new_dense.input_shape with self.assertRaises(AttributeError): new_dense = keras.layers.Dense(16) a = input_layer_lib.Input(shape=(3, 32)) a = input_layer_lib.Input(shape=(5, 32)) a_2 = dense(a) b_2 = dense(b) _ = new_dense.output_shape def _assertAllIs(self, a, b): self.assertTrue(all(x is y for x, y in zip(a, b))) @test_util.run_in_graph_and_eager_modes() def testTopologicalAttributesMultiOutputLayer(self): class PowersLayer(keras.layers.Layer): def call(self, inputs): return [inputs**2, inputs**3] x = input_layer_lib.Input(shape=(32,)) test_layer = PowersLayer() p1, p2 = test_layer(x) # pylint: disable=not-callable self.assertIs(test_layer.input, x) self._assertAllIs(test_layer.output, [p1, p2]) self.assertEqual(test_layer.input_shape, (None, 32)) self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)]) @test_util.run_in_graph_and_eager_modes() def testTopologicalAttributesMultiInputLayer(self): class AddLayer(keras.layers.Layer): def call(self, inputs): assert len(inputs) == 2 return inputs[0] + inputs[1] a = input_layer_lib.Input(shape=(32,)) b = input_layer_lib.Input(shape=(32,)) test_layer = AddLayer() y = test_layer([a, b]) # pylint: disable=not-callable self._assertAllIs(test_layer.input, [a, b]) self.assertIs(test_layer.output, y) self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)]) self.assertEqual(test_layer.output_shape, (None, 32)) @test_util.run_deprecated_v1 def testBasicNetwork(self): # minimum viable network x = input_layer_lib.Input(shape=(32,)) dense = keras.layers.Dense(2) y = dense(x) network = network_lib.Network(x, y, name='dense_network') # test basic attributes self.assertEqual(network.name, 'dense_network') self.assertEqual(len(network.layers), 2) # InputLayer + Dense self.assertEqual(network.layers[1], dense) self._assertAllIs(network.weights, dense.weights) self._assertAllIs(network.trainable_weights, dense.trainable_weights) self._assertAllIs(network.non_trainable_weights, dense.non_trainable_weights) # test callability on Input x_2 = input_layer_lib.Input(shape=(32,)) y_2 = network(x_2) self.assertEqual(y_2.shape.as_list(), [None, 2]) # test callability on regular tensor x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32)) y_2 = network(x_2) self.assertEqual(y_2.shape.as_list(), [None, 2]) # test network `trainable` attribute network.trainable = False self._assertAllIs(network.weights, dense.weights) self.assertEqual(network.trainable_weights, []) self._assertAllIs(network.non_trainable_weights, dense.trainable_weights + dense.non_trainable_weights) @test_util.run_in_graph_and_eager_modes def test_trainable_weights(self): a = keras.layers.Input(shape=(2,)) b = keras.layers.Dense(1)(a) model = keras.models.Model(a, b) weights = model.weights self._assertAllIs(model.trainable_weights, weights) self.assertListEqual(model.non_trainable_weights, []) model.trainable = False 
self.assertListEqual(model.trainable_weights, []) self._assertAllIs(model.non_trainable_weights, weights) model.trainable = True self._assertAllIs(model.trainable_weights, weights) self.assertListEqual(model.non_trainable_weights, []) model.layers[1].trainable = False self.assertListEqual(model.trainable_weights, []) self._assertAllIs(model.non_trainable_weights, weights) # sequential model model = keras.models.Sequential() model.add(keras.layers.Dense(1, input_dim=2)) weights = model.weights self._assertAllIs(model.trainable_weights, weights) self.assertListEqual(model.non_trainable_weights, []) model.trainable = False self.assertListEqual(model.trainable_weights, []) self._assertAllIs(model.non_trainable_weights, weights) model.trainable = True self._assertAllIs(model.trainable_weights, weights) self.assertListEqual(model.non_trainable_weights, []) model.layers[0].trainable = False self.assertListEqual(model.trainable_weights, []) self._assertAllIs(model.non_trainable_weights, weights) @test_util.run_deprecated_v1 def test_layer_call_arguments(self): # Test the ability to pass and serialize arguments to `call`. inp = keras.layers.Input(shape=(2,)) x = keras.layers.Dense(3)(inp) x = keras.layers.Dropout(0.5)(x, training=True) model = keras.models.Model(inp, x) # Would be `dropout/cond/Merge` by default self.assertTrue(model.output.op.name.endswith('dropout/mul_1')) # Test that argument is kept when applying the model inp2 = keras.layers.Input(shape=(2,)) out2 = model(inp2) self.assertTrue(out2.op.name.endswith('dropout/mul_1')) # Test that argument is kept after loading a model config = model.get_config() model = keras.models.Model.from_config(config) self.assertTrue(model.output.op.name.endswith('dropout/mul_1')) def test_node_construction(self): # test basics a = keras.layers.Input(shape=(32,), name='input_a') b = keras.layers.Input(shape=(32,), name='input_b') with self.assertRaises(ValueError): _ = keras.layers.Input(shape=(32,), batch_shape=(10, 32)) with self.assertRaises(ValueError): _ = keras.layers.Input(shape=(32,), unknown_kwarg=None) self.assertListEqual(a.shape.as_list(), [None, 32]) a_layer, a_node_index, a_tensor_index = a._keras_history b_layer, _, _ = b._keras_history self.assertEqual(len(a_layer._inbound_nodes), 1) self.assertEqual(a_tensor_index, 0) node = a_layer._inbound_nodes[a_node_index] self.assertEqual(node.outbound_layer, a_layer) self.assertListEqual(node.inbound_layers, []) self.assertListEqual(node.input_tensors, [a]) self.assertListEqual(node.input_shapes, [(None, 32)]) self.assertListEqual(node.output_tensors, [a]) self.assertListEqual(node.output_shapes, [(None, 32)]) dense = keras.layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) self.assertEqual(len(dense._inbound_nodes), 2) self.assertEqual(len(dense._outbound_nodes), 0) self.assertEqual(dense._inbound_nodes[0].inbound_layers, a_layer) self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense) self.assertEqual(dense._inbound_nodes[1].inbound_layers, b_layer) self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense) self.assertIs(dense._inbound_nodes[0].input_tensors, a) self.assertIs(dense._inbound_nodes[1].input_tensors, b) # test layer properties test_layer = keras.layers.Dense(16, name='test_layer') a_test = test_layer(a) self.assertListEqual(test_layer.kernel.shape.as_list(), [32, 16]) self.assertIs(test_layer.input, a) self.assertIs(test_layer.output, a_test) self.assertEqual(test_layer.input_shape, (None, 32)) self.assertEqual(test_layer.output_shape, (None, 16)) 
self.assertIs(dense.get_input_at(0), a) self.assertIs(dense.get_input_at(1), b) self.assertIs(dense.get_output_at(0), a_2) self.assertIs(dense.get_output_at(1), b_2) self.assertEqual(dense.get_input_shape_at(0), (None, 32)) self.assertEqual(dense.get_input_shape_at(1), (None, 32)) self.assertEqual(dense.get_output_shape_at(0), (None, 16)) self.assertEqual(dense.get_output_shape_at(1), (None, 16)) self.assertEqual(dense.get_input_mask_at(0), None) self.assertEqual(dense.get_input_mask_at(1), None) self.assertEqual(dense.get_output_mask_at(0), None) self.assertEqual(dense.get_output_mask_at(1), None) @test_util.run_in_graph_and_eager_modes() def test_multi_input_layer(self): with self.cached_session(): # test multi-input layer a = keras.layers.Input(shape=(32,), name='input_a') b = keras.layers.Input(shape=(32,), name='input_b') dense = keras.layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) merged = keras.layers.concatenate([a_2, b_2], name='merge') self.assertListEqual(merged.shape.as_list(), [None, 16 * 2]) merge_layer, merge_node_index, merge_tensor_index = merged._keras_history self.assertEqual(merge_node_index, 0) self.assertEqual(merge_tensor_index, 0) self.assertEqual(len(merge_layer._inbound_nodes), 1) self.assertEqual(len(merge_layer._outbound_nodes), 0) self.assertEqual(len(merge_layer._inbound_nodes[0].input_tensors), 2) self.assertEqual(len(merge_layer._inbound_nodes[0].inbound_layers), 2) c = keras.layers.Dense(64, name='dense_2')(merged) d = keras.layers.Dense(5, name='dense_3')(c) model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model') self.assertEqual(len(model.layers), 6) output_shapes = model.compute_output_shape([(None, 32), (None, 32)]) self.assertListEqual(output_shapes[0].as_list(), [None, 64]) self.assertListEqual(output_shapes[1].as_list(), [None, 5]) self.assertListEqual( model.compute_mask([a, b], [None, None]), [None, None]) # we don't check names of first 2 layers (inputs) because # ordering of same-level layers is not fixed self.assertListEqual([l.name for l in model.layers][2:], ['dense_1', 'merge', 'dense_2', 'dense_3']) self.assertListEqual([l.name for l in model._input_layers], ['input_a', 'input_b']) self.assertListEqual([l.name for l in model._output_layers], ['dense_2', 'dense_3']) # actually run model fn = keras.backend.function(model.inputs, model.outputs) input_a_np = np.random.random((10, 32)) input_b_np = np.random.random((10, 32)) fn_outputs = fn([input_a_np, input_b_np]) self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)]) # test get_source_inputs self._assertAllIs(keras.engine.get_source_inputs(c), [a, b]) # serialization / deserialization json_config = model.to_json() recreated_model = keras.models.model_from_json(json_config) recreated_model.compile('rmsprop', 'mse') self.assertListEqual([l.name for l in recreated_model.layers][2:], ['dense_1', 'merge', 'dense_2', 'dense_3']) self.assertListEqual([l.name for l in recreated_model._input_layers], ['input_a', 'input_b']) self.assertListEqual([l.name for l in recreated_model._output_layers], ['dense_2', 'dense_3']) fn = keras.backend.function(recreated_model.inputs, recreated_model.outputs) input_a_np = np.random.random((10, 32)) input_b_np = np.random.random((10, 32)) fn_outputs = fn([input_a_np, input_b_np]) self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)]) def test_multi_output_layer_output_names(self): inp = keras.layers.Input(name='inp', shape=(None,), dtype=dtypes.float32) class _MultiOutput(keras.layers.Layer): def 
call(self, x): return x + 1., x + 2. out = _MultiOutput(name='out')(inp) model = keras.models.Model(inp, out) self.assertEqual(['out', 'out_1'], model.output_names) self.assertAllClose([[[2.]], [[3.]]], model.predict(np.ones(1))) @test_util.run_deprecated_v1 def test_recursion(self): with self.cached_session(): a = keras.layers.Input(shape=(32,), name='input_a') b = keras.layers.Input(shape=(32,), name='input_b') dense = keras.layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) merged = keras.layers.concatenate([a_2, b_2], name='merge') c = keras.layers.Dense(64, name='dense_2')(merged) d = keras.layers.Dense(5, name='dense_3')(c) model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model') e = keras.layers.Input(shape=(32,), name='input_e') f = keras.layers.Input(shape=(32,), name='input_f') self.assertEqual(len(model.inputs), 2) g, h = model([e, f]) self.assertEqual(len(model.inputs), 2) self.assertEqual(g.name, 'model/dense_2/BiasAdd:0') self.assertListEqual(g.shape.as_list(), c.shape.as_list()) self.assertListEqual(h.shape.as_list(), d.shape.as_list()) # test separate manipulation of different layer outputs i = keras.layers.Dense(7, name='dense_4')(h) final_model = keras.models.Model( inputs=[e, f], outputs=[i, g], name='final') self.assertEqual(len(final_model.inputs), 2) self.assertEqual(len(final_model.outputs), 2) self.assertEqual(len(final_model.layers), 4) # we don't check names of first 2 layers (inputs) because # ordering of same-level layers is not fixed self.assertListEqual([layer.name for layer in final_model.layers][2:], ['model', 'dense_4']) self.assertListEqual( model.compute_mask([e, f], [None, None]), [None, None]) self.assertListEqual( final_model.compute_output_shape([(10, 32), (10, 32)]), [(10, 7), (10, 64)]) # run recursive model fn = keras.backend.function(final_model.inputs, final_model.outputs) input_a_np = np.random.random((10, 32)) input_b_np = np.random.random((10, 32)) fn_outputs = fn([input_a_np, input_b_np]) self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)]) # test serialization model_config = final_model.get_config() recreated_model = keras.models.Model.from_config(model_config) fn = keras.backend.function(recreated_model.inputs, recreated_model.outputs) input_a_np = np.random.random((10, 32)) input_b_np = np.random.random((10, 32)) fn_outputs = fn([input_a_np, input_b_np]) self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)]) @test_util.run_in_graph_and_eager_modes() def test_multi_input_multi_output_recursion(self): with self.cached_session(): # test multi-input multi-output a = keras.layers.Input(shape=(32,), name='input_a') b = keras.layers.Input(shape=(32,), name='input_b') dense = keras.layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) merged = keras.layers.concatenate([a_2, b_2], name='merge') c = keras.layers.Dense(64, name='dense_2')(merged) d = keras.layers.Dense(5, name='dense_3')(c) model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model') j = keras.layers.Input(shape=(32,), name='input_j') k = keras.layers.Input(shape=(32,), name='input_k') _, n = model([j, k]) o = keras.layers.Input(shape=(32,), name='input_o') p = keras.layers.Input(shape=(32,), name='input_p') q, _ = model([o, p]) self.assertListEqual(n.shape.as_list(), [None, 5]) self.assertListEqual(q.shape.as_list(), [None, 64]) s = keras.layers.concatenate([n, q], name='merge_nq') self.assertListEqual(s.shape.as_list(), [None, 64 + 5]) # test with single output as 1-elem list multi_io_model = 
keras.models.Model([j, k, o, p], [s]) fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs) fn_outputs = fn([ np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)) ]) self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)]) # test with single output as tensor multi_io_model = keras.models.Model([j, k, o, p], s) fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs) fn_outputs = fn([ np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)) ]) # note that the output of the function will still be a 1-elem list self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)]) # test serialization model_config = multi_io_model.get_config() recreated_model = keras.models.Model.from_config(model_config) fn = keras.backend.function(recreated_model.inputs, recreated_model.outputs) fn_outputs = fn([ np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)) ]) # note that the output of the function will still be a 1-elem list self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)]) config = model.get_config() keras.models.Model.from_config(config) model.summary() json_str = model.to_json() keras.models.model_from_json(json_str) @test_util.run_in_graph_and_eager_modes() def test_invalid_graphs(self): a = keras.layers.Input(shape=(32,), name='input_a') b = keras.layers.Input(shape=(32,), name='input_b') dense = keras.layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) merged = keras.layers.concatenate([a_2, b_2], name='merge') c = keras.layers.Dense(64, name='dense_2')(merged) d = keras.layers.Dense(5, name='dense_3')(c) model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model') # input is not an Input tensor j = keras.layers.Input(shape=(32,), name='input_j') j = keras.layers.Dense(32)(j) k = keras.layers.Input(shape=(32,), name='input_k') m, n = model([j, k]) with self.assertRaises(Exception): keras.models.Model([j, k], [m, n]) # disconnected graph j = keras.layers.Input(shape=(32,), name='input_j') k = keras.layers.Input(shape=(32,), name='input_k') m, n = model([j, k]) with self.assertRaises(Exception): keras.models.Model([j], [m, n]) # redundant outputs j = keras.layers.Input(shape=(32,), name='input_j') k = keras.layers.Input(shape=(32,), name='input_k') m, n = model([j, k]) keras.models.Model([j, k], [m, n, n]) # redundant inputs j = keras.layers.Input(shape=(32,), name='input_j') k = keras.layers.Input(shape=(32,), name='input_k') m, n = model([j, k]) with self.assertRaises(Exception): keras.models.Model([j, k, j], [m, n]) # i have not idea what I'm doing: garbage as inputs/outputs j = keras.layers.Input(shape=(32,), name='input_j') k = keras.layers.Input(shape=(32,), name='input_k') m, n = model([j, k]) with self.assertRaises(Exception): keras.models.Model([j, k], [m, n, 0]) @test_util.run_deprecated_v1 def test_raw_tf_compatibility(self): # test calling layers/models on TF tensors a = keras.layers.Input(shape=(32,), name='input_a') b = keras.layers.Input(shape=(32,), name='input_b') dense = keras.layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) merged = keras.layers.concatenate([a_2, b_2], name='merge') c = keras.layers.Dense(64, name='dense_2')(merged) d = keras.layers.Dense(5, name='dense_3')(c) model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model') j = keras.layers.Input(shape=(32,), name='input_j') k = 
keras.layers.Input(shape=(32,), name='input_k') self.assertEqual(len(model.inputs), 2) m, n = model([j, k]) self.assertEqual(len(model.inputs), 2) tf_model = keras.models.Model([j, k], [m, n]) j_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32)) k_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32)) m_tf, n_tf = tf_model([j_tf, k_tf]) self.assertListEqual(m_tf.shape.as_list(), [None, 64]) self.assertListEqual(n_tf.shape.as_list(), [None, 5]) # test merge keras.layers.concatenate([j_tf, k_tf], axis=1) keras.layers.add([j_tf, k_tf]) # test tensor input x = array_ops.placeholder(shape=(None, 2), dtype=dtypes.float32) keras.layers.InputLayer(input_tensor=x) x = keras.layers.Input(tensor=x) keras.layers.Dense(2)(x) @test_util.run_in_graph_and_eager_modes() def test_basic_masking(self): a = keras.layers.Input(shape=(10, 32), name='input_a') b = keras.layers.Masking()(a) model = keras.models.Model(a, b) self.assertEqual(model.output_mask.shape.as_list(), [None, 10]) @test_util.run_deprecated_v1 def testMaskingSingleInput(self): class MaskedLayer(keras.layers.Layer): def call(self, inputs, mask=None): if mask is not None: return inputs * mask return inputs def compute_mask(self, inputs, mask=None): return array_ops.ones_like(inputs) if context.executing_eagerly(): a = constant_op.constant([2] * 32) mask = constant_op.constant([0, 1] * 16) a._keras_mask = mask b = MaskedLayer().apply(a) self.assertTrue(hasattr(b, '_keras_mask')) self.assertAllEqual( self.evaluate(array_ops.ones_like(mask)), self.evaluate(getattr(b, '_keras_mask'))) self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b)) else: x = input_layer_lib.Input(shape=(32,)) y = MaskedLayer()(x) # pylint: disable=not-callable network = network_lib.Network(x, y) # test callability on Input x_2 = input_layer_lib.Input(shape=(32,)) y_2 = network(x_2) self.assertEqual(y_2.shape.as_list(), [None, 32]) # test callability on regular tensor x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32)) y_2 = network(x_2) self.assertEqual(y_2.shape.as_list(), [None, 32]) @test_util.run_deprecated_v1 def test_activity_regularization_with_model_composition(self): def reg(x): return math_ops.reduce_sum(x) net_a_input = input_layer_lib.Input((2,)) net_a = net_a_input net_a = keras.layers.Dense(2, kernel_initializer='ones', use_bias=False, activity_regularizer=reg)(net_a) model_a = keras.Model([net_a_input], [net_a]) net_b_input = input_layer_lib.Input((2,)) net_b = model_a(net_b_input) model_b = keras.Model([net_b_input], [net_b]) model_b.compile(optimizer='sgd', loss=None) x = np.ones((1, 2)) loss = model_b.evaluate(x) self.assertEqual(loss, 4.) 
@keras_parameterized.run_all_keras_modes def test_layer_sharing_at_heterogenous_depth(self): x_val = np.random.random((10, 5)) x = input_layer_lib.Input(shape=(5,)) a = keras.layers.Dense(5, name='A') b = keras.layers.Dense(5, name='B') output = a(b(a(b(x)))) m = keras.models.Model(x, output) m.run_eagerly = testing_utils.should_run_eagerly() m._experimental_run_tf_function = testing_utils.should_run_tf_function() output_val = m.predict(x_val) config = m.get_config() weights = m.get_weights() m2 = keras.models.Model.from_config(config) m2.set_weights(weights) output_val_2 = m2.predict(x_val) self.assertAllClose(output_val, output_val_2, atol=1e-6) @keras_parameterized.run_all_keras_modes def test_layer_sharing_at_heterogenous_depth_with_concat(self): input_shape = (16, 9, 3) input_layer = input_layer_lib.Input(shape=input_shape) a = keras.layers.Dense(3, name='dense_A') b = keras.layers.Dense(3, name='dense_B') c = keras.layers.Dense(3, name='dense_C') x1 = b(a(input_layer)) x2 = a(c(input_layer)) output = keras.layers.concatenate([x1, x2]) m = keras.models.Model(inputs=input_layer, outputs=output) m.run_eagerly = testing_utils.should_run_eagerly() m._experimental_run_tf_function = testing_utils.should_run_tf_function() x_val = np.random.random((10, 16, 9, 3)) output_val = m.predict(x_val) config = m.get_config() weights = m.get_weights() m2 = keras.models.Model.from_config(config) m2.set_weights(weights) output_val_2 = m2.predict(x_val) self.assertAllClose(output_val, output_val_2, atol=1e-6) @keras_parameterized.run_all_keras_modes def test_explicit_training_argument(self): a = keras.layers.Input(shape=(2,)) b = keras.layers.Dropout(0.5)(a) base_model = keras.models.Model(a, b) a = keras.layers.Input(shape=(2,)) b = base_model(a, training=False) model = keras.models.Model(a, b) x = np.ones((100, 2)) y = np.ones((100, 2)) model.compile( optimizer='sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) loss = model.train_on_batch(x, y) self.assertEqual(loss, 0) # In inference mode, output is equal to input. a = keras.layers.Input(shape=(2,)) b = base_model(a, training=True) model = keras.models.Model(a, b) preds = model.predict(x) self.assertEqual(np.min(preds), 0.) # At least one unit was dropped. @keras_parameterized.run_all_keras_modes def test_mask_derived_from_keras_layer(self): inputs = keras.Input((5, 10)) mask = keras.Input((5,)) outputs = keras.layers.RNN(keras.layers.LSTMCell(100))(inputs, mask=mask) model = keras.Model([inputs, mask], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit( x=[np.ones((10, 5, 10)), np.zeros((10, 5))], y=np.zeros((10, 100)), batch_size=2) # All data is masked, returned values are 0's. self.assertEqual(history.history['loss'][0], 0.0) history = model.fit( x=[np.ones((10, 5, 10)), np.ones((10, 5))], y=np.zeros((10, 100)), batch_size=2) # Data is not masked, returned values are random. self.assertGreater(history.history['loss'][0], 0.0) model = keras.Model.from_config(model.get_config()) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit( x=[np.ones((10, 5, 10)), np.zeros((10, 5))], y=np.zeros((10, 100)), batch_size=2) # All data is masked, returned values are 0's. 
self.assertEqual(history.history['loss'][0], 0.0) history = model.fit( x=[np.ones((10, 5, 10)), np.ones((10, 5))], y=np.zeros((10, 100)), batch_size=2) # Data is not masked, returned values are random. self.assertGreater(history.history['loss'][0], 0.0) @keras_parameterized.run_all_keras_modes def test_call_arg_derived_from_keras_layer(self): class MyAdd(keras.layers.Layer): def call(self, x1, x2): return x1 + x2 input1 = keras.Input(10) input2 = keras.Input(10) outputs = MyAdd()(input1, input2) model = keras.Model([input1, input2], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit( x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))], y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) # Check serialization. model = keras.Model.from_config( model.get_config(), custom_objects={'MyAdd': MyAdd}) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit( x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))], y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) @keras_parameterized.run_all_keras_modes def test_call_kwarg_derived_from_keras_layer(self): class MaybeAdd(keras.layers.Layer): def call(self, x1, x2=None): if x2 is not None: return x1 + x2 return x1 input1 = keras.Input(10) input2 = keras.Input(10) outputs = MaybeAdd()(input1, x2=input2) model = keras.Model([input1, input2], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit( x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))], y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) model = keras.Model.from_config( model.get_config(), custom_objects={'MaybeAdd': MaybeAdd}) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit( x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))], y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) @keras_parameterized.run_all_keras_modes def test_call_nested_arg_derived_from_keras_layer(self): class AddAll(keras.layers.Layer): def call(self, x1, x2, x3=None): out = x1 + x2 if x3 is not None: for t in x3.values(): out += t return out input1 = keras.Input(10) input2 = keras.Input(10) input3 = keras.Input(10) outputs = AddAll()( input1, 4 * array_ops.ones((1, 10)), x3={ 'a': input2, 'b': input3, 'c': 5 * array_ops.ones((1, 10)) }) model = keras.Model([input1, input2, input3], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit( x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))], y=15 * np.ones((10, 10)), batch_size=2) # Check that all inputs were correctly added. 
self.assertEqual(history.history['loss'][0], 0.0) model = keras.Model.from_config( model.get_config(), custom_objects={'AddAll': AddAll}) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit( x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))], y=15 * np.ones((10, 10)), batch_size=2) # Check that all inputs were correctly added. self.assertEqual(history.history['loss'][0], 0.0) @keras_parameterized.run_all_keras_modes def test_multi_output_model_with_none_masking(self): def func(x): return [x * 0.2, x * 0.3] def output_shape(input_shape): return [input_shape, input_shape] i = keras.layers.Input(shape=(3, 2, 1)) o = keras.layers.Lambda(function=func, output_shape=output_shape)(i) self.assertEqual(keras.backend.int_shape(o[0]), (None, 3, 2, 1)) self.assertEqual(keras.backend.int_shape(o[1]), (None, 3, 2, 1)) o = keras.layers.add(o) model = keras.Model(i, o) model.run_eagerly = testing_utils.should_run_eagerly() model._experimental_run_tf_function = testing_utils.should_run_tf_function() i2 = keras.layers.Input(shape=(3, 2, 1)) o2 = model(i2) model2 = keras.Model(i2, o2) model2.run_eagerly = testing_utils.should_run_eagerly() model2._experimental_run_tf_function = testing_utils.should_run_tf_function( ) x = np.random.random((4, 3, 2, 1)) out = model2.predict(x) assert out.shape == (4, 3, 2, 1) self.assertAllClose(out, x * 0.2 + x * 0.3, atol=1e-4) @keras_parameterized.run_all_keras_modes def test_constant_initializer_with_numpy(self): initializer = keras.initializers.Constant(np.ones((3, 2))) model = keras.models.Sequential() model.add( keras.layers.Dense(2, input_shape=(3,), kernel_initializer=initializer)) model.add(keras.layers.Dense(3)) model.compile( loss='mse', optimizer='sgd', metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) json_str = model.to_json() keras.models.model_from_json(json_str) def test_subclassed_error_if_init_not_called(self): class MyNetwork(network_lib.Network): def __init__(self): self._foo = [keras.layers.Dense(10), keras.layers.Dense(10)] with self.assertRaisesRegexp(RuntimeError, 'forgot to call'): MyNetwork() @test_util.run_in_graph_and_eager_modes() def test_int_input_shape(self): inputs = keras.Input(10) self.assertEqual([None, 10], inputs.shape.as_list()) inputs_with_batch = keras.Input(batch_size=20, shape=5) self.assertEqual([20, 5], inputs_with_batch.shape.as_list()) @test_util.run_in_graph_and_eager_modes() def test_model_initialization(self): # Functional model inputs = input_layer_lib.Input(shape=(32,)) outputs = keras.layers.Dense(4)(inputs) with self.assertRaisesRegexp(TypeError, 'unexpected argument'): model = training.Model(inputs, outputs, name='m', trainable=False, dtype='int64') with self.assertRaisesRegexp(TypeError, 'unexpected argument'): model = training.Model(inputs, outputs, name='m', trainable=False, dynamic=False) model = training.Model(inputs, outputs, name='m', trainable=False) self.assertEqual('m', model.name) self.assertFalse(model.trainable) self.assertFalse(model.dynamic) # Subclassed model model = training.Model(name='subclassed', trainable=True, dtype='int64', dynamic=True) self.assertEqual('subclassed', model.name) self.assertTrue(model.dynamic) self.assertTrue(model.trainable) w = model.add_weight('w', [], initializer=keras.initializers.Constant(1)) self.assertEqual(dtypes.int64, w.dtype) def 
test_disconnected_inputs(self): input_tensor1 = input_layer_lib.Input(shape=[200], name='a') input_tensor2 = input_layer_lib.Input(shape=[10], name='b') output_tensor1 = keras.layers.Dense(units=10)(input_tensor1) net = keras.engine.network.Network( inputs=[input_tensor1, input_tensor2], outputs=[output_tensor1]) net2 = keras.engine.network.Network.from_config(net.get_config()) self.assertLen(net2.inputs, 2) self.assertEqual('a', net2.layers[0].name) self.assertEqual('b', net2.layers[1].name) class DeferredModeTest(test.TestCase): @test_util.run_in_graph_and_eager_modes() def testSimpleNetworkBuilding(self): inputs = input_layer_lib.Input(shape=(32,)) if context.executing_eagerly(): self.assertEqual(inputs.dtype.name, 'float32') self.assertEqual(inputs.shape.as_list(), [None, 32]) x = keras.layers.Dense(2)(inputs) if context.executing_eagerly(): self.assertEqual(x.dtype.name, 'float32') self.assertEqual(x.shape.as_list(), [None, 2]) outputs = keras.layers.Dense(4)(x) network = network_lib.Network(inputs, outputs) self.assertIsInstance(network, network_lib.Network) if context.executing_eagerly(): # It should be possible to call such a network on EagerTensors. inputs = constant_op.constant( np.random.random((10, 32)).astype('float32')) outputs = network(inputs) self.assertEqual(outputs.shape.as_list(), [10, 4]) @test_util.run_in_graph_and_eager_modes() def testMultiIONetworkBuilding(self): input_a = input_layer_lib.Input(shape=(32,)) input_b = input_layer_lib.Input(shape=(16,)) a = keras.layers.Dense(16)(input_a) class AddLayer(keras.layers.Layer): def call(self, inputs): return inputs[0] + inputs[1] c = AddLayer()([a, input_b]) # pylint: disable=not-callable c = keras.layers.Dense(2)(c) network = network_lib.Network([input_a, input_b], [a, c]) if context.executing_eagerly(): a_val = constant_op.constant( np.random.random((10, 32)).astype('float32')) b_val = constant_op.constant( np.random.random((10, 16)).astype('float32')) outputs = network([a_val, b_val]) self.assertEqual(len(outputs), 2) self.assertEqual(outputs[0].shape.as_list(), [10, 16]) self.assertEqual(outputs[1].shape.as_list(), [10, 2]) class DefaultShapeInferenceBehaviorTest(keras_parameterized.TestCase): def _testShapeInference(self, model, input_shape, expected_output_shape): input_value = np.random.random(input_shape) output_value = model.predict(input_value) self.assertEqual(output_value.shape, expected_output_shape) @test_util.run_in_graph_and_eager_modes() def testSingleInputCase(self): class LayerWithOneInput(keras.layers.Layer): def build(self, input_shape): self.w = array_ops.ones(shape=(3, 4)) def call(self, inputs): return keras.backend.dot(inputs, self.w) inputs = input_layer_lib.Input(shape=(3,)) layer = LayerWithOneInput() if context.executing_eagerly(): self.assertEqual( layer.compute_output_shape((None, 3)).as_list(), [None, 4]) # As a side-effect, compute_output_shape builds the layer. self.assertTrue(layer.built) # We can still query the layer's compute_output_shape with compatible # input shapes. 
self.assertEqual( layer.compute_output_shape((6, 3)).as_list(), [6, 4]) outputs = layer(inputs) model = keras.Model(inputs, outputs) self._testShapeInference(model, (2, 3), (2, 4)) @test_util.run_in_graph_and_eager_modes() def testMultiInputOutputCase(self): class MultiInputOutputLayer(keras.layers.Layer): def build(self, input_shape): self.w = array_ops.ones(shape=(3, 4)) def call(self, inputs): a = keras.backend.dot(inputs[0], self.w) b = a + inputs[1] return [a, b] input_a = input_layer_lib.Input(shape=(3,)) input_b = input_layer_lib.Input(shape=(4,)) output_a, output_b = MultiInputOutputLayer()([input_a, input_b]) model = keras.Model([input_a, input_b], [output_a, output_b]) output_a_val, output_b_val = model.predict( [np.random.random((2, 3)), np.random.random((2, 4))]) self.assertEqual(output_a_val.shape, (2, 4)) self.assertEqual(output_b_val.shape, (2, 4)) @test_util.run_in_graph_and_eager_modes() def testTrainingArgument(self): class LayerWithTrainingArg(keras.layers.Layer): def build(self, input_shape): self.w = array_ops.ones(shape=(3, 4)) def call(self, inputs, training): return keras.backend.dot(inputs, self.w) inputs = input_layer_lib.Input(shape=(3,)) outputs = LayerWithTrainingArg()(inputs, training=False) model = keras.Model(inputs, outputs) self._testShapeInference(model, (2, 3), (2, 4)) @test_util.run_in_graph_and_eager_modes() def testNoneInShape(self): class Model(keras.Model): def __init__(self): super(Model, self).__init__() self.conv1 = keras.layers.Conv2D(8, 3) self.pool = keras.layers.GlobalAveragePooling2D() self.fc = keras.layers.Dense(3) def call(self, x): x = self.conv1(x) x = self.pool(x) x = self.fc(x) return x model = Model() model.build(tensor_shape.TensorShape((None, None, None, 1))) self.assertTrue(model.built, 'Model should be built') self.assertTrue(model.weights, 'Model should have its weights created as it ' 'has been built') sample_input = array_ops.ones((1, 10, 10, 1)) output = model(sample_input) self.assertEqual(output.shape, (1, 3)) @test_util.run_in_graph_and_eager_modes() def testNoneInShapeWithCompoundModel(self): class BasicBlock(keras.Model): def __init__(self): super(BasicBlock, self).__init__() self.conv1 = keras.layers.Conv2D(8, 3) self.pool = keras.layers.GlobalAveragePooling2D() self.dense = keras.layers.Dense(3) def call(self, x): x = self.conv1(x) x = self.pool(x) x = self.dense(x) return x class CompoundModel(keras.Model): def __init__(self): super(CompoundModel, self).__init__() self.block = BasicBlock() def call(self, x): x = self.block(x) # pylint: disable=not-callable return x model = CompoundModel() model.build(tensor_shape.TensorShape((None, None, None, 1))) self.assertTrue(model.built, 'Model should be built') self.assertTrue(model.weights, 'Model should have its weights created as it ' 'has been built') sample_input = array_ops.ones((1, 10, 10, 1)) output = model(sample_input) # pylint: disable=not-callable self.assertEqual(output.shape, (1, 3)) @test_util.run_in_graph_and_eager_modes() def testNoneInShapeWithFunctinalAPI(self): class BasicBlock(keras.Model): # Inherting from keras.layers.Layer since we are calling this layer # inside a model created using functional API. 
def __init__(self): super(BasicBlock, self).__init__() self.conv1 = keras.layers.Conv2D(8, 3) def call(self, x): x = self.conv1(x) return x input_layer = keras.layers.Input(shape=(None, None, 1)) x = BasicBlock()(input_layer) x = keras.layers.GlobalAveragePooling2D()(x) output_layer = keras.layers.Dense(3)(x) model = keras.Model(inputs=input_layer, outputs=output_layer) model.build(tensor_shape.TensorShape((None, None, None, 1))) self.assertTrue(model.built, 'Model should be built') self.assertTrue(model.weights, 'Model should have its weights created as it ' 'has been built') sample_input = array_ops.ones((1, 10, 10, 1)) output = model(sample_input) self.assertEqual(output.shape, (1, 3)) @keras_parameterized.run_all_keras_modes def test_sequential_as_downstream_of_masking_layer(self): inputs = keras.layers.Input(shape=(3, 4)) x = keras.layers.Masking(mask_value=0., input_shape=(3, 4))(inputs) s = keras.Sequential() s.add(keras.layers.Dense(5, input_shape=(4,))) x = keras.layers.wrappers.TimeDistributed(s)(x) model = keras.Model(inputs=inputs, outputs=x) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model_input = np.random.randint( low=1, high=5, size=(10, 3, 4)).astype('float32') for i in range(4): model_input[i, i:, :] = 0. model.fit(model_input, np.random.random((10, 3, 5)), epochs=1, batch_size=6) if not context.executing_eagerly(): # Note: this doesn't work in eager due to DeferredTensor/ops compatibility # issue. mask_outputs = [model.layers[1].compute_mask(model.layers[1].input)] mask_outputs += [model.layers[2].compute_mask( model.layers[2].input, mask_outputs[-1])] func = keras.backend.function([model.input], mask_outputs) mask_outputs_val = func([model_input]) self.assertAllClose(mask_outputs_val[0], np.any(model_input, axis=-1)) self.assertAllClose(mask_outputs_val[1], np.any(model_input, axis=-1)) @test_util.run_in_graph_and_eager_modes() def test_external_keras_serialization_compat_input_layers(self): inputs = keras.Input(shape=(10,)) outputs = keras.layers.Dense(1)(inputs) model = keras.Model(inputs, outputs) config = model.get_config() # Checks that single inputs and outputs are still saved as 1-element lists. # Saving as 1-element lists or not is equivalent in TF Keras, but only the # 1-element list format is supported in TF.js and keras-team/Keras. self.assertLen(config['input_layers'], 1) self.assertLen(config['output_layers'], 1) @test_util.run_in_graph_and_eager_modes() def test_external_keras_serialization_compat_inbound_nodes(self): # Check single Tensor input. inputs = keras.Input(shape=(10,), name='in') outputs = keras.layers.Dense(1)(inputs) model = keras.Model(inputs, outputs) config = model.get_config() self.assertEqual(config['layers'][1]['inbound_nodes'], [[['in', 0, 0, {}]]]) # Check multiple Tensor input. 
inputs1 = keras.Input(shape=(10,), name='in1') inputs2 = keras.Input(shape=(10,), name='in2') outputs = keras.layers.Add()([inputs1, inputs2]) model = keras.Model([inputs1, inputs2], outputs) config = model.get_config() self.assertEqual(config['layers'][2]['inbound_nodes'], [[['in1', 0, 0, {}], ['in2', 0, 0, {}]]]) class GraphUtilsTest(test.TestCase): @test_util.run_deprecated_v1 def testGetReachableFromInputs(self): with self.cached_session(): pl_1 = array_ops.placeholder(shape=None, dtype='float32') pl_2 = array_ops.placeholder(shape=None, dtype='float32') pl_3 = array_ops.placeholder(shape=None, dtype='float32') x_1 = pl_1 + pl_2 x_2 = pl_2 * 2 x_3 = pl_3 + 1 x_4 = x_1 + x_2 x_5 = x_3 * pl_1 self.assertEqual( keras.utils.tf_utils.get_reachable_from_inputs([pl_1]), {pl_1, x_1, x_4, x_5, x_1.op, x_4.op, x_5.op}) self.assertEqual( keras.utils.tf_utils.get_reachable_from_inputs([pl_1, pl_2]), {pl_1, pl_2, x_1, x_2, x_4, x_5, x_1.op, x_2.op, x_4.op, x_5.op}) self.assertEqual( keras.utils.tf_utils.get_reachable_from_inputs([pl_3]), {pl_3, x_3, x_5, x_3.op, x_5.op}) self.assertEqual( keras.utils.tf_utils.get_reachable_from_inputs([x_3]), {x_3, x_5, x_5.op}) @test_util.run_all_in_graph_and_eager_modes class NestedNetworkTest(test.TestCase): def test_nested_inputs_network(self): inputs = {'x1': keras.Input(shape=(1,)), 'x2': keras.Input(shape=(1,))} outputs = keras.layers.Add()([inputs['x1'], inputs['x2']]) network = keras.engine.network.Network(inputs, outputs) network = keras.engine.network.Network.from_config(network.get_config()) result_tensor = network({ 'x': array_ops.ones((1, 1), 'float32'), 'y': array_ops.ones((1, 1), 'float32') }) result = self.evaluate(result_tensor) self.assertAllEqual(result, [[2.]]) # TODO(b/122726584): Investigate why concrete batch is flaky in some builds. 
output_shape = network.compute_output_shape({ 'x1': (None, 1), 'x2': (None, 1) }) self.assertListEqual(output_shape.as_list(), [None, 1]) def test_nested_outputs_network(self): inputs = keras.Input(shape=(1,)) outputs = { 'x+x': keras.layers.Add()([inputs, inputs]), 'x*x': keras.layers.Multiply()([inputs, inputs]) } network = keras.engine.network.Network(inputs, outputs) network = keras.engine.network.Network.from_config(network.get_config()) result_tensor = network(array_ops.ones((1, 1), 'float32')) result = self.evaluate(result_tensor) self.assertAllEqual(result['x+x'], [[2.]]) self.assertAllEqual(result['x*x'], [[1.]]) output_shape = network.compute_output_shape((None, 1)) self.assertListEqual(output_shape['x+x'].as_list(), [None, 1]) self.assertListEqual(output_shape['x*x'].as_list(), [None, 1]) def test_nested_network_inside_network(self): inner_inputs = { 'x1': keras.Input(shape=(1,)), 'x2': keras.Input(shape=(1,)) } inner_outputs = { 'x1+x2': keras.layers.Add()([inner_inputs['x1'], inner_inputs['x2']]), 'x1*x2': keras.layers.Multiply()([inner_inputs['x1'], inner_inputs['x2']]) } inner_network = keras.engine.network.Network(inner_inputs, inner_outputs) inputs = [keras.Input(shape=(1,)), keras.Input(shape=(1,))] middle = inner_network({'x1': inputs[0], 'x2': inputs[1]}) outputs = keras.layers.Add()([middle['x1+x2'], middle['x1*x2']]) network = keras.engine.network.Network(inputs, outputs) network = keras.engine.network.Network.from_config(network.get_config()) # Computes: `(x1+x2) + (x1*x2)` result_tensor = network( [array_ops.ones((1, 1), 'float32'), array_ops.ones((1, 1), 'float32')]) result = self.evaluate(result_tensor) self.assertAllEqual(result, [[3.]]) output_shape = network.compute_output_shape([(None, 1), (None, 1)]) self.assertListEqual(output_shape.as_list(), [None, 1]) @test_util.run_in_graph_and_eager_modes def test_updates_with_direct_call(self): inputs = keras.Input(shape=(10,)) x = keras.layers.BatchNormalization()(inputs) x = keras.layers.Dense(10)(x) model = keras.Model(inputs, x) ph = keras.backend.placeholder(shape=(10, 10)) model(ph) self.assertLen(model.get_updates_for(ph), 2) self.assertLen(model.get_updates_for(None), 0) @keras_parameterized.run_all_keras_modes class AddLossTest(keras_parameterized.TestCase): def test_add_loss_outside_call_only_loss(self): inputs = keras.Input((10,)) mid = keras.layers.Dense(10)(inputs) outputs = keras.layers.Dense(1)(mid) model = keras.Model(inputs, outputs) model.add_loss(math_ops.reduce_mean(outputs)) self.assertLen(model.losses, 1) initial_weights = model.get_weights() x = np.ones((10, 10)) model.compile( 'sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, batch_size=2, epochs=1) model2 = model.from_config(model.get_config()) model2.compile( 'sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model2.set_weights(initial_weights) model2.fit(x, batch_size=2, epochs=1) # The TFOpLayer and the AddLoss layer are serialized. 
self.assertLen(model2.layers, 5) self.assertAllClose(model.get_weights(), model2.get_weights()) def test_add_loss_outside_call_multiple_losses(self): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10)(inputs) x2 = keras.layers.Dense(10)(x1) outputs = keras.layers.Dense(1)(x2) model = keras.Model(inputs, outputs) model.add_loss(math_ops.reduce_sum(x1 * x2)) model.add_loss(math_ops.reduce_mean(outputs)) self.assertLen(model.losses, 2) initial_weights = model.get_weights() x, y = np.ones((10, 10)), np.ones((10, 1)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, batch_size=2, epochs=1) model2 = model.from_config(model.get_config()) model2.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model2.set_weights(initial_weights) model2.fit(x, y, batch_size=2, epochs=1) self.assertAllClose(model.get_weights(), model2.get_weights()) @keras_parameterized.run_all_keras_modes class WeightAccessTest(keras_parameterized.TestCase): def test_functional_model(self): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10)(inputs) x2 = keras.layers.Dense(10)(x1) outputs = keras.layers.Dense(1)(x2) model = keras.Model(inputs, outputs) self.assertEqual(len(model.weights), 6) def test_sequential_model_with_input_shape(self): x1 = keras.layers.Dense(10, input_shape=(10,)) x2 = keras.layers.Dense(10) x3 = keras.layers.Dense(1) model = keras.models.Sequential([x1, x2, x3]) self.assertEqual(len(model.weights), 6) def test_sequential_model_without_input_shape(self): x1 = keras.layers.Dense(10) x2 = keras.layers.Dense(10) x3 = keras.layers.Dense(1) model = keras.models.Sequential([x1, x2, x3]) with self.assertRaisesRegexp( ValueError, 'Weights for model .* have not yet been created'): _ = model.weights def test_subclass_model_with_build_method(self): class SubclassModel(keras.models.Model): def build(self, input_shape): self.w = self.add_weight(shape=input_shape[-1], initializer='ones') def call(self, inputs): return inputs * self.w model = SubclassModel() with self.assertRaisesRegexp( ValueError, 'Weights for model .* have not yet been created'): _ = model.weights model(keras.Input((10,))) self.assertEqual(len(model.weights), 1) def test_subclass_model_without_build_method(self): class SubclassModel(keras.models.Model): def __init__(self): super(SubclassModel, self).__init__() self.w = self.add_weight(shape=(), initializer='ones') def call(self, inputs): return inputs * self.w model = SubclassModel() self.assertEqual(len(model.weights), 1) @test_util.run_all_in_graph_and_eager_modes class DTypeTest(keras_parameterized.TestCase): @testing_utils.enable_v2_dtype_behavior def test_graph_network_dtype(self): inputs = keras.Input((10,)) outputs = keras.layers.Dense(10)(inputs) network = network_lib.Network(inputs, outputs) self.assertEqual(network.dtype, 'float32') @testing_utils.enable_v2_dtype_behavior def test_subclassed_network_dtype(self): class IdentityNetwork(network_lib.Network): def call(self, inputs): return inputs network = IdentityNetwork() self.assertEqual(network.dtype, 'float32') self.assertEqual(network(array_ops.constant(1, 'float64')).dtype, 'float32') network = IdentityNetwork(dtype='float16') self.assertEqual(network.dtype, 'float16') self.assertEqual(network(array_ops.constant(1, 'float64')).dtype, 'float16') network = IdentityNetwork(autocast=False) self.assertEqual(network.dtype, 'float32') 
    self.assertEqual(
        network(array_ops.constant(1, 'float64')).dtype, 'float64')


if __name__ == '__main__':
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/network_test.py
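For readers skimming the test file above, the following is a minimal, self-contained sketch (not part of the original sources) of the per-node topology accessors that `network_test.py` exercises. It assumes a standard TensorFlow installation where `tf.keras` mirrors the `tensorflow.python.keras` functional-API behavior asserted in the tests.

# A minimal sketch, assuming `tf.keras` exposes the same node accessors
# (`get_input_at`, `get_output_at`, `get_*_shape_at`) tested above.
import numpy as np
import tensorflow as tf

a = tf.keras.Input(shape=(32,), name='input_a')
b = tf.keras.Input(shape=(32,), name='input_b')
dense = tf.keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)   # first call creates inbound node 0
b_2 = dense(b)   # second call creates inbound node 1

# A layer shared across several calls must be queried per node; the plain
# `.input` / `.output` properties raise for layers with multiple inbound nodes.
assert dense.get_input_at(0) is a
assert dense.get_input_shape_at(0) == (None, 32)
assert dense.get_output_shape_at(1) == (None, 16)

model = tf.keras.Model([a, b], [a_2, b_2])
outs = model.predict([np.ones((4, 32)), np.ones((4, 32))])
print([o.shape for o in outs])  # expected: [(4, 16), (4, 16)]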
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Part of the Keras training engine related to plain array data. """ # pylint: disable=protected-access from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import numpy as np from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.eager import context from tensorflow.python.framework import errors from tensorflow.python.keras import backend as K from tensorflow.python.keras import callbacks as cbks from tensorflow.python.keras.distribute import distributed_training_utils from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.utils.generic_utils import make_batches from tensorflow.python.keras.utils.generic_utils import slice_arrays from tensorflow.python.keras.utils.mode_keys import ModeKeys from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest try: from scipy.sparse import issparse # pylint: disable=g-import-not-at-top except ImportError: issparse = None def model_iteration(model, inputs, targets=None, sample_weights=None, batch_size=None, epochs=1, verbose=1, callbacks=None, val_inputs=None, val_targets=None, val_sample_weights=None, shuffle=True, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, mode=ModeKeys.TRAIN, validation_in_fit=False, prepared_feed_values_from_dataset=False, steps_name='steps', **kwargs): """Loop function for arrays of data with modes TRAIN/TEST/PREDICT. Arguments: model: Keras Model instance. inputs: Either a list or dictionary of arrays, or a dataset instance. targets: List/dictionary of input arrays. sample_weights: Optional list of sample weight arrays. batch_size: Integer batch size or None if unknown. epochs: Number of times to iterate over the data verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. Note that the progress bar is not particularly useful when logged to a file, so verbose=2 is recommended when not running interactively (eg, in a production environment). callbacks: List of callbacks to be called during training val_inputs: Either a list or dictionary of arrays, or a dataset instance. val_targets: List/dictionary of target arrays. val_sample_weights: Optional list of sample weight arrays. shuffle: Whether to shuffle the data at the beginning of each epoch concatenation of list the display names of the outputs of `f` and the list of display names of the outputs of `f_val`. initial_epoch: Epoch at which to start training (useful for resuming a previous training run) steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. Ignored with the default value of `None`. 
validation_steps: Number of steps to run validation for (only if doing validation from data tensors). Ignored with the default value of `None`. validation_freq: Only relevant if validation data is provided. Integer or `collections_abc.Container` instance (e.g. list, tuple, etc.). If an integer, specifies how many training epochs to run before a new validation run is performed, e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs validation at the end of the 1st, 2nd, and 10th epochs. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. validation_in_fit: if true, then this method is invoked from within training iteration (for validation). In the case where `val_inputs` is a dataset, this flag indicates that its iterator and feed values are already created so should properly reuse resources. prepared_feed_values_from_dataset: if True, `inputs` is a list of feed tensors returned from `_prepare_feed_values` call on the validation dataset, so do not call it again on `inputs`. Should only be used for inline validation (i.e., only if `validation_in_fit` is also True). steps_name: The string name of the steps argument, either `steps`, `validation_steps`, or `steps_per_epoch`. Only used for error message formatting. **kwargs: Additional arguments for backwards compatibility. Returns: - In TRAIN mode: `History` object. - In TEST mode: Evaluation metrics. - In PREDICT mode: Outputs of the Model called on inputs. Raises: ValueError: in case of invalid arguments. """ # Backwards compatibility. if 'steps' in kwargs: steps_per_epoch = kwargs.pop('steps') if kwargs: raise TypeError('Unknown arguments: %s' % (kwargs,)) # In case we were passed a dataset, we extract symbolic tensors from it. reset_dataset_after_each_epoch = False input_iterator = None is_dataset = isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)) # TODO(fchollet): consider moving `steps_per_epoch` inference to # _standardize_user_data and set reset_dataset_after_each_epoch as an # attribute on the dataset instance. if is_dataset: if steps_per_epoch is None: reset_dataset_after_each_epoch = True steps_per_epoch = training_utils.infer_steps_for_dataset( model, inputs, steps_per_epoch, epochs=epochs, steps_name=steps_name) input_iterator = _get_iterator(inputs, model._distribution_strategy) # Enter tf.distribute.Strategy scope. if model._distribution_strategy: scope = distributed_training_utils.distributed_scope( strategy=model._distribution_strategy, learning_phase=(1 if mode == ModeKeys.TRAIN else 0)) scope.__enter__() use_steps = is_dataset or steps_per_epoch is not None do_validation = val_inputs is not None # Convert Eager Tensors to NumPy arrays to support batching/shuffling. inputs, targets, sample_weights = training_utils. \ convert_eager_tensors_to_numpy((inputs, targets, sample_weights)) # Prepare input data. inputs = input_iterator or inputs if validation_in_fit and prepared_feed_values_from_dataset: # When invoking validation in training loop, avoid creating iterator and # list of feed values for the same validation dataset multiple times (which # essentially would call `iterator.get_next()` that slows down execution and # leads to OOM errors eventually. ins = inputs else: ins = _prepare_feed_values(model, inputs, targets, sample_weights, mode) # `ins` is a function when a distribute strategy is used in Eager mode. In # that case `is_dataset` is True. 
The code branches that have requirements # about the type of `ins` do not trigger in the distributed case. if not is_dataset: num_samples_or_steps = _get_num_samples_or_steps(ins, batch_size, steps_per_epoch) else: num_samples_or_steps = steps_per_epoch # Update sample_weight_mode of the model if sample_weights is specified by the # user. We need to call this function after we have a handle on the inputs # (both numpy arrays and datasets) in order to determine if the user has # specified sample_weights. _update_sample_weight_mode(model, mode, ins) # Get step function and loop type. As part of building the execution # function we recompile the metrics based on the updated # sample_weight_mode value. f = _make_execution_function(model, mode) # Prepare validation data. Hold references to the iterator and the input list # to properly reinitialize and reuse in multiple validation passes. val_iterator = None if isinstance(val_inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)): if validation_steps is None: # Because we pass an iterator feed instead of a Dataset to the eval # model_iteration() call, it will not trigger the dataset-input path # that determines the number of steps required. To avoid this issue, # set validation_steps here if validation_steps is None. validation_steps = training_utils.infer_steps_for_dataset( model, val_inputs, validation_steps, epochs=epochs, steps_name='validation_steps') val_iterator = _get_iterator(val_inputs, model._distribution_strategy) val_inputs = _prepare_feed_values( model, val_iterator, val_targets, val_sample_weights, ModeKeys.TEST) # Get num steps for printing. val_samples_or_steps = validation_steps else: # Get num samples for printing. val_samples_or_steps = val_inputs and nest.flatten( val_inputs)[0].shape[0] or None if mode == ModeKeys.TRAIN and verbose: _print_train_info(num_samples_or_steps, val_samples_or_steps, is_dataset) # Configure callbacks. count_mode = 'steps' if use_steps else 'samples' callbacks = cbks.configure_callbacks( callbacks, model, do_validation=do_validation, batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, samples=num_samples_or_steps, verbose=0, # Handle ProgBarLogger separately in this loop. mode=mode) # TODO(omalleyt): Handle ProgBar as part of Callbacks once hooks are ready. progbar = training_utils.get_progbar(model, count_mode) progbar.params = callbacks.params progbar.params['verbose'] = verbose # Find beforehand arrays that need sparse-to-dense conversion. if issparse is not None and not use_steps: indices_for_conversion_to_dense = [] feed = _get_model_feed(model, mode) for i, (input_data, feed_tensor) in enumerate(zip(ins, feed)): if issparse(input_data) and not K.is_sparse(feed_tensor): indices_for_conversion_to_dense.append(i) # Select aggregation method. 
if mode == ModeKeys.PREDICT: aggregator = training_utils.OutputsAggregator( use_steps, num_samples=None if steps_per_epoch else num_samples_or_steps, steps=steps_per_epoch) else: aggregator = training_utils.MetricsAggregator( use_steps, num_samples=None if steps_per_epoch else num_samples_or_steps, steps=steps_per_epoch) if model._compile_distribution: distributed_training_utils._copy_weights_to_distributed_model(model, mode) callbacks.model.stop_training = False callbacks._call_begin_hook(mode) progbar.on_train_begin() initial_epoch = model._maybe_load_initial_epoch_from_ckpt(initial_epoch, mode) for epoch in range(initial_epoch, epochs): if callbacks.model.stop_training: break # Setup work for each epoch epoch_logs = {} model.reset_metrics() if mode == ModeKeys.TRAIN: callbacks.on_epoch_begin(epoch, epoch_logs) progbar.on_epoch_begin(epoch, epoch_logs) if use_steps: # Step-wise loop. if steps_per_epoch is None: # Loop over dataset until `OutOfRangeError` is raised. target_steps = np.inf else: # Loop over dataset for the specified number of steps. target_steps = steps_per_epoch step = 0 while step < target_steps: batch_logs = {'batch': step, 'size': 1} callbacks._call_batch_hook(mode, 'begin', step, batch_logs) progbar.on_batch_begin(step, batch_logs) # Get outputs. try: # `ins` can be callable in tf.distribute.Strategy + eager case. if not callable(ins) or ( model._distribution_strategy and not distributed_training_utils.is_distributing_by_cloning(model)): actual_inputs = ins else: actual_inputs = ins() batch_outs = f(actual_inputs) except errors.OutOfRangeError: if is_dataset: # The dataset passed by the user ran out of batches. # Now we know the cardinality of the dataset. # If steps_per_epoch was specified, then running out of data is # unexpected, so we stop training and inform the user. if steps_per_epoch: callbacks.model.stop_training = True logging.warning( 'Your dataset ran out of data; interrupting training. ' 'Make sure that your dataset can generate at least ' '`%s * epochs` batches (in this case, %d batches). ' 'You may need to use the repeat() function when ' 'building your dataset.' % (steps_name, steps_per_epoch * epochs)) elif step > 0: steps_per_epoch = step aggregator.steps = steps_per_epoch if mode == ModeKeys.TRAIN: progbar.params['steps'] = steps_per_epoch progbar.progbar.target = steps_per_epoch else: # We ran out of batches while the user passed an iterator (legacy). callbacks.model.stop_training = True logging.warning( 'Your dataset iterator ran out of data; ' 'interrupting training. Make sure that your iterator ' 'can generate at least `%s * epochs` ' 'batches (in this case, %d batches). You may need to' 'use the repeat() function when building your ' 'dataset.' % (steps_name, steps_per_epoch * epochs)) break if not isinstance(batch_outs, list): batch_outs = [batch_outs] if model._distribution_strategy: batch_outs = distributed_training_utils._per_replica_aggregate_batch( model._distribution_strategy, batch_outs, model, mode) # Aggregate results. if step == 0: aggregator.create(batch_outs) aggregator.aggregate(batch_outs) # Callbacks batch end. batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode) callbacks._call_batch_hook(mode, 'end', step, batch_logs) progbar.on_batch_end(step, batch_logs) step += 1 if callbacks.model.stop_training: break else: # Sample-wise loop. 
index_array = np.arange(num_samples_or_steps) if shuffle == 'batch': index_array = training_utils.batch_shuffle(index_array, batch_size) elif shuffle: np.random.shuffle(index_array) batches = make_batches(num_samples_or_steps, batch_size) for batch_index, (batch_start, batch_end) in enumerate(batches): batch_ids = index_array[batch_start:batch_end] # Slice into a batch. if len(batches) == 1: # If we only have one batch, do not slice. This takes care of # composite tensors in non-Dataset modes; we currently don't support # slicing them. # TODO(b/133517906): Add slicing support. ins_batch = ins else: try: if ins and isinstance(ins[-1], int): # Do not slice the training phase flag. ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]] else: ins_batch = slice_arrays(ins, batch_ids) except TypeError: raise TypeError('TypeError while preparing batch. ' 'If using HDF5 input data, ' 'pass shuffle="batch".') # Sparse to dense conversion. if issparse is not None: for i in indices_for_conversion_to_dense: ins_batch[i] = ins_batch[i].toarray() # Callbacks batch_begin. batch_logs = {'batch': batch_index, 'size': len(batch_ids)} callbacks._call_batch_hook(mode, 'begin', batch_index, batch_logs) progbar.on_batch_begin(batch_index, batch_logs) # Get outputs. batch_outs = f(ins_batch) if not isinstance(batch_outs, list): batch_outs = [batch_outs] # Aggregate results. if batch_index == 0: aggregator.create(batch_outs) aggregator.aggregate(batch_outs, batch_start, batch_end) # Callbacks batch end. batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode) callbacks._call_batch_hook(mode, 'end', batch_index, batch_logs) progbar.on_batch_end(batch_index, batch_logs) if callbacks.model.stop_training: break aggregator.finalize() results = aggregator.results epoch_logs = cbks.make_logs(model, epoch_logs, results, mode) if len(results) == 1: results = results[0] # Run the test loop every `validation_freq` epochs during training. if (do_validation and training_utils.should_run_validation(validation_freq, epoch) and not callbacks.model.stop_training): if model._compile_distribution: # Since we create a new clone from the original model we need to copy # the weights back to the original model before we can run validation. distributed_training_utils._copy_weights_to_original_model( model, ModeKeys.TRAIN) val_results = model_iteration( model, val_inputs, targets=val_targets, sample_weights=val_sample_weights, batch_size=batch_size, steps_per_epoch=validation_steps, callbacks=callbacks, verbose=0, mode=ModeKeys.TEST, validation_in_fit=True, prepared_feed_values_from_dataset=(val_iterator is not None), steps_name='validation_steps') if not isinstance(val_results, list): val_results = [val_results] epoch_logs = cbks.make_logs( model, epoch_logs, val_results, mode, prefix='val_') if val_iterator and epoch < epochs - 1: _reinitialize_iterator(val_iterator, model._distribution_strategy) if mode == ModeKeys.TRAIN: # Epochs only apply to `fit`. callbacks.on_epoch_end(epoch, epoch_logs) progbar.on_epoch_end(epoch, epoch_logs) # Reinitialize dataset iterator for the next epoch. if reset_dataset_after_each_epoch and epoch < epochs - 1: _reinitialize_iterator(input_iterator, model._distribution_strategy) callbacks._call_end_hook(mode) if model._distribution_strategy: if model._compile_distribution: # TODO(priyag, psv): Copy back metrics to the original model as well? 
distributed_training_utils._copy_weights_to_original_model(model, mode) scope.__exit__(None, None, None) if mode == ModeKeys.TRAIN: return model.history return results def _get_model_feed(model, mode): if mode == ModeKeys.PREDICT: feed = model._feed_inputs else: feed = ( model._feed_inputs + model._feed_targets + model._feed_sample_weights) return feed def _print_train_info(num_samples_or_steps, val_samples_or_steps, is_dataset): increment = 'steps' if is_dataset else 'samples' msg = 'Train on {0} {increment}'.format( num_samples_or_steps, increment=increment) if val_samples_or_steps: msg += ', validate on {0} {increment}'.format( val_samples_or_steps, increment=increment) print(msg) def _get_num_samples_or_steps(ins, batch_size, steps_per_epoch): """Returns total number of samples (when training in batch mode) or steps.""" if steps_per_epoch: return steps_per_epoch return training_utils.check_num_samples(ins, batch_size, steps_per_epoch, 'steps_per_epoch') def _prepare_feed_values(model, inputs, targets, sample_weights, mode): """Prepare feed values to the model execution function. Arguments: model: Model to prepare feed values for. inputs: List or dict of model inputs. targets: Optional list of model targets. sample_weights: Optional list of sample weight arrays. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. Returns: Feed values for the model in the given mode. """ if model._distribution_strategy: if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)): inputs = distributed_training_utils.get_iterator( inputs, model._distribution_strategy) def get_distributed_inputs(): return distributed_training_utils._prepare_feed_values( model, inputs, targets, sample_weights, mode) # In the eager case, we want to call the input method per step, so return # a lambda from here that can be called. Note that this is applicable only # in Distribution Strategy case as it follows the same code path for both # eager and graph modes. # TODO(priyag,omalleyt): Either we should move the training DS with # IteratorV2 to use training_generator code path, or figure out how to # set a symbolic Iterator out of a Dataset when in eager mode. if context.executing_eagerly(): return get_distributed_inputs else: return get_distributed_inputs() if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2, iterator_ops.Iterator)): inputs, targets, sample_weights = model._standardize_user_data( inputs, extract_tensors_from_dataset=True) inputs = training_utils.ModelInputs(inputs).as_list() targets = targets or [] sample_weights = sample_weights or [] ins = inputs + targets + sample_weights if mode == ModeKeys.TRAIN and not isinstance(K.symbolic_learning_phase(), int): ins += [True] # Add learning phase value. 
return ins def _get_iterator(inputs, distribution_strategy=None): if distribution_strategy: return distributed_training_utils.get_iterator( inputs, distribution_strategy) return training_utils.get_iterator(inputs) def _reinitialize_iterator(iterator, distribution_strategy=None): if distribution_strategy: distributed_training_utils.initialize_iterator( iterator, distribution_strategy) else: training_utils.initialize_iterator(iterator) def _make_execution_function(model, mode): """Makes function to run one step of model execution.""" if model._distribution_strategy: return distributed_training_utils._make_execution_function(model, mode) return model._make_execution_function(mode) def _update_sample_weight_mode(model, mode, inputs): """Updates the sample_weight_mode of a given model.""" # Add a quick return to prevent us from calling model._feed_targets that # accesses certain model properties that may not be set in the `PREDICT` mode. if mode == ModeKeys.PREDICT: return sample_weights = None # `inputs` is the model's inputs + targets + sample_weights + # learning phase placeholder if specified. To update the sample_weight_mode # we need to determine if the user has passed sample weights as part of the # input. if not callable(inputs): sample_weights = inputs[len(model._feed_inputs) + len(model._feed_targets):] has_learning_phase_pl = (mode == ModeKeys.TRAIN and not isinstance(K.symbolic_learning_phase(), int)) if has_learning_phase_pl: sample_weights = sample_weights[:-1] model._update_sample_weight_modes(sample_weights=sample_weights) # Call the DistributionStrategy specific function to update the # sample_weight_mode on the model. if model._distribution_strategy: distributed_training_utils._update_sample_weight_modes(model, mode, sample_weights) # For backwards compatibility for internal users of these loops. fit_loop = functools.partial(model_iteration, mode=ModeKeys.TRAIN) test_loop = functools.partial( model_iteration, mode=ModeKeys.TEST, shuffle=False) predict_loop = functools.partial( model_iteration, mode=ModeKeys.PREDICT, shuffle=False) class ArrayLikeTrainingLoop(training_utils.TrainingLoop): """TrainingLoop that handle inputs like array. This is the default handler for most of the input data types, includes symbolic tensors or Numpy array-like, Datasets and iterators in graph mode (since they generate symbolic tensors). This Function is used to handle model with `run_eagerly` = False. """ def fit(self, model, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0., validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, **kwargs): batch_size = model._validate_or_infer_batch_size(batch_size, steps_per_epoch, x) x, y, sample_weights = model._standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight, batch_size=batch_size, check_steps=True, steps_name='steps_per_epoch', steps=steps_per_epoch, validation_split=validation_split, shuffle=shuffle) if validation_data: val_x, val_y, val_sample_weights = model._prepare_validation_data( validation_data, batch_size, validation_steps) elif validation_split and 0. 
< validation_split < 1.: (x, y, sample_weights, val_x, val_y, val_sample_weights) = training_utils.split_training_and_validation_data( x, y, sample_weights, validation_split) else: if validation_steps: raise ValueError('`validation_steps` should not be specified if ' '`validation_data` is None.') val_x, val_y, val_sample_weights = None, None, None return fit_loop( model, inputs=x, targets=y, sample_weights=sample_weights, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, val_inputs=val_x, val_targets=val_y, val_sample_weights=val_sample_weights, shuffle=shuffle, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_freq=validation_freq, steps_name='steps_per_epoch') def evaluate(self, model, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, **kwargs): batch_size = model._validate_or_infer_batch_size(batch_size, steps, x) x, y, sample_weights = model._standardize_user_data( x, y, sample_weight=sample_weight, batch_size=batch_size, check_steps=True, steps_name='steps', steps=steps) return test_loop( model, inputs=x, targets=y, sample_weights=sample_weights, batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks) def predict(self, model, x, batch_size=None, verbose=0, steps=None, callbacks=None, **kwargs): batch_size = model._validate_or_infer_batch_size(batch_size, steps, x) x, _, _ = model._standardize_user_data( x, check_steps=True, steps_name='steps', steps=steps) return predict_loop( model, x, batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/training_arrays.py
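In `training_arrays.py` above, the sample-wise branch of `model_iteration` drives batching with a shuffled index array: the indices are shuffled (whole-array for `shuffle=True`, batch-wise via `batch_shuffle` for HDF5-style inputs), split into `(start, end)` ranges by `make_batches`, and each range selects the rows fed to the execution function. Below is a minimal NumPy-only sketch of that indexing scheme; the `make_batches` helper here is a simplified stand-in written for illustration, not the actual `keras.utils.generic_utils` implementation.

```python
import numpy as np


def make_batches(size, batch_size):
  # Simplified stand-in for keras.utils.generic_utils.make_batches:
  # returns (start, end) index ranges covering `size` samples.
  num_batches = (size + batch_size - 1) // batch_size
  return [(i * batch_size, min(size, (i + 1) * batch_size))
          for i in range(num_batches)]


num_samples, batch_size = 10, 4
x = np.arange(num_samples * 2).reshape(num_samples, 2)  # toy feature array

index_array = np.arange(num_samples)
np.random.shuffle(index_array)  # the `shuffle=True` path; `shuffle='batch'` instead shuffles within batches

for batch_index, (batch_start, batch_end) in enumerate(
    make_batches(num_samples, batch_size)):
  batch_ids = index_array[batch_start:batch_end]
  x_batch = x[batch_ids]  # analogous to `slice_arrays(ins, batch_ids)` in the loop above
  print(batch_index, batch_ids, x_batch.shape)
```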
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Input layer code (`Input` and `InputLayer`). """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend from tensorflow.python.keras.distribute import distributed_training_utils from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.engine import node as node_module from tensorflow.python.keras.utils import tf_utils from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.InputLayer') class InputLayer(base_layer.Layer): """Layer to be used as an entry point into a Network (a graph of layers). It can either wrap an existing tensor (pass an `input_tensor` argument) or create a placeholder tensor (pass arguments `input_shape`, and optionally, `dtype`). It is generally recommended to use the functional layer API via `Input` (which creates an `InputLayer`) without directly using `InputLayer`. This class can create placeholders for tf.Tensors, tf.SparseTensors, and tf.RaggedTensors by choosing 'sparse=True' or 'ragged=True'. Arguments: input_shape: Shape tuple (not including the batch axis), or `TensorShape` instance (not including the batch axis). batch_size: Optional input batch size (integer or None). dtype: Datatype of the input. input_tensor: Optional tensor to use as layer input instead of creating a placeholder. sparse: Boolean, whether the placeholder created is meant to be sparse. ragged: Boolean, whether the placeholder created is meant to be ragged. In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see https://www.tensorflow.org/guide/ragged_tensors. name: Name of the layer (string). 
""" def __init__(self, input_shape=None, batch_size=None, dtype=None, input_tensor=None, sparse=False, name=None, ragged=False, **kwargs): strategy = distribution_strategy_context.get_strategy() if strategy and batch_size is not None and \ distributed_training_utils.global_batch_size_supported(strategy): if batch_size % strategy.num_replicas_in_sync != 0: raise ValueError('The `batch_size` argument value {} cannot be ' 'divisible by number of replicas {}'.format( batch_size, strategy.num_replicas_in_sync)) batch_size = batch_size // strategy.num_replicas_in_sync if 'batch_input_shape' in kwargs: batch_input_shape = kwargs.pop('batch_input_shape') if input_shape and batch_input_shape: raise ValueError('Only provide the input_shape OR ' 'batch_input_shape argument to ' 'InputLayer, not both at the same time.') batch_size = batch_input_shape[0] input_shape = batch_input_shape[1:] if kwargs: raise ValueError('Unrecognized keyword arguments:', kwargs.keys()) if not name: prefix = 'input' name = prefix + '_' + str(backend.get_uid(prefix)) if not dtype: if input_tensor is None: dtype = backend.floatx() else: dtype = backend.dtype(input_tensor) elif input_tensor is not None and input_tensor.dtype != dtype: raise ValueError('`input_tensor.dtype` differs from `dtype`: %s vs. %s' % (input_tensor.dtype, dtype)) super(InputLayer, self).__init__(dtype=dtype, name=name) self.built = True self.sparse = sparse self.ragged = ragged self.batch_size = batch_size self.supports_masking = True if isinstance(input_shape, tensor_shape.TensorShape): input_shape = tuple(input_shape.as_list()) elif isinstance(input_shape, int): input_shape = (input_shape,) if input_tensor is None: if input_shape is not None: batch_input_shape = (batch_size,) + tuple(input_shape) else: batch_input_shape = None graph = backend.get_graph() with graph.as_default(): input_tensor = backend.placeholder( shape=batch_input_shape, dtype=dtype, name=self.name, sparse=sparse, ragged=ragged) self.is_placeholder = True self._batch_input_shape = batch_input_shape else: if not tf_utils.is_symbolic_tensor(input_tensor): raise ValueError('You should not pass an EagerTensor to `Input`. ' 'For example, instead of creating an ' 'InputLayer, you should instantiate your model and ' 'directly call it on your input.') self.is_placeholder = False self._batch_input_shape = tuple(input_tensor.shape.as_list()) # Create an input node to add to self.outbound_node # and set output_tensors' _keras_history. input_tensor._keras_history = base_layer.KerasHistory(self, 0, 0) input_tensor._keras_mask = None node_module.Node( self, inbound_layers=[], node_indices=[], tensor_indices=[], input_tensors=[input_tensor], output_tensors=[input_tensor]) def get_config(self): config = { 'batch_input_shape': self._batch_input_shape, 'dtype': self.dtype, 'sparse': self.sparse, 'ragged': self.ragged, 'name': self.name } return config @keras_export('keras.layers.Input', 'keras.Input') def Input( # pylint: disable=invalid-name shape=None, batch_size=None, name=None, dtype=None, sparse=False, tensor=None, ragged=False, **kwargs): """`Input()` is used to instantiate a Keras tensor. A Keras tensor is a tensor object from the underlying backend (Theano or TensorFlow), which we augment with certain attributes that allow us to build a Keras model just by knowing the inputs and outputs of the model. For instance, if a, b and c are Keras tensors, it becomes possible to do: `model = Model(input=[a, b], output=c)` The added Keras attribute is: `_keras_history`: Last layer applied to the tensor. 
the entire layer graph is retrievable from that layer, recursively. Arguments: shape: A shape tuple (integers), not including the batch size. For instance, `shape=(32,)` indicates that the expected input will be batches of 32-dimensional vectors. Elements of this tuple can be None; 'None' elements represent dimensions where the shape is not known. batch_size: optional static batch size (integer). name: An optional name string for the layer. Should be unique in a model (do not reuse the same name twice). It will be autogenerated if it isn't provided. dtype: The data type expected by the input, as a string (`float32`, `float64`, `int32`...) sparse: A boolean specifying whether the placeholder to be created is sparse. Only one of 'ragged' and 'sparse' can be True. tensor: Optional existing tensor to wrap into the `Input` layer. If set, the layer will not create a placeholder tensor. ragged: A boolean specifying whether the placeholder to be created is ragged. Only one of 'ragged' and 'sparse' can be True. In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see https://www.tensorflow.org/guide/ragged_tensors. **kwargs: deprecated arguments support. Supports `batch_shape` and `batch_input_shape`. Returns: A `tensor`. Example: ```python # this is a logistic regression in Keras x = Input(shape=(32,)) y = Dense(16, activation='softmax')(x) model = Model(x, y) ``` Note that even if eager execution is enabled, `Input` produces a symbolic tensor (i.e. a placeholder). This symbolic tensor can be used with other TensorFlow ops, as such: ```python x = Input(shape=(32,)) y = tf.square(x) ``` Raises: ValueError: in case of invalid arguments. """ if sparse and ragged: raise ValueError( 'Cannot set both sparse and ragged to True in a Keras input.') input_layer_config = {'name': name, 'dtype': dtype, 'sparse': sparse, 'ragged': ragged, 'input_tensor': tensor} batch_input_shape = kwargs.pop('batch_input_shape', kwargs.pop('batch_shape', None)) if shape and batch_input_shape: raise ValueError('Only provide the `shape` OR `batch_input_shape` argument ' 'to Input, not both at the same time.') if batch_input_shape: shape = batch_input_shape[1:] input_layer_config.update({'batch_input_shape': batch_input_shape}) else: input_layer_config.update( {'batch_size': batch_size, 'input_shape': shape}) if kwargs: raise ValueError('Unrecognized keyword arguments:', kwargs.keys()) if shape is None and tensor is None: raise ValueError('Please provide to Input either a `shape`' ' or a `tensor` argument. Note that ' '`shape` does not include the batch ' 'dimension.') input_layer = InputLayer(**input_layer_config) # Return tensor including `_keras_history`. # Note that in this case train_output and test_output are the same pointer. outputs = input_layer._inbound_nodes[0].output_tensors if len(outputs) == 1: return outputs[0] else: return outputs
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/input_layer.py
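In `input_layer.py` above, `Input()` constructs an `InputLayer` and returns its symbolic output tensor. A brief usage sketch of the options documented there (`shape`, `batch_input_shape`, `sparse`), assuming the TF 1.15 `tf.keras` API; shapes and names are illustrative only.

```python
import tensorflow as tf

# `Input` builds an InputLayer under the hood and returns its placeholder tensor.
x = tf.keras.Input(shape=(32,), name='features')
y = tf.keras.layers.Dense(4, activation='softmax')(x)
model = tf.keras.Model(inputs=x, outputs=y)

# The batch-aware keyword handled by the kwargs branch of `Input()` above.
x_batched = tf.keras.Input(batch_input_shape=(None, 32))

# A sparse placeholder, enabled by `sparse=True` in InputLayer.__init__.
x_sparse = tf.keras.Input(shape=(32,), sparse=True)

print(model.input_shape, x_batched.shape, x_sparse.dtype)
```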
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Home of the `Sequential` model. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from tensorflow.python.keras import layers as layer_module from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.engine import input_layer from tensorflow.python.keras.engine import training from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.utils import layer_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import keras_export @keras_export('keras.models.Sequential', 'keras.Sequential') class Sequential(training.Model): """Linear stack of layers. Arguments: layers: list of layers to add to the model. Example: ```python # Optionally, the first layer can receive an `input_shape` argument: model = Sequential() model.add(Dense(32, input_shape=(500,))) # Afterwards, we do automatic shape inference: model.add(Dense(32)) # This is identical to the following: model = Sequential() model.add(Dense(32, input_dim=500)) # And to the following: model = Sequential() model.add(Dense(32, batch_input_shape=(None, 500))) # Note that you can also omit the `input_shape` argument: # In that case the model gets built the first time you call `fit` (or other # training and evaluation methods). 
model = Sequential() model.add(Dense(32)) model.add(Dense(32)) model.compile(optimizer=optimizer, loss=loss) # This builds the model for the first time: model.fit(x, y, batch_size=32, epochs=10) # Note that when using this delayed-build pattern (no input shape specified), # the model doesn't have any weights until the first call # to a training/evaluation method (since it isn't yet built): model = Sequential() model.add(Dense(32)) model.add(Dense(32)) model.weights # returns [] # Whereas if you specify the input shape, the model gets built continuously # as you are adding layers: model = Sequential() model.add(Dense(32, input_shape=(500,))) model.add(Dense(32)) model.weights # returns list of length 4 # When using the delayed-build pattern (no input shape specified), you can # choose to manually build your model by calling `build(batch_input_shape)`: model = Sequential() model.add(Dense(32)) model.add(Dense(32)) model.build((None, 500)) model.weights # returns list of length 4 ``` """ @trackable.no_automatic_dependency_tracking def __init__(self, layers=None, name=None): super(Sequential, self).__init__(name=name) self.supports_masking = True self._build_input_shape = None self._compute_output_and_mask_jointly = True self._layer_call_argspecs = {} # Add to the model any layers passed to the constructor. if layers: if not isinstance(layers, (list, tuple)): layers = [layers] tf_utils.assert_no_legacy_layers(layers) for layer in layers: self.add(layer) @property def layers(self): # Historically, `sequential.layers` only returns layers that were added # via `add`, and omits the auto-generated `InputLayer` that comes at the # bottom of the stack. # `Trackable` manages the `_layers` attributes and does filtering # over it. layers = super(Sequential, self).layers if layers and isinstance(layers[0], input_layer.InputLayer): return layers[1:] return layers[:] @property def dynamic(self): return any(layer.dynamic for layer in self.layers) @trackable.no_automatic_dependency_tracking def add(self, layer): """Adds a layer instance on top of the layer stack. Arguments: layer: layer instance. Raises: TypeError: If `layer` is not a layer instance. ValueError: In case the `layer` argument does not know its input shape. ValueError: In case the `layer` argument has multiple output tensors, or is already connected somewhere else (forbidden in `Sequential` models). """ # If we are passed a Keras tensor created by keras.Input(), we can extract # the input layer from its keras history and use that without any loss of # generality. if hasattr(layer, '_keras_history'): origin_layer = layer._keras_history[0] if isinstance(origin_layer, input_layer.InputLayer): layer = origin_layer if not isinstance(layer, base_layer.Layer): raise TypeError('The added layer must be ' 'an instance of class Layer. ' 'Found: ' + str(layer)) tf_utils.assert_no_legacy_layers([layer]) self.built = False set_inputs = False if not self._layers: if isinstance(layer, input_layer.InputLayer): # Corner case where the user passes an InputLayer layer via `add`. assert len(nest.flatten(layer._inbound_nodes[-1].output_tensors)) == 1 set_inputs = True else: batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer) if batch_shape: # Instantiate an input layer. x = input_layer.Input( batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input') # This will build the current layer # and create the node connecting the current layer # to the input layer we just created. 
layer(x) set_inputs = True if set_inputs: # If an input layer (placeholder) is available. if len(nest.flatten(layer._inbound_nodes[-1].output_tensors)) != 1: raise ValueError('All layers in a Sequential model ' 'should have a single output tensor. ' 'For multi-output layers, ' 'use the functional API.') self.outputs = [ nest.flatten(layer._inbound_nodes[-1].output_tensors)[0] ] self.inputs = layer_utils.get_source_inputs(self.outputs[0]) elif self.outputs: # If the model is being built continuously on top of an input layer: # refresh its output. output_tensor = layer(self.outputs[0]) if len(nest.flatten(output_tensor)) != 1: raise TypeError('All layers in a Sequential model ' 'should have a single output tensor. ' 'For multi-output layers, ' 'use the functional API.') self.outputs = [output_tensor] if self.outputs: # True if set_inputs or self._is_graph_network or if adding a layer # to an already built deferred seq model. self.built = True if set_inputs or self._is_graph_network: self._init_graph_network(self.inputs, self.outputs, name=self.name) else: self._layers.append(layer) if self._layers: self._track_layers(self._layers) self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call) @trackable.no_automatic_dependency_tracking def pop(self): """Removes the last layer in the model. Raises: TypeError: if there are no layers in the model. """ if not self.layers: raise TypeError('There are no layers in the model.') layer = self._layers.pop() self._layer_call_argspecs.pop(layer) if not self.layers: self.outputs = None self.inputs = None self.built = False elif self._is_graph_network: self.layers[-1]._outbound_nodes = [] self.outputs = [self.layers[-1].output] self._init_graph_network(self.inputs, self.outputs, name=self.name) self.built = True @base_layer_utils.default def build(self, input_shape=None): if self._is_graph_network: self._init_graph_network(self.inputs, self.outputs, name=self.name) else: if input_shape is None: raise ValueError('You must provide an `input_shape` argument.') input_shape = tuple(input_shape) self._build_input_shape = input_shape super(Sequential, self).build(input_shape) self.built = True def call(self, inputs, training=None, mask=None): # pylint: disable=redefined-outer-name if self._is_graph_network: if not self.built: self._init_graph_network(self.inputs, self.outputs, name=self.name) return super(Sequential, self).call(inputs, training=training, mask=mask) outputs = inputs # handle the corner case where self.layers is empty for layer in self.layers: # During each iteration, `inputs` are the inputs to `layer`, and `outputs` # are the outputs of `layer` applied to `inputs`. At the end of each # iteration `inputs` is set to `outputs` to prepare for the next layer. kwargs = {} argspec = self._layer_call_argspecs[layer].args if 'mask' in argspec: kwargs['mask'] = mask if 'training' in argspec: kwargs['training'] = training outputs = layer(inputs, **kwargs) # `outputs` will be the inputs to the next layer. inputs = outputs mask = outputs._keras_mask return outputs def compute_output_shape(self, input_shape): shape = input_shape for layer in self.layers: shape = layer.compute_output_shape(shape) return shape def compute_mask(self, inputs, mask): # TODO(omalleyt): b/123540974 This function is not really safe to call # by itself because it will duplicate any updates and losses in graph # mode by `call`ing the Layers again. 
outputs = self.call(inputs, mask=mask) return outputs._keras_mask def predict_proba(self, x, batch_size=32, verbose=0): """Generates class probability predictions for the input samples. The input samples are processed batch by batch. Arguments: x: input data, as a Numpy array or list of Numpy arrays (if the model has multiple inputs). batch_size: integer. verbose: verbosity mode, 0 or 1. Returns: A Numpy array of probability predictions. """ preds = self.predict(x, batch_size, verbose) if preds.min() < 0. or preds.max() > 1.: logging.warning('Network returning invalid probability values. ' 'The last layer might not normalize predictions ' 'into probabilities ' '(like softmax or sigmoid would).') return preds def predict_classes(self, x, batch_size=32, verbose=0): """Generate class predictions for the input samples. The input samples are processed batch by batch. Arguments: x: input data, as a Numpy array or list of Numpy arrays (if the model has multiple inputs). batch_size: integer. verbose: verbosity mode, 0 or 1. Returns: A numpy array of class predictions. """ proba = self.predict(x, batch_size=batch_size, verbose=verbose) if proba.shape[-1] > 1: return proba.argmax(axis=-1) else: return (proba > 0.5).astype('int32') def get_config(self): layer_configs = [] for layer in self.layers: layer_configs.append({ 'class_name': layer.__class__.__name__, 'config': layer.get_config() }) # When constructed using an `InputLayer` the first non-input layer may not # have the shape information to reconstruct `Sequential` as a graph network. if (self._is_graph_network and layer_configs and 'batch_input_shape' not in layer_configs[0]['config'] and isinstance(self._layers[0], input_layer.InputLayer)): batch_input_shape = self._layers[0]._batch_input_shape layer_configs[0]['config']['batch_input_shape'] = batch_input_shape config = { 'name': self.name, 'layers': copy.deepcopy(layer_configs) } if self._build_input_shape: config['build_input_shape'] = self._build_input_shape return config @classmethod def from_config(cls, config, custom_objects=None): if 'name' in config: name = config['name'] build_input_shape = config.get('build_input_shape') layer_configs = config['layers'] else: name = None build_input_shape = None layer_configs = config model = cls(name=name) for layer_config in layer_configs: layer = layer_module.deserialize(layer_config, custom_objects=custom_objects) model.add(layer) if not model.inputs and build_input_shape: model.build(build_input_shape) return model @property def input_spec(self): if self.layers and hasattr(self.layers[0], 'input_spec'): return self.layers[0].input_spec return None @property def _object_identifier(self): return '_tf_keras_sequential'
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/sequential.py
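Complementing the docstring examples in `sequential.py` above, here is a short sketch of two methods defined in that file, `pop()` and `predict_classes()`, on a toy binary classifier; the data is random and purely illustrative.

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='sgd', loss='binary_crossentropy')

# predict_classes(): thresholds sigmoid outputs at 0.5 (argmax for multi-class heads).
classes = model.predict_classes(np.random.rand(3, 4), batch_size=2)
print(classes.shape)  # (3, 1)

# pop(): removes the top layer and rewires model.outputs to the new last layer.
model.pop()
print(len(model.layers))  # 1
```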
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the base ProcessingLayer and a subclass that uses Combiners.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import numpy as np from tensorflow.python.data.experimental.ops import cardinality from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.keras.engine import training_generator from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.ops import math_ops class PreprocessingLayer(Layer): """Base class for PreprocessingLayers.""" __metaclass__ = abc.ABCMeta @abc.abstractmethod def adapt(self, data, reset_state=True): # TODO(momernick): Add examples. """Fits the state of the preprocessing layer to the data being passed. Arguments: data: The data to train on. It can be passed either as a tf.data Dataset, or as a numpy array. reset_state: Optional argument specifying whether to clear the state of the layer at the start of the call to `adapt`, or whether to start from the existing state. This argument may not be relevant to all preprocessing layers: a subclass of PreprocessingLayer may choose to throw if 'reset_state' is set to False. """ pass class CombinerPreprocessingLayer(PreprocessingLayer): """Base class for PreprocessingLayers that do computation using a Combiner. This class provides several helper methods to make creating a PreprocessingLayer easier. It assumes that the core of your computation will be done via a Combiner object. Subclassing this class to create a PreprocessingLayer allows your layer to be compatible with distributed computation. This class is compatible with Tensorflow 2.0+. """ def __init__(self, combiner, **kwargs): super(CombinerPreprocessingLayer, self).__init__(**kwargs) self._combiner = combiner self._previously_updated = False self.state_variables = collections.OrderedDict() def _add_state_variable(self, name, shape, dtype, initializer=None, partitioner=None, use_resource=None, **kwargs): """Add a variable that can hold state which is updated during adapt(). Args: name: Variable name. shape: Variable shape. Defaults to scalar if unspecified. dtype: The type of the variable. Defaults to `self.dtype` or `float32`. initializer: initializer instance (callable). partitioner: Partitioner to be passed to the `Trackable` API. use_resource: Whether to use `ResourceVariable` **kwargs: Additional keyword arguments. Accepted values are `getter` and `collections`. Returns: The created variable. """ weight = self.add_weight( name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=None, trainable=False, constraint=None, partitioner=partitioner, use_resource=use_resource, **kwargs) # TODO(momernick): Do not allow collisions here. 
self.state_variables[name] = weight return weight def _restore_updates(self): """Recreates a dict of updates from the layer's weights.""" data_dict = {} for name, var in self.state_variables.items(): data_dict[name] = var.numpy() return data_dict def _dataset_is_infinite(self, dataset): """True if the passed dataset is infinite.""" return math_ops.equal( cardinality.cardinality(dataset), cardinality.INFINITE) def _get_dataset_iterator(self, dataset): """Gets an iterator from a tf.data.Dataset.""" return dataset_ops.make_one_shot_iterator(dataset).get_next def adapt(self, data, reset_state=True): """Fits the state of the preprocessing layer to the data being passed. Arguments: data: The data to train on. It can be passed either as a tf.data Dataset, or as a numpy array. reset_state: Optional argument specifying whether to clear the state of the layer at the start of the call to `adapt`, or whether to start from the existing state. Subclasses may choose to throw if reset_state is set to 'False'. """ if reset_state: accumulator = None else: accumulator = self._combiner.restore(self._restore_updates()) if not isinstance(data, (dataset_ops.DatasetV2, np.ndarray)): raise ValueError( 'adapt() requires a Dataset or a Numpy array as input, got {}'.format( type(data))) if isinstance(data, dataset_ops.DatasetV2): # Validate the datasets to try and ensure we haven't been passed one with # infinite size. That would cause an infinite loop here. if self._dataset_is_infinite(data): raise ValueError( 'The dataset passed to "adapt()" has an infinite number of ' 'elements. Please use dataset.take(...) to make the number ' 'of elements finite.') next_data = self._get_dataset_iterator(data) else: generator, _ = training_generator.convert_to_generator_like( data, batch_size=len(data)) # If the data is not a dataset, we can iterate over it using next(foo); # here, we wrap that into a callable. next_data = lambda: next(generator) # TODO(momernick): Some sort of status bar? # TODO(momernick): Implement parallel processing here? try: data_element = next_data() # First, see if the layer is built or not. If it is not, then we must # build it. if not self.built: try: # If this is a Numpy array or tensor, we can get shape from .shape. # If not, an attribute error will be thrown (and we can assume the # input data is a scalar with shape None. shape = data_element.shape except AttributeError: shape = None self.build(shape) # Once we have built the Layer, we can process the input data. We do so # until we've gotten an exception indicating that we have no more data. while True: accumulator = self._combiner.compute(data_element, accumulator) data_element = next_data() # Note that this belongs to the outer indentation of 'try' - we need to # catch exceptions resulting from the first 'next_data()' invocation as # well. except (StopIteration, errors.OutOfRangeError): pass updates = self._combiner.extract(accumulator) self._set_state_variables(updates) def _set_state_variables(self, updates): """Directly update the internal state of this Layer. This method expects a string-keyed dict of {state_variable_name: state}. The precise nature of the state, and the names associated, are describe by the subclasses of CombinerPreprocessingLayer. Args: updates: A string keyed dict of weights to update. Raises: RuntimeError: if 'build()' was not called before 'set_processing_state'. """ # TODO(momernick): Do we need to do any more input sanitization? 
if not self.built: raise RuntimeError('_set_state_variables() must be called after build().') with ops.init_scope(): for var_name, value in updates.items(): self.state_variables[var_name].assign(value) class Combiner(object): """Functional object that defines a shardable computation. This object defines functions required to create and manipulate data objects. These data objects, referred to below as 'accumulators', are computation- specific and may be implemented alongside concrete subclasses of Combiner (if necessary - some computations may be simple enough that standard Python types can be used as accumulators). The intent for this class is that by describing computations in this way, we can arbitrarily shard a dataset, perform computations on a subset, and then merge the computation into a final result. This enables distributed computation. The combiner itself does not own any state - all computational state is owned by the accumulator objects. This is so that we can have an arbitrary number of Combiners (thus sharding the computation N ways) without risking any change to the underlying computation. These accumulator objects are uniquely associated with each Combiner; a Combiner defines what the accumulator object should be and will only work with accumulators of that type. """ __metaclass__ = abc.ABCMeta def __repr__(self): return '<{}>'.format(self.__class__.__name__) @abc.abstractmethod def compute(self, batch_values, accumulator=None): """Compute a step in this computation, returning a new accumulator. This method computes a step of the computation described by this Combiner. If an accumulator is passed, the data in that accumulator is also used; so compute(batch_values) results in f(batch_values), while compute(batch_values, accumulator) results in merge(f(batch_values), accumulator). Args: batch_values: A list of ndarrays representing the values of the inputs for this step of the computation. accumulator: the current accumulator. Can be None. Returns: An accumulator that includes the passed batch of inputs. """ pass @abc.abstractmethod def merge(self, accumulators): """Merge several accumulators to a single accumulator. This method takes the partial values in several accumulators and combines them into a single accumulator. This computation must not be order-specific (that is, merge([a, b]) must return the same result as merge([b, a]). Args: accumulators: the accumulators to merge, as a list. Returns: A merged accumulator. """ pass @abc.abstractmethod def extract(self, accumulator): """Convert an accumulator into a dict of output values. Args: accumulator: The accumulator to convert. Returns: A dict of ndarrays representing the data in this accumulator. """ pass @abc.abstractmethod def restore(self, output): """Create an accumulator based on 'output'. This method creates a new accumulator with identical internal state to the one used to create the data in 'output'. This means that if you do output_data = combiner.extract(accumulator_1) accumulator_2 = combiner.restore(output_data) then accumulator_1 and accumulator_2 will have identical internal state, and computations using either of them will be equivalent. Args: output: The data output from a previous computation. Should be in the same form as provided by 'extract_output'. Returns: A new accumulator. """ pass @abc.abstractmethod def serialize(self, accumulator): """Serialize an accumulator for a remote call. This function serializes an accumulator to be sent to a remote process. Args: accumulator: The accumulator to serialize. 
Returns: A byte string representing the passed accumulator. """ pass @abc.abstractmethod def deserialize(self, encoded_accumulator): """Deserialize an accumulator received from 'serialize()'. This function deserializes an accumulator serialized by 'serialize()'. Args: encoded_accumulator: A byte string representing an accumulator. Returns: The accumulator represented by the passed byte_string. """ pass
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/base_preprocessing_layer.py
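To make the `Combiner` contract in `base_preprocessing_layer.py` above concrete, here is a hypothetical `SumCountCombiner` (not part of the file, written only for illustration): `compute` folds a batch into a `(total, count)` accumulator, `merge` reduces shard accumulators order-independently, and `extract`/`restore` round-trip the accumulator through a dict of ndarrays as the base class expects.

```python
import json
import numpy as np


class SumCountCombiner(object):
  """Illustrative Combiner tracking a running sum and element count.

  The accumulator is a plain (total, count) tuple, so standard Python
  types suffice and no custom accumulator class is needed.
  """

  def compute(self, batch_values, accumulator=None):
    total, count = accumulator if accumulator is not None else (0.0, 0)
    batch = np.asarray(batch_values, dtype=np.float64)
    return (total + float(batch.sum()), count + int(batch.size))

  def merge(self, accumulators):
    # Order-independent reduction over partial (total, count) pairs.
    totals, counts = zip(*accumulators)
    return (sum(totals), sum(counts))

  def extract(self, accumulator):
    total, count = accumulator
    return {'sum': np.array(total), 'count': np.array(count)}

  def restore(self, output):
    # Rebuild the exact (total, count) state produced by extract().
    return (float(output['sum']), int(output['count']))

  def serialize(self, accumulator):
    return json.dumps(list(accumulator)).encode('utf-8')

  def deserialize(self, encoded_accumulator):
    return tuple(json.loads(encoded_accumulator.decode('utf-8')))


# Two shards computed independently, then merged: sum -> 6.0, count -> 3.
combiner = SumCountCombiner()
acc_a = combiner.compute([1.0, 2.0])
acc_b = combiner.compute([3.0])
print(combiner.extract(combiner.merge([acc_a, acc_b])))
```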
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for model.fit calls with a Dataset object passed as validation_data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys from absl.testing import parameterized import numpy as np import six from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.layers import core from tensorflow.python.platform import test @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class ValidationDatasetNoLimitTest(keras_parameterized.TestCase): def create_dataset(self, num_samples, batch_size): input_data = np.random.rand(num_samples, 1) expected_data = input_data * 3 dataset = dataset_ops.Dataset.from_tensor_slices((input_data, expected_data)) return dataset.shuffle(10 * batch_size).batch(batch_size) def test_validation_dataset_with_no_step_arg(self): # Create a model that learns y=Mx. layers = [core.Dense(1)] model = testing_utils.get_model_from_layers(layers, input_shape=(1,)) model.compile(loss="mse", optimizer="adam", metrics=["mean_absolute_error"]) train_dataset = self.create_dataset(num_samples=200, batch_size=10) eval_dataset = self.create_dataset(num_samples=50, batch_size=25) history = model.fit(x=train_dataset, validation_data=eval_dataset, epochs=2) evaluation = model.evaluate(x=eval_dataset) # If the fit call used the entire dataset, then the final val MAE error # from the fit history should be equal to the final element in the output # of evaluating the model on the same eval dataset. 
self.assertAlmostEqual(history.history["val_mean_absolute_error"][-1], evaluation[-1], places=5) class PrintTrainingInfoTest(keras_parameterized.TestCase, parameterized.TestCase): @test_util.run_v1_only("Only relevant in graph mode.") def test_print_info_with_datasets(self): """Print training info should work with val datasets (b/133391839).""" model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(1,))]) model.compile(loss="mse", optimizer="sgd") dataset = dataset_ops.Dataset.from_tensors( ([1.], [1.])).repeat(100).batch(10) val_dataset = dataset_ops.Dataset.from_tensors( ([1.], [1.])).repeat(50).batch(10) mock_stdout = six.StringIO() with test.mock.patch.object(sys, "stdout", mock_stdout): model.fit(dataset, epochs=2, validation_data=val_dataset) self.assertIn( "Train on 10 steps, validate on 5 steps", mock_stdout.getvalue()) @parameterized.named_parameters( ("with_validation", True), ("without_validation", False)) @test_util.run_v1_only("Only relevant in graph mode.") def test_print_info_with_numpy(self, do_validation): """Print training info should work with val datasets (b/133391839).""" model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(2,))]) model.compile(loss="mse", optimizer="sgd") dataset = np.arange(200).reshape(100, 2) if do_validation: val_data = (np.arange(100).reshape(50, 2), np.arange(50).reshape(50, 1)) else: val_data = None mock_stdout = six.StringIO() with test.mock.patch.object(sys, "stdout", mock_stdout): model.fit(dataset, batch_size=10, epochs=2, validation_data=val_data) self.assertIn("Train on 100 samples", mock_stdout.getvalue()) if do_validation: self.assertIn(", validate on 50 samples", mock_stdout.getvalue()) @keras_parameterized.run_all_keras_modes def test_dict_float64_input(self): class MyModel(keras.Model): def __init__(self): super(MyModel, self).__init__(self) self.dense1 = keras.layers.Dense(10, activation="relu") self.dense2 = keras.layers.Dense(10, activation="relu") self.concat = keras.layers.Concatenate() self.dense3 = keras.layers.Dense(1, activation="sigmoid") def call(self, inputs): d1 = self.dense1(inputs["one"]) d2 = self.dense2(inputs["two"]) concat = self.concat([d1, d2]) return self.dense3(concat) model = MyModel() model.compile( loss="mae", optimizer="adam", run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit( x={ "one": np.random.rand(100, 10, 1), "two": np.random.rand(100, 10, 1) }, y=np.random.rand(100, 10, 1)) def test_dict_validation_input(self): """Test case for GitHub issue 30122.""" train_input_0 = np.random.rand(1000, 1) train_input_1 = np.random.rand(1000, 1) train_labels = np.random.rand(1000, 1) val_input_0 = np.random.rand(1000, 1) val_input_1 = np.random.rand(1000, 1) val_labels = np.random.rand(1000, 1) input_0 = keras.Input(shape=(None,), name="input_0") input_1 = keras.Input(shape=(None,), name="input_1") class my_model(keras.Model): def __init__(self): super(my_model, self).__init__(self) self.hidden_layer_0 = keras.layers.Dense(100, activation="relu") self.hidden_layer_1 = keras.layers.Dense(100, activation="relu") self.concat = keras.layers.Concatenate() self.out_layer = keras.layers.Dense(1, activation="sigmoid") def call(self, inputs=[input_0, input_1]): activation_0 = self.hidden_layer_0(inputs["input_0"]) activation_1 = self.hidden_layer_1(inputs["input_1"]) concat = self.concat([activation_0, activation_1]) return self.out_layer(concat) model = my_model() model.compile(loss="mae", optimizer="adam") model.fit( x={ 
"input_0": train_input_0, "input_1": train_input_1 }, y=train_labels, validation_data=({ "input_0": val_input_0, "input_1": val_input_1 }, val_labels)) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/training_arrays_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training related logic for Keras model in TF 2.0 context. Note that all the code under this module is under active development, please DO NOT use it unless you are really sure what you are doing. """ # pylint: disable=protected-access from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.eager import def_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_util from tensorflow.python.framework.ops import composite_tensor from tensorflow.python.keras import backend from tensorflow.python.keras.distribute import distributed_training_utils as dist_utils from tensorflow.python.keras.engine import training_eager from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.utils.mode_keys import ModeKeys from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.util import nest def _get_or_make_execution_function(model, mode): """Makes or reuses function to run one step of distributed model execution.""" model._init_distributed_function_cache_if_not_compiled() # Use a key with 'v2' to distinguish from fall-back execution functions. key = (mode, 'v2') distributed_function = dist_utils.get_distributed_function(model, key) if distributed_function: return distributed_function distribution_function = _make_execution_function(model, mode) dist_utils.set_distributed_function(model, key, distribution_function) return distribution_function def _make_execution_function(model, mode): """Creates a function to run one step of distributed model execution.""" per_replica_function = _make_replica_execution_function(model, mode) def distributed_function(input_iterator): """A single step of the distributed execution across replicas.""" x, y, sample_weights = _prepare_feed_values( model, input_iterator, mode) # Call `Model.{train,test,predict}_on_batch` on every replica passing # PerReplicas as arguments. On every replica inside this call, each # PerReplica object will return the value for that replica. The outputs # are PerReplicas too. strategy = distribution_strategy_context.get_strategy() outputs = strategy.experimental_run_v2( per_replica_function, args=(x, y, sample_weights)) # Out of PerReplica outputs reduce or pick values to return. all_outputs = dist_utils.unwrap_output_dict( strategy, outputs, mode) return all_outputs if not model.run_eagerly: distributed_function = def_function.function( distributed_function, autograph=False) def execution_function(input_fn): # `numpy` translates Tensors to values in Eager mode. 
return nest.map_structure(_non_none_constant_value, distributed_function(input_fn)) return execution_function def _non_none_constant_value(v): constant_value = tensor_util.constant_value(v) return constant_value if constant_value is not None else v def _prepare_feed_values(model, inputs, mode): """Prepare feed values to the model execution function. Arguments: model: Model to prepare feed values for. inputs: An iterator of model inputs, targets, and sample_weights. model inputs may be lists, single values, or dicts mapping input feed names to values. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. Returns: Feed values for the model in the given mode. This is a tuple of the structure (inputs, targets, sample_weights), where each of (tuple, targets, sample_weights) may be a python list. Single values for inputs will always be wrapped in lists. """ inputs, targets, sample_weights = _get_input_from_iterator(inputs) # When the inputs are dict, then we want to flatten it in the same order as # the input layers, such that the data are fed into the input layers in the # correct order. if isinstance(inputs, dict): inputs = [inputs[key] for key in model._feed_input_names] else: inputs = training_utils.ModelInputs(inputs).as_list() if mode == ModeKeys.PREDICT: sample_weights = [] targets = [] ins = [inputs, targets, sample_weights] return tuple(ins) def _get_input_from_iterator(iterator): """Get elements from the iterator and verify the input shape and type.""" next_element = next(iterator) if (tensor_util.is_tensor(next_element) or isinstance(next_element, (dict, composite_tensor.CompositeTensor))): next_element = [next_element] if len(next_element) == 1: x, = next_element y = None sample_weights = None elif len(next_element) == 2: x, y = next_element sample_weights = None else: x, y, sample_weights = next_element # Validate that all the elements in x and y are of the same type and shape. dist_utils.validate_distributed_dataset_inputs( distribution_strategy_context.get_strategy(), x, y, sample_weights) return x, y, sample_weights def _make_replica_execution_function(model, mode): """A single step of the distributed execution on a replica.""" if mode == ModeKeys.TRAIN: func = functools.partial(train_on_batch, model) elif mode == ModeKeys.TEST: func = functools.partial(test_on_batch, model) else: def _predict_on_batch(x, y=None, sample_weights=None): del y, sample_weights return predict_on_batch(model, x) func = _predict_on_batch if mode != ModeKeys.PREDICT: # `reset_metrics` is set to False to maintain stateful metrics across # batch-level calls. func = functools.partial(func, reset_metrics=False) return func def _prepare_model_with_inputs(model, dataset): """Use the data from the adapter to config the model. Model need to be properly configured before training, eg build with inputs, or compile with inputs for subclass model. Args: model: a Keras model object. dataset: a eager dataset instance where the data will be extracted. """ if not model.inputs: inputs, target, _ = model._build_model_with_inputs(dataset, targets=None) else: inputs, target, _ = _get_input_from_iterator(iter(dataset)) if not model._is_compiled and model.optimizer: model._compile_from_inputs(inputs, target, dataset, None) if target is not None: training_utils.prepare_sample_weight_modes(model._training_endpoints, model.sample_weight_mode) def train_on_batch( model, x, y=None, sample_weight=None, class_weight=None, reset_metrics=True): """Runs a single gradient update on a single batch of data. 
Arguments: model: The model to train. x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset `y` should not be specified (since targets will be obtained from the iterator). sample_weight: Optional array of the same length as x, containing weights to apply to the model's loss for each sample. In the case of temporal data, you can pass a 2D array with shape (samples, sequence_length), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode="temporal" in compile(). This argument is not supported when `x` is a dataset. class_weight: Optional dictionary mapping class indices (integers) to a weight (float) to apply to the model's loss for the samples from this class during training. This can be useful to tell the model to "pay more attention" to samples from an under-represented class. reset_metrics: If `True`, the metrics returned will be only for this batch. If `False`, the metrics will be statefully accumulated across batches. Returns: Scalar training loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: In case of invalid user-provided arguments. """ model._assert_compile_was_called() # TODO(scottzhu): Standardization should happen in the data handlers, ## not on a per batch basis in the *_on_batch methods # Validate and standardize user data. x, y, sample_weights = model._standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight, extract_tensors_from_dataset=True) batch_size = array_ops.shape(nest.flatten(x, expand_composites=True)[0])[0] # If `model._distribution_strategy` is True, then we are in a replica context # at this point because of the check above. `train_on_batch` is being run # for each replica by `model._distribution_strategy` and the same code path # as Eager is expected to be taken. outputs = training_eager.train_on_batch( model, x, y, sample_weights=sample_weights, output_loss_metrics=model._output_loss_metrics) if reset_metrics: model.reset_metrics() outputs['batch_size'] = math_ops.cast(batch_size, dtypes.int64) return outputs def test_on_batch(model, x, y=None, sample_weight=None, reset_metrics=True): """Test the model on a single batch of samples. Arguments: model: The model to test. x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset, `y` should not be specified (since targets will be obtained from the iterator). 
sample_weight: Optional array of the same length as x, containing weights to apply to the model's loss for each sample. In the case of temporal data, you can pass a 2D array with shape (samples, sequence_length), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode="temporal" in compile(). This argument is not supported when `x` is a dataset. reset_metrics: If `True`, the metrics returned will be only for this batch. If `False`, the metrics will be statefully accumulated across batches. Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: In case of invalid user-provided arguments. """ model._assert_compile_was_called() # TODO(scottzhu): Standardization should happen in the data handlers, ## not on a per batch basis in the *_on_batch methods # Validate and standardize user data. x, y, sample_weights = model._standardize_user_data( x, y, sample_weight=sample_weight, extract_tensors_from_dataset=True) batch_size = array_ops.shape(nest.flatten(x, expand_composites=True)[0])[0] outputs = training_eager.test_on_batch( model, x, y, sample_weights=sample_weights, output_loss_metrics=model._output_loss_metrics) if reset_metrics: model.reset_metrics() outputs['batch_size'] = math_ops.cast(batch_size, dtypes.int64) return outputs def predict_on_batch(model, x): """Returns predictions for a single batch of samples. Arguments: model: The model to predict with. x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A `tf.data` dataset. Returns: Numpy array(s) of predictions. Raises: ValueError: In case of mismatch between given number of inputs and expectations of the model. """ # TODO(scottzhu): Standardization should happen in the data handlers, ## not on a per batch basis in the *_on_batch methods # Validate and standardize user data. inputs, _, _ = model._standardize_user_data( x, extract_tensors_from_dataset=True) # If `model._distribution_strategy` is True, then we are in a replica context # at this point. inputs = training_utils.cast_if_floating_dtype(inputs) if isinstance(inputs, collections.Sequence): # Unwrap lists with only one input, as we do when training on batch if len(inputs) == 1: inputs = inputs[0] with backend.eager_learning_phase_scope(0): return model(inputs) # pylint: disable=not-callable
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/training_v2_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training-related utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import atexit import collections from collections import OrderedDict import multiprocessing.pool import threading import time import numpy as np import six from six.moves import zip # pylint: disable=redefined-builtin from tensorflow.python import tf2 from tensorflow.python.data.experimental.ops import cardinality from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.data.ops import readers from tensorflow.python.eager import context from tensorflow.python.framework import composite_tensor_utils from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend as K from tensorflow.python.keras import callbacks as cbks from tensorflow.python.keras import losses from tensorflow.python.keras import metrics as metrics_module from tensorflow.python.keras.utils import generic_utils from tensorflow.python.keras.utils import losses_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.losses import util as tf_losses_utils from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest from tensorflow.python.util.compat import collections_abc @six.add_metaclass(abc.ABCMeta) class Aggregator(object): """Abstract base class used to aggregate batch-level outputs of a loop. Attributes: use_steps: Whether the loop is using `step` or `batch_size`. num_samples: Total number of samples: `batch_size * num_batches`. steps: Total number of steps. batch_size: Batch size. It is used for validation checks between inputs and outputs. results: What to return at the end of the aggregation loop. """ def __init__(self, use_steps, num_samples=None, steps=None, batch_size=None): self.use_steps = use_steps self.num_samples = num_samples self.steps = steps self.batch_size = batch_size self.results = [] @abc.abstractmethod def create(self, batch_outs): """Creates the initial results from the first batch outputs. Arguments: batch_outs: A list of batch-level outputs. """ raise NotImplementedError('Must be implemented in subclasses.') @abc.abstractmethod def aggregate(self, batch_outs, batch_start=None, batch_end=None): """Aggregates batch-level results into total results. Arguments: batch_outs: A list of batch-level outputs. batch_start: The start index of this batch. Always `None` if `use_steps` is `True`. batch_end: The end index of this batch. 
Always `None` if `use_steps` is `True`. """ raise NotImplementedError('Must be implemented in subclasses.') @abc.abstractmethod def finalize(self): """Prepares the total results to be returned.""" raise NotImplementedError('Must be implemented in subclasses.') class MetricsAggregator(Aggregator): """Aggregator that calculates loss and metrics info. Attributes: use_steps: Whether the loop is using `step` or `batch_size`. num_samples: Total number of samples: `batch_size*num_batches`. steps: Total number of steps, ie number of times to iterate over a dataset to cover all samples. """ def __init__(self, use_steps, num_samples=None, steps=None): super(MetricsAggregator, self).__init__( use_steps=use_steps, num_samples=num_samples, steps=steps, batch_size=None) def create(self, batch_outs): self.results = [0.] * len(batch_outs) def aggregate(self, batch_outs, batch_start=None, batch_end=None): # Loss. if self.use_steps: self.results[0] += batch_outs[0] else: self.results[0] += batch_outs[0] * (batch_end - batch_start) # Metrics (always stateful, just grab current values.) self.results[1:] = batch_outs[1:] def finalize(self): if not self.results: raise ValueError('Empty training data.') self.results[0] /= (self.num_samples or self.steps) class ConcatAggregator(Aggregator): """Combine tensor-likes which cannot be merged on the fly. This class expects to aggregate a single tensor-like rather than a nested structure of tensor-likes. """ def __init__(self, batch_size): self.composite = None super(ConcatAggregator, self).__init__( use_steps=True, num_samples=None, steps=None, batch_size=batch_size) def create(self, batch_element): self.composite = composite_tensor_utils.is_composite_or_composite_value( batch_element) def aggregate(self, batch_element, batch_start=None, batch_end=None): # TODO(psv): Add num_samples check here to detect when output batch # #samples is < batch size and != input batch #samples. if self.batch_size and self.batch_size < batch_element.shape[0]: raise ValueError( 'Mismatch between expected batch size and model output batch size. ' 'Output shape = {}, expected output shape = shape {}'.format( batch_element.shape, (self.batch_size,) + batch_element.shape[1:])) self.results.append(batch_element) def finalize(self): # Special case of single batch inference which skips a copy. if len(self.results) == 1: self.results = self.results[0] elif self.composite: # TODO(taylorrobie): efficiently concatenate. results = self.results[0] for r in self.results[1:]: results = composite_tensor_utils.append_composite_tensor(results, r) self.results = results else: self.results = np.concatenate(self.results, axis=0) if isinstance(self.results, ops.EagerTensor): self.results = self.results._numpy() # pylint: disable=protected-access _COPY_THREADS = 4 _COPY_POOL = None def get_copy_pool(): """Shared threadpool for copying arrays. Pool instantiation takes ~ 2ms, so a singleton pool is used rather than creating a pool per SliceAggregator. Returns: The global copy threadpool. """ global _COPY_POOL if _COPY_POOL is None: _COPY_POOL = multiprocessing.pool.ThreadPool(_COPY_THREADS) atexit.register(_COPY_POOL.close) return _COPY_POOL class SliceAggregator(Aggregator): """Combine arrays where the final size is known. This class expects to aggregate a single tensor-like rather than a nested structure of tensor-likes. NumPy copies are an operation that threads handle quite well because all of the heavy lifting is in c and does not need the GIL. 
Moreover, we can perform lock-free writes to the same buffer in multiple threads because the nature of result aggregation guarantees that either the indices are disjoint or the aggregator will throw an exception in finalize. Moreover, because aggregation is performed on the slowest varying dimension, assignments for a given batch will write to contiguous blocks of memory, further minimizing contention. There is, however, some scheduling and context switching overhead which will offset the gains from pipelining the slice assignment. Below a given threshold it is faster to simply assign in the main thread rather than enqueue the assigmnet in a side thread. The exact threshold will vary from system to system, but the time is not very sensitive to the exact transition so a value of 2 ** 14 was chosen which should be reasonable on most systems. """ _BINARY_SIZE_THRESHOLD = 2 ** 14 _MAX_COPY_SECONDS = 300 def __init__(self, num_samples, batch_size): self._async_copies = [] self._pool = get_copy_pool() self._errors = [] super(SliceAggregator, self).__init__( use_steps=False, num_samples=num_samples, steps=None, batch_size=batch_size) def create(self, batch_element): # This step does not need to be pipelined because NumPy empty array # initialization is effectively instantaneous. shape = (self.num_samples,) + batch_element.shape[1:] dtype = batch_element.dtype if isinstance(batch_element, ops.EagerTensor): dtype = dtype.as_numpy_dtype() self.results = np.empty(shape=shape, dtype=dtype) def aggregate(self, batch_element, batch_start, batch_end): # Fail early. if self._errors: six.reraise(type(self._errors[0]), self._errors[0]) # In the special case of single batch inference, no copy is needed. if batch_end - batch_start == self.num_samples: if self.num_samples != batch_element.shape[0]: raise ValueError( 'Mismatch between expected batch size and model output batch size. ' 'Output shape = {}, expected output shape = shape {}'.format( batch_element.shape, self.results.shape)) self.results = batch_element return # This is an approximate threshold, so we don't need to consider the number # of bytes per element. num_elements = np.prod(batch_element.shape) if num_elements < self._BINARY_SIZE_THRESHOLD: self.results[batch_start:batch_end] = batch_element else: is_finished = threading.Event() self._pool.apply_async( self._slice_assign, args=(batch_element, batch_start, batch_end, is_finished)) self._async_copies.append(is_finished) def _slice_assign(self, batch_element, batch_start, batch_end, is_finished): try: self.results[batch_start:batch_end] = batch_element except Exception as e: # pylint: disable=broad-except # `_slice_assign` should only be called in threads and exceptions raised # in threads do not carry over to the main thread. So instead we perform a # a broad catch in the thread and then store the exception to be re-raised # in the main thread. self._errors.append(e) finally: is_finished.set() def finalize(self): start_time = time.time() for is_finished in self._async_copies: timeout = max([0., self._MAX_COPY_SECONDS - (time.time() - start_time)]) if not is_finished.wait(timeout): raise ValueError('Timed out waiting for copy to complete.') if self._errors: six.reraise(self._errors[0].__class__, self._errors[0]) class OutputsAggregator(Aggregator): """Aggregator that concatenates outputs.""" _structure = None def create(self, batch_outs): # SparseTensorValue is a named tuple which nest will flatten, so we need # to guard it to properly handle the structure. 
self._structure = nest.get_traverse_shallow_structure( lambda x: not composite_tensor_utils.is_composite_or_composite_value(x), batch_outs) batch_outs = nest.flatten_up_to(self._structure, batch_outs) for batch_element in batch_outs: if composite_tensor_utils.is_composite_or_composite_value(batch_element): # If the output is not a ndarray, it will be either a composite tensor # or a composite tensor's Value object. In either case, we can't # allocate an array to hold the object - we'll handle it later. self.results.append(ConcatAggregator(self.batch_size)) elif isinstance(batch_element, (np.ndarray, ops.EagerTensor)): self.results.append( (ConcatAggregator(self.batch_size) if self.use_steps else SliceAggregator(self.num_samples, self.batch_size))) else: # This is not a ndarray, a CompositeTensor, or a CompositeTensorValue. # Fail fast rather than trying to concatenate it. raise RuntimeError('Attempted to aggregate unsupported object {}.' .format(batch_element)) self.results[-1].create(batch_element) def aggregate(self, batch_outs, batch_start=None, batch_end=None): batch_outs = nest.flatten_up_to(self._structure, batch_outs) for batch_element, result in zip(batch_outs, self.results): result.aggregate(batch_element, batch_start, batch_end) def finalize(self): for result in self.results: result.finalize() self.results = [i.results for i in self.results] self.results = nest.pack_sequence_as(self._structure, self.results) def get_progbar(model, count_mode): """Get Progbar.""" stateful_metric_names = None if hasattr(model, 'metrics_names'): stateful_metric_names = model.metrics_names[1:] # Exclude `loss` return cbks.ProgbarLogger(count_mode, stateful_metrics=stateful_metric_names) def slice_arrays(arrays, indices, contiguous=True): """Slices batches out of provided arrays (workaround for eager tensors). Unfortunately eager tensors don't have the same slicing behavior as Numpy arrays (they follow the same slicing behavior as symbolic TF tensors), hence we cannot use `generic_utils.slice_arrays` directly and we have to implement this workaround based on `concat`. This has a performance cost. Arguments: arrays: Single array or list of arrays. indices: List of indices in the array that should be included in the output batch. contiguous: Boolean flag indicating whether the indices are contiguous. Returns: Slice of data (either single array or list of arrays). """ converted_to_list = False if not isinstance(arrays, list): converted_to_list = True arrays = [arrays] if any(tensor_util.is_tensor(x) for x in arrays): if not contiguous: entries = [[x[i:i + 1] for i in indices] for x in arrays] slices = [array_ops.concat(x, axis=0) for x in entries] else: slices = [x[indices[0]:indices[-1] + 1] for x in arrays] else: slices = generic_utils.slice_arrays(arrays, indices) if converted_to_list: slices = slices[0] return slices def check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'): """Determine the number of samples provided for training and evaluation. The number of samples is not defined when running with `steps`, in which case the number of samples is set to `None`. Arguments: ins: List of tensors to be fed to the Keras function. batch_size: Integer batch size or `None` if not defined. steps: Total number of steps (batches of samples) before declaring `_predict_loop` finished. Ignored with the default value of `None`. steps_name: The public API's parameter name for `steps`. Raises: ValueError: when `steps` is `None` and the attribute `ins.shape` does not exist. 
Also raises ValueError when `steps` is not `None` and `batch_size` is not `None` because they are mutually exclusive. Returns: When steps is `None`, returns the number of samples to be processed based on the size of the first dimension of the first input numpy array. When steps is not `None` and `batch_size` is `None`, returns `None`. """ if steps is not None and batch_size is not None: raise ValueError('If ' + steps_name + ' is set, the `batch_size` must be None.') if check_steps_argument(ins, steps, steps_name): return None if hasattr(ins[0], 'shape'): return int(ins[0].shape[0]) return None # Edge case where ins == [static_learning_phase] def standardize_single_array(x, expected_shape=None): """Expand data of shape (x,) to (x, 1), unless len(expected_shape)==1.""" if x is None: return None if composite_tensor_utils.is_composite_or_composite_value(x): return x if isinstance(x, int): raise ValueError( 'Expected an array data type but received an integer: {}'.format(x)) if (x.shape is not None and len(x.shape) == 1 and (expected_shape is None or len(expected_shape) != 1)): if tensor_util.is_tensor(x): x = array_ops.expand_dims(x, axis=1) else: x = np.expand_dims(x, 1) return x def standardize_input_data(data, names, shapes=None, check_batch_axis=True, exception_prefix=''): """Normalizes inputs and targets provided by users. Users may pass data as a list of arrays, dictionary of arrays, or as a single array. We normalize this to an ordered list of arrays (same order as `names`), while checking that the provided arrays have shapes that match the network's expectations. Arguments: data: User-provided input data (polymorphic). names: List of expected array names. shapes: Optional list of expected array shapes. check_batch_axis: Boolean; whether to check that the batch axis of the arrays matches the expected value found in `shapes`. exception_prefix: String prefix used for exception formatting. Returns: List of standardized input arrays (one array per model input). Raises: ValueError: in case of improperly formatted user-provided data. """ try: data_len = len(data) except TypeError: # For instance if data is `None` or a symbolic Tensor. data_len = None if not names: if data_len and not isinstance(data, dict): raise ValueError( 'Error when checking model ' + exception_prefix + ': ' 'expected no data, but got:', data) return [] if data is None: return [None for _ in range(len(names))] if isinstance(data, dict): try: data = [ data[x].values if data[x].__class__.__name__ == 'DataFrame' else data[x] for x in names ] except KeyError as e: raise ValueError('No data provided for "' + e.args[0] + '". Need data ' 'for each key in: ' + str(names)) elif isinstance(data, (list, tuple)): if isinstance(data[0], (list, tuple)): data = [np.asarray(d) for d in data] elif len(names) == 1 and isinstance(data[0], (float, int)): data = [np.asarray(data)] else: data = [ x.values if x.__class__.__name__ == 'DataFrame' else x for x in data ] else: data = data.values if data.__class__.__name__ == 'DataFrame' else data data = [data] if shapes is not None: data = [ standardize_single_array(x, shape) for (x, shape) in zip(data, shapes) ] else: data = [standardize_single_array(x) for x in data] if len(data) != len(names): if data and hasattr(data[0], 'shape'): raise ValueError('Error when checking model ' + exception_prefix + ': the list of Numpy arrays that you are passing to ' 'your model is not the size the model expected. 
' 'Expected to see ' + str(len(names)) + ' array(s), ' 'but instead got the following list of ' + str(len(data)) + ' arrays: ' + str(data)[:200] + '...') elif len(names) > 1: raise ValueError('Error when checking model ' + exception_prefix + ': you are passing a list as input to your model, ' 'but the model expects a list of ' + str(len(names)) + ' Numpy arrays instead. The list you passed was: ' + str(data)[:200]) elif len(data) == 1 and not hasattr(data[0], 'shape'): raise TypeError('Error when checking model ' + exception_prefix + ': data should be a Numpy array, or list/dict of ' 'Numpy arrays. Found: ' + str(data)[:200] + '...') elif len(names) == 1: data = [np.asarray(data)] # Check shapes compatibility. if shapes: for i in range(len(names)): if shapes[i] is not None: if tensor_util.is_tensor(data[i]): tensorshape = data[i].shape if not tensorshape: continue data_shape = tuple(tensorshape.as_list()) elif composite_tensor_utils.is_composite_or_composite_value(data[i]): tensorshape = composite_tensor_utils.get_shape(data[i]) data_shape = tuple(tensorshape.as_list()) else: data_shape = data[i].shape shape = shapes[i] if len(data_shape) != len(shape): raise ValueError('Error when checking ' + exception_prefix + ': expected ' + names[i] + ' to have ' + str(len(shape)) + ' dimensions, but got array ' 'with shape ' + str(data_shape)) if not check_batch_axis: data_shape = data_shape[1:] shape = shape[1:] for dim, ref_dim in zip(data_shape, shape): if ref_dim != dim and ref_dim is not None and dim is not None: raise ValueError('Error when checking ' + exception_prefix + ': expected ' + names[i] + ' to have shape ' + str(shape) + ' but got array with shape ' + str(data_shape)) return data def standardize_sample_or_class_weights(x_weight, output_names, weight_type): """Maps `sample_weight` or `class_weight` to model outputs. Arguments: x_weight: User-provided `sample_weight` or `class_weight` argument. output_names: List of output names (strings) in the model. weight_type: A string used purely for exception printing. Returns: A list of `sample_weight` or `class_weight` where there are exactly one element per model output. Raises: ValueError: In case of invalid user-provided argument. """ if x_weight is None or (isinstance(x_weight, (list, tuple)) and len(x_weight) == 0): # pylint: disable=g-explicit-length-test return [None for _ in output_names] if len(output_names) == 1: if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1: return x_weight if isinstance(x_weight, dict) and output_names[0] in x_weight: return [x_weight[output_names[0]]] else: return [x_weight] if isinstance(x_weight, (list, tuple)): if len(x_weight) != len(output_names): raise ValueError('Provided `' + weight_type + '` was a list of ' + str(len(x_weight)) + ' elements, but the model has ' + str(len(output_names)) + ' outputs. ' 'You should provide one `' + weight_type + '`' 'array per model output.') return x_weight if isinstance(x_weight, collections.Mapping): generic_utils.check_for_unexpected_keys(weight_type, x_weight, output_names) x_weights = [] for name in output_names: x_weights.append(x_weight.get(name)) return x_weights else: raise TypeError('The model has multiple outputs, so `' + weight_type + '` ' 'should be either a list or a dict. 
' 'Provided `' + weight_type + '` type not understood: ' + str(x_weight)) def standardize_class_weights(class_weight, output_names): return standardize_sample_or_class_weights(class_weight, output_names, 'class_weight') def standardize_sample_weights(sample_weight, output_names): return standardize_sample_or_class_weights(sample_weight, output_names, 'sample_weight') def check_array_lengths(inputs, targets, weights=None): """Does user input validation for numpy arrays. Arguments: inputs: list of Numpy arrays of inputs. targets: list of Numpy arrays of targets. weights: list of Numpy arrays of sample weights. Raises: ValueError: in case of incorrectly formatted data. """ def is_tensor_or_composite_tensor(x): return tensor_util.is_tensor( x) or composite_tensor_utils.is_composite_or_composite_value(x) def set_of_lengths(x): # Returns a set with the variation between # different shapes, with None => 0 if x is None: return {} else: return set([ y.shape[0] for y in x if y is not None and not is_tensor_or_composite_tensor(y) ]) set_x = set_of_lengths(inputs) set_y = set_of_lengths(targets) set_w = set_of_lengths(weights) if len(set_x) > 1: raise ValueError('All input arrays (x) should have ' 'the same number of samples. Got array shapes: ' + str([x.shape for x in inputs])) if len(set_y) > 1: raise ValueError('All target arrays (y) should have ' 'the same number of samples. Got array shapes: ' + str([y.shape for y in targets])) if set_x and set_y and list(set_x)[0] != list(set_y)[0]: raise ValueError('Input arrays should have ' 'the same number of samples as target arrays. ' 'Found ' + str(list(set_x)[0]) + ' input samples ' 'and ' + str(list(set_y)[0]) + ' target samples.') if len(set_w) > 1: raise ValueError('All sample_weight arrays should have ' 'the same number of samples. Got array shapes: ' + str([w.shape for w in weights])) if set_y and set_w and list(set_y)[0] != list(set_w)[0]: raise ValueError('Sample_weight arrays should have ' 'the same number of samples as target arrays. Got ' + str(list(set_y)[0]) + ' input samples and ' + str(list(set_w)[0]) + ' target samples.') def check_loss_and_target_compatibility(targets, loss_fns, output_shapes): """Does validation on the compatibility of targets and loss functions. This helps prevent users from using loss functions incorrectly. This check is purely for UX purposes. Arguments: targets: list of Numpy arrays of targets. loss_fns: list of loss functions. output_shapes: list of shapes of model outputs. Raises: ValueError: if a loss function or target array is incompatible with an output. """ key_loss_fns = { losses.mean_squared_error, losses.binary_crossentropy, losses.categorical_crossentropy } key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy, losses.CategoricalCrossentropy) for y, loss, shape in zip(targets, loss_fns, output_shapes): if y is None or loss is None or tensor_util.is_tensor(y): continue if losses.is_categorical_crossentropy(loss): if y.shape[-1] == 1: raise ValueError('You are passing a target array of shape ' + str(y.shape) + ' while using as loss `categorical_crossentropy`. ' '`categorical_crossentropy` expects ' 'targets to be binary matrices (1s and 0s) ' 'of shape (samples, classes). 
' 'If your targets are integer classes, ' 'you can convert them to the expected format via:\n' '```\n' 'from keras.utils import to_categorical\n' 'y_binary = to_categorical(y_int)\n' '```\n' '\n' 'Alternatively, you can use the loss function ' '`sparse_categorical_crossentropy` instead, ' 'which does expect integer targets.') is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper) if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and (loss.fn in key_loss_fns))): for target_dim, out_dim in zip(y.shape[1:], shape[1:]): if out_dim is not None and target_dim != out_dim: loss_name = loss.name if loss_name is None: loss_type = loss.fn if is_loss_wrapper else type(loss) loss_name = loss_type.__name__ raise ValueError('A target array with shape ' + str(y.shape) + ' was passed for an output of shape ' + str(shape) + ' while using as loss `' + loss_name + '`. ' 'This loss expects targets to have the same shape ' 'as the output.') def collect_per_output_metric_info(metrics, output_names, output_shapes, loss_fns, is_weighted=False): """Maps metric names and functions to model outputs. Arguments: metrics: a list or a list of lists or a dict of metric functions. output_names: a list of the names (strings) of model outputs. output_shapes: a list of the shapes (strings) of model outputs. loss_fns: a list of the loss functions corresponding to the model outputs. is_weighted: Boolean indicating whether the given metrics are weighted. Returns: A list (one entry per model output) of dicts. For instance, if the model has 2 outputs, and for the first output we want to compute "binary_accuracy" and "binary_crossentropy", and just "binary_accuracy" for the second output, the list would look like: `[{ 'acc': binary_accuracy(), 'ce': binary_crossentropy(), }, { 'acc': binary_accuracy(), }]` Raises: TypeError: if an incorrect type is passed for the `metrics` argument. """ if not metrics: return [{} for _ in output_names] if isinstance(metrics, list): any_sub_list = any(isinstance(m, list) for m in metrics) if any_sub_list: if len(metrics) != len(output_names): raise ValueError('When passing a list of lists as `metrics`, ' 'it should have one entry per model output. ' 'The model has ' + str(len(output_names)) + ' outputs, but you passed metrics=' + str(metrics)) # User has provided a list of len = len(outputs). nested_metrics = [generic_utils.to_list(m) for m in metrics] else: # If it is a single list we then apply all metrics to all outputs. if len(output_names) > 1: nested_metrics = [] for _ in output_names: nested_metrics.append( [metrics_module.clone_metric(m) for m in metrics]) else: nested_metrics = [metrics] elif isinstance(metrics, collections.Mapping): generic_utils.check_for_unexpected_keys('metrics', metrics, output_names) nested_metrics = [] for name in output_names: output_metrics = generic_utils.to_list(metrics.get(name, [])) nested_metrics.append(output_metrics) else: raise TypeError('Type of `metrics` argument not understood. ' 'Expected a list or dictionary, found: ' + str(metrics)) per_output_metrics = [] for i, metrics in enumerate(nested_metrics): metrics_dict = OrderedDict() for metric in metrics: metric_name = get_metric_name(metric, is_weighted) metric_fn = get_metric_function( metric, output_shape=output_shapes[i], loss_fn=loss_fns[i]) # If the metric function is not stateful, we create a stateful version. 
if not isinstance(metric_fn, metrics_module.Metric): metric_fn = metrics_module.MeanMetricWrapper( metric_fn, name=metric_name) metrics_dict[metric_name] = metric_fn per_output_metrics.append(metrics_dict) return per_output_metrics def batch_shuffle(index_array, batch_size): """Shuffles an array in a batch-wise fashion. Useful for shuffling HDF5 arrays (where one cannot access arbitrary indices). Arguments: index_array: array of indices to be shuffled. batch_size: integer. Returns: The `index_array` array, shuffled in a batch-wise fashion. """ batch_count = int(len(index_array) / batch_size) # to reshape we need to be cleanly divisible by batch size # we stash extra items and reappend them after shuffling last_batch = index_array[batch_count * batch_size:] index_array = index_array[:batch_count * batch_size] index_array = index_array.reshape((batch_count, batch_size)) np.random.shuffle(index_array) index_array = index_array.flatten() return np.append(index_array, last_batch) def standardize_weights(y, sample_weight=None, class_weight=None, sample_weight_mode=None): """Performs sample weight validation and standardization. Everything gets normalized to a single sample-wise (or timestep-wise) weight array. If both `sample_weight` and `class_weight` are provided, the weights are multiplied. Arguments: y: Numpy array of model targets to be weighted. sample_weight: User-provided `sample_weight` argument. class_weight: User-provided `class_weight` argument. sample_weight_mode: One of `None` or `"temporal"`. `"temporal"` indicated that we expect 2D weight data that will be applied to the last 2 dimensions of the targets (i.e. we are weighting timesteps, not samples). Returns: A numpy array of target weights, one entry per sample to weight. Raises: ValueError: In case of invalid user-provided arguments. """ # Iterator may return sample_weight as 1-tuple if isinstance(sample_weight, tuple): sample_weight = sample_weight[0] if sample_weight_mode is not None and sample_weight_mode != 'samplewise': if sample_weight_mode != 'temporal': raise ValueError('"sample_weight_mode ' 'should be None or "temporal". ' 'Found: ' + str(sample_weight_mode)) if len(y.shape) < 3: raise ValueError('Found a sample_weight array for ' 'an input with shape ' + str(y.shape) + '. ' 'Timestep-wise sample weighting (use of ' 'sample_weight_mode="temporal") is restricted to ' 'outputs that are at least 3D, i.e. that have ' 'a time dimension.') if sample_weight is not None and len(sample_weight.shape) != 2: raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) + '. ' 'In order to use timestep-wise sample weighting, ' 'you should pass a 2D sample_weight array.') else: if sample_weight is not None and len(sample_weight.shape) != 1: raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) + '. ' 'In order to use timestep-wise sample weights, ' 'you should specify ' 'sample_weight_mode="temporal" ' 'in compile(). If you just mean to use ' 'sample-wise weights, make sure your ' 'sample_weight array is 1D.') if sample_weight is not None: if len(sample_weight.shape) > len(y.shape): raise ValueError('Found a sample_weight with shape' + str(sample_weight.shape) + '.' 
'Expected sample_weight with rank ' 'less than or equal to ' + str(len(y.shape))) if (not tensor_util.is_tensor(sample_weight) and y.shape[:sample_weight.ndim] != sample_weight.shape): raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) + ' for an input with shape ' + str(y.shape) + '. ' 'sample_weight cannot be broadcast.') # Class weights applied per-sample. class_sample_weight = None if isinstance(class_weight, dict): if len(y.shape) > 2: raise ValueError('`class_weight` not supported for ' '3+ dimensional targets.') if len(y.shape) == 2: if y.shape[1] > 1: y_classes = np.argmax(y, axis=1) elif y.shape[1] == 1: y_classes = np.reshape(y, y.shape[0]) else: y_classes = y class_sample_weight = np.asarray( [class_weight[cls] for cls in y_classes if cls in class_weight]) if len(class_sample_weight) != len(y_classes): # subtract the sets to pick all missing classes existing_classes = set(y_classes) existing_class_weight = set(class_weight.keys()) raise ValueError( '`class_weight` must contain all classes in the data.' ' The classes %s exist in the data but not in ' '`class_weight`.' % (existing_classes - existing_class_weight)) if class_sample_weight is not None and sample_weight is not None: # Multiply weights if both are provided. return class_sample_weight * sample_weight if sample_weight is not None: return sample_weight if class_sample_weight is not None: return class_sample_weight return None def has_symbolic_tensors(ls): if context.executing_eagerly(): return False return has_tensors(ls) def has_tensors(ls): if isinstance(ls, (list, tuple)): return any(tensor_util.is_tensor(v) for v in ls) if isinstance(ls, dict): return any(tensor_util.is_tensor(v) for _, v in six.iteritems(ls)) return tensor_util.is_tensor(ls) def get_metric_name(metric, weighted=False): """Returns the name corresponding to the given metric input. Arguments: metric: Metric function name or reference. weighted: Boolean indicating if the given metric is weighted. Returns: The metric name. """ if tf2.enabled(): # We keep the string that the user has set in compile as the metric name. if isinstance(metric, six.string_types): return metric metric = metrics_module.get(metric) return metric.name if hasattr(metric, 'name') else metric.__name__ else: metric_name_prefix = 'weighted_' if weighted else '' if metric in ('accuracy', 'acc', 'crossentropy', 'ce'): if metric in ('accuracy', 'acc'): suffix = 'acc' elif metric in ('crossentropy', 'ce'): suffix = 'ce' else: metric_fn = metrics_module.get(metric) # Get metric name as string if hasattr(metric_fn, 'name'): suffix = metric_fn.name else: suffix = metric_fn.__name__ metric_name = metric_name_prefix + suffix return metric_name def get_metric_function(metric, output_shape=None, loss_fn=None): """Returns the metric function corresponding to the given metric input. Arguments: metric: Metric function name or reference. output_shape: The shape of the output that this metric will be calculated for. loss_fn: The loss function used. Returns: The metric function. 
""" if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']: return metrics_module.get(metric) is_sparse_categorical_crossentropy = ( isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or (isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.sparse_categorical_crossentropy)) is_binary_crossentropy = ( isinstance(loss_fn, losses.BinaryCrossentropy) or (isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.binary_crossentropy)) if metric in ['accuracy', 'acc']: if output_shape[-1] == 1 or is_binary_crossentropy: return metrics_module.binary_accuracy elif is_sparse_categorical_crossentropy: return metrics_module.sparse_categorical_accuracy # If the output_shape[-1] is not 1, then we know output is `categorical`. # We assume it is sparse categorical only if loss is explicitly given # as sparse categorical crossentropy loss. return metrics_module.categorical_accuracy else: if output_shape[-1] == 1 or is_binary_crossentropy: return metrics_module.binary_crossentropy elif is_sparse_categorical_crossentropy: return metrics_module.sparse_categorical_crossentropy return metrics_module.categorical_crossentropy def call_metric_function(metric_fn, y_true, y_pred=None, weights=None, mask=None): """Invokes metric function and returns the metric result tensor.""" if mask is not None: mask = math_ops.cast(mask, y_pred.dtype) if weights is None: # Use mask as sample weight. weights = mask else: # Update dimensions of weights to match with mask. mask, _, weights = tf_losses_utils.squeeze_or_expand_dimensions( mask, sample_weight=weights) weights *= mask if y_pred is not None: return metric_fn(y_true, y_pred, sample_weight=weights) # `Mean` metric only takes a single value. return metric_fn(y_true, sample_weight=weights) def get_loss_function(loss): """Returns the loss corresponding to the loss input in `compile` API.""" if loss is None or isinstance(loss, losses.Loss): return loss # Deserialize loss configuration, if needed. if isinstance(loss, collections_abc.Mapping): loss = losses.get(loss) # Custom callable class. if callable(loss) and not hasattr(loss, '__name__'): return loss # Wrap loss function with signature `(y_true, y_pred, **kwargs)` # in `LossFunctionWrapper` class. loss_fn = losses.get(loss) # For losses which are given as strings/functions in the compile API, # we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE` # (both in distribution strategy context and otherwise). return losses.LossFunctionWrapper( loss_fn, name=loss_fn.__name__, reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE) def validate_dataset_input(x, y, sample_weight, validation_split=None): """Validates user input arguments when a dataset iterator is passed. Arguments: x: Input data. A `tf.data` dataset or iterator. y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s). Expected to be `None` when `x` is a dataset iterator. sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in `x`. Expected to be `None` when `x` is a dataset iterator validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. Expected to be `None` when `x` is a dataset iterator. Raises: ValueError: if argument `y` or `sample_weight` or `validation_split` are provided by user. """ if y is not None: raise ValueError('You passed a dataset or dataset iterator (%s) as ' 'input `x` to your model. 
In that case, you should ' 'not specify a target (`y`) argument, since the dataset ' 'or dataset iterator generates both input data and ' 'target data. ' 'Received: %s' % (x, y)) if sample_weight is not None: raise ValueError('`sample_weight` argument is not supported when input ' '`x` is a dataset or a dataset iterator. Instead, you' 'can provide sample_weight as the third element of your' 'dataset, i.e. (inputs, targets, sample_weight). ' 'Received: x=%s, sample_weight=%s' % (x, sample_weight)) if validation_split is not None and validation_split != 0.0: raise ValueError( '`validation_split` argument is not supported when ' 'input `x` is a dataset or a dataset iterator. ' 'Received: x=%s, validation_split=%f' % (x, validation_split)) def validate_input_types(inp, orig_inp, allow_dict=True, field_name='inputs'): """Helper function to validate either inputs or targets.""" if isinstance(inp, (list, tuple)): if not all(isinstance(v, np.ndarray) or tensor_util.is_tensor(v) for v in inp): raise ValueError( 'Please provide as model inputs either a single array or a list of ' 'arrays. You passed: {}={}'.format(field_name, str(orig_inp))) elif isinstance(inp, dict): if not allow_dict: raise ValueError( 'You cannot pass a dictionary as model {}.'.format(field_name)) elif not isinstance(inp, np.ndarray) and not tensor_util.is_tensor(inp): raise ValueError( 'Please provide as model inputs either a single array or a list of ' 'arrays. You passed: {}={}'.format(field_name, orig_inp)) def check_generator_arguments(y=None, sample_weight=None, validation_split=None): """Validates arguments passed when using a generator.""" if y is not None: raise ValueError('`y` argument is not supported when data is' 'a generator or Sequence instance. Instead pass targets' ' as the second element of the generator.') if sample_weight is not None: raise ValueError('`sample_weight` argument is not supported when data is' 'a generator or Sequence instance. Instead pass sample' ' weights as the third element of the generator.') if validation_split: raise ValueError('If your data is in the form of a Python generator, ' 'you cannot use `validation_split`.') def check_steps_argument(input_data, steps, steps_name): """Validates `steps` argument based on input data's type. The cases when `steps` value must be provided are when 1. input data passed is an iterator. 2. model was built on top of symbolic tensors, input data is not required and is `None`. 3. input data passed is a symbolic tensor. Arguments: input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or tf.data.Dataset iterator or `None`. steps: Integer or `None`. Total number of steps (batches of samples) to execute. steps_name: The public API's parameter name for `steps`. Returns: boolean, True if `steps` argument is required, else False. Raises: ValueError: if `steps` argument is required for given input data type but not provided. 
""" is_x_iterator = isinstance( input_data, (iterator_ops.Iterator, iterator_ops.IteratorV2)) if (input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or (isinstance(input_data, list) and not input_data)): if steps is None: input_type_str = 'a Dataset iterator' if is_x_iterator else 'data tensors' raise ValueError('When using {input_type} as input to a model, you should' ' specify the `{steps_name}` argument.'.format( input_type=input_type_str, steps_name=steps_name)) return True if isinstance(input_data, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)): return True if steps is not None: list_types = (np.ndarray, list, tuple) if (isinstance(input_data, list_types) or (isinstance(input_data, dict) and any(isinstance(v, list_types) for v in input_data.values()))): logging.warning('When passing input data as arrays, do not specify ' '`steps_per_epoch`/`steps` argument. ' 'Please use `batch_size` instead.') return False def cast_single_tensor(x, dtype=None): if isinstance(x, np.ndarray): x = ops.convert_to_tensor(x) dtype = dtype or K.floatx() if x.dtype.is_floating: return math_ops.cast(x, dtype=dtype) return x def cast_if_floating_dtype_and_mismatch(targets, outputs): """Returns target data tensors using correct datatype. Checks that each target and output pair are the same datatype. If not, casts the target to the output's datatype. Args: targets: tensor or list of targets. outputs: tensor or list of outputs. Returns: Targets in appropriate datatype. """ if tensor_util.is_tensor(targets): # There is one target, so output[0] should be the only output. return cast_single_tensor(targets, dtype=outputs[0].dtype) new_targets = [] for target, out in zip(targets, outputs): if isinstance(target, np.ndarray): target = ops.convert_to_tensor(target) if target.dtype != out.dtype: new_targets.append(cast_single_tensor(target, dtype=out.dtype)) else: new_targets.append(target) return new_targets def cast_if_floating_dtype(x): """Casts the given data tensors to the default floating point type. Casts only if the input is already a floating point type. Args: x: tensor or list/tuple of tensors. Returns: Converted input. """ return nest.map_structure(cast_single_tensor, x) def cast_to_model_input_dtypes(x, model): """Casts the given data tensors to the dtypes of the model inputs. Args: x: tensor or list/tuple of tensors. model: The model. Returns: Converted input. Each tensor is casted to the corresponding input in `model.inputs`. """ input_dtypes = nest.map_structure(lambda t: t.dtype, model.inputs) return nest.map_structure(math_ops.cast, x, input_dtypes) def prepare_sample_weight_modes(training_endpoints, sample_weight_mode): """Prepares sample weight modes for the model. Args: training_endpoints: List of model _TrainingEndpoints. sample_weight_mode: sample weight mode user input passed from compile API. Raises: ValueError: In case of invalid `sample_weight_mode` input. 
""" if isinstance(sample_weight_mode, collections.Mapping): generic_utils.check_for_unexpected_keys( 'sample_weight_mode', sample_weight_mode, [e.output_name for e in training_endpoints]) for end_point in training_endpoints: if not end_point.should_skip_target_weights(): if end_point.output_name not in sample_weight_mode: raise ValueError('Output ' + end_point.output_name + 'missing from `_sample_weight_modes` dictionary') else: end_point.sample_weight_mode = sample_weight_mode.get( end_point.output_name) elif isinstance(sample_weight_mode, (list, tuple)): if len(sample_weight_mode) != len(training_endpoints): raise ValueError('When passing a list as sample_weight_mode, ' 'it should have one entry per model output. ' 'The model has ' + str(len(training_endpoints)) + ' outputs, but you passed ' + str(len(sample_weight_mode)) + '_sample_weight_modes.') for mode, endpoint in zip(sample_weight_mode, training_endpoints): if not endpoint.should_skip_target_weights(): endpoint.sample_weight_mode = mode else: for endpoint in training_endpoints: if not endpoint.should_skip_target_weights(): endpoint.sample_weight_mode = sample_weight_mode def prepare_loss_functions(loss, output_names): """Converts loss to a list of loss functions. Arguments: loss: String (name of objective function), objective function or `tf.losses.Loss` instance. See `tf.losses`. If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that will be minimized by the model will then be the sum of all individual losses. output_names: List of model output names. Returns: A list of loss objective functions. Raises: ValueError: If loss is a dict with keys not in model output names, or if loss is a list with len not equal to model outputs. """ if isinstance(loss, collections_abc.Mapping): generic_utils.check_for_unexpected_keys('loss', loss, output_names) loss_functions = [] for name in output_names: if name not in loss: logging.warning( 'Output {0} missing from loss dictionary. We assume ' 'this was done on purpose. The fit and evaluate APIs will not be ' 'expecting any data to be passed to {0}.'.format(name)) loss_functions.append(get_loss_function(loss.get(name, None))) elif isinstance(loss, six.string_types): loss_functions = [get_loss_function(loss) for _ in output_names] elif isinstance(loss, collections_abc.Sequence): if len(loss) != len(output_names): raise ValueError('When passing a list as loss, it should have one entry ' 'per model outputs. The model has {} outputs, but you ' 'passed loss={}'.format(len(output_names), loss)) loss_functions = nest.map_structure(get_loss_function, loss) else: loss_functions = [get_loss_function(loss) for _ in range(len(output_names))] return loss_functions def prepare_loss_weights(training_endpoints, loss_weights=None): """Converts loss weights to a list of loss weights. The result loss weights will be populated on the trainging endpoint. Arguments: training_endpoints: List of model training endpoints. loss_weights: Optional list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value that will be minimized by the model will then be the *weighted sum* of all individual losses, weighted by the `loss_weights` coefficients. If a list, it is expected to have a 1:1 mapping to the model's outputs. If a dict, it is expected to map output names (strings) to scalar coefficients. 
Raises: ValueError: If loss weight is a dict with key not in model output names, or if loss is a list with len not equal to model outputs. """ if loss_weights is None: for e in training_endpoints: e.loss_weight = 1. elif isinstance(loss_weights, collections.Mapping): generic_utils.check_for_unexpected_keys( 'loss_weights', loss_weights, [e.output_name for e in training_endpoints]) for e in training_endpoints: e.loss_weight = loss_weights.get(e.output_name, 1.) elif isinstance(loss_weights, list): if len(loss_weights) != len(training_endpoints): raise ValueError('When passing a list as loss_weights, ' 'it should have one entry per model output. ' 'The model has ' + str(len(training_endpoints)) + ' outputs, but you passed loss_weights=' + str(loss_weights)) for w, e in zip(loss_weights, training_endpoints): e.loss_weight = w else: raise TypeError('Could not interpret loss_weights argument: ' + str(loss_weights) + ' - expected a list of dicts.') # TODO(rohanj): This is a hack to get around not depending on feature_column and # create a cyclical dependency. Figure out a cleaner solution def is_feature_layer(layer): """Returns whether `layer` is a FeatureLayer or not.""" return getattr(layer, '_is_feature_layer', False) def is_eager_dataset_or_iterator(data): return context.executing_eagerly() and isinstance( data, (dataset_ops.DatasetV1, dataset_ops.DatasetV2, iterator_ops.IteratorV2)) # pylint: disable=protected-access def assert_not_batched(dataset): """Asserts that `dataset` is not batched. The algorithm used by this method is sound but not complete. In other words, if the method fails to establish the assertion, it does not mean the dataset is batched. Example usage: ```python try: assert_not_batched(dataset) # safe to assume `dataset` it not batched here expect ValueError: # make no assumptions about `dataset` ``` Args: dataset: The dataset to analyze. Raises: ValueError: If the method cannot establish the assertion. """ if isinstance(dataset, dataset_ops.DatasetV1Adapter): return assert_not_batched(dataset._dataset) else: whitelisted_types = [ dataset_ops._OptionsDataset, dataset_ops.ConcatenateDataset, dataset_ops.CacheDataset, dataset_ops.FilterDataset, dataset_ops.MapDataset, dataset_ops.ParallelMapDataset, dataset_ops.PrefetchDataset, dataset_ops.RangeDataset, dataset_ops.RepeatDataset, dataset_ops.ShuffleDataset, dataset_ops.SkipDataset, dataset_ops.SparseTensorSliceDataset, dataset_ops.TakeDataset, dataset_ops.TensorDataset, dataset_ops.TensorSliceDataset, dataset_ops.ZipDataset, readers.FixedLengthRecordDatasetV2, readers.TextLineDatasetV2, readers.TFRecordDatasetV2, ] for ty in whitelisted_types: if isinstance(dataset, ty): for input_dataset in dataset._inputs(): assert_not_batched(input_dataset) return raise ValueError('Could not assert that dataset is not batched.') # pylint: disable=protected-access def assert_not_shuffled(dataset): """Asserts that `dataset` is not shuffled. The algorithm used by this method is sound but not complete. In other words, if the method fails to establish the assertion, it does not mean the dataset is shuffled. Example usage: ```python try: assert_not_shuffled(dataset) # safe to assume `dataset` it not shuffled here expect ValueError: # make no assumptions about `dataset` ``` Args: dataset: The dataset to analyze. Raises: ValueError: If the method cannot establish the assertion. 
""" if isinstance(dataset, dataset_ops.DatasetV1Adapter): return assert_not_shuffled(dataset._dataset) else: whitelisted_types = [ dataset_ops._OptionsDataset, dataset_ops.BatchDataset, dataset_ops.ConcatenateDataset, dataset_ops.CacheDataset, dataset_ops.FilterDataset, dataset_ops.MapDataset, dataset_ops.PaddedBatchDataset, dataset_ops.ParallelMapDataset, dataset_ops.PrefetchDataset, dataset_ops.RangeDataset, dataset_ops.RepeatDataset, dataset_ops.SkipDataset, dataset_ops.SparseTensorSliceDataset, dataset_ops.TakeDataset, dataset_ops.TensorDataset, dataset_ops.TensorSliceDataset, dataset_ops.WindowDataset, dataset_ops.ZipDataset, readers.FixedLengthRecordDatasetV2, readers.TextLineDatasetV2, readers.TFRecordDatasetV2, ] for ty in whitelisted_types: if isinstance(dataset, ty): for input_dataset in dataset._inputs(): assert_not_shuffled(input_dataset) return raise ValueError('Could not assert that dataset is not shuffled.') def verify_dataset_shuffled(x): """Verifies that the dataset is shuffled. Args: x: Dataset passed as an input to the model. Raises: ValueError: if the dataset is not already shuffled. """ assert isinstance(x, dataset_ops.DatasetV2) try: assert_not_shuffled(x) except ValueError: # Dataset may or may not be shuffled. return else: logging.warning('Expected a shuffled dataset but input dataset `x` is ' 'not shuffled. Please invoke `shuffle()` on input dataset.') def is_dataset_or_iterator(data): return isinstance(data, (dataset_ops.DatasetV1, dataset_ops.DatasetV2, iterator_ops.Iterator, iterator_ops.IteratorV2)) def get_iterator(dataset): """Create and initialize an iterator from a dataset.""" if context.executing_eagerly(): iterator = dataset_ops.make_one_shot_iterator(dataset) else: iterator = dataset_ops.make_initializable_iterator(dataset) initialize_iterator(iterator) return iterator def initialize_iterator(iterator): if not context.executing_eagerly(): init_op = iterator.initializer K.get_session((init_op,)).run(init_op) def extract_tensors_from_dataset(dataset): """Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset. Arguments: dataset: Dataset instance. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None. """ iterator = get_iterator(dataset) inputs, targets, sample_weight = unpack_iterator_input(iterator) return inputs, targets, sample_weight def unpack_iterator_input(iterator): """Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`. Arguments: iterator: Instance of a dataset iterator. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None. """ try: next_element = iterator.get_next() except errors.OutOfRangeError: raise RuntimeError('Your dataset iterator ran out of data; ' 'Make sure that your dataset can generate ' 'required number of samples.') if isinstance(next_element, (list, tuple)): if len(next_element) not in [2, 3]: raise ValueError( 'Please provide model inputs as a list or tuple of 2 or 3 ' 'elements: (input, target) or (input, target, sample_weights) ' 'Received %s' % next_element) if len(next_element) == 2: x, y = next_element weights = None else: x, y, weights = next_element else: x = next_element y = None weights = None return x, y, weights def infer_steps_for_dataset(model, dataset, steps, epochs=1, steps_name='steps'): """Infers steps_per_epoch needed to loop through a dataset. Arguments: model: Keras model instance. dataset: Input data of type tf.data.Dataset. steps: Number of steps to draw from the dataset (may be None if unknown). 
epochs: Number of times to iterate over the dataset. steps_name: The string name of the steps argument, either `steps`, `validation_steps`, or `steps_per_epoch`. Only used for error message formatting. Returns: Integer or `None`. Inferred number of steps to loop through the dataset. `None` is returned if 1) the size of the dataset is unknown and `steps` was not specified, or 2) this is multi-worker training and auto sharding is enabled. Raises: ValueError: In case of invalid argument values. """ assert isinstance(dataset, dataset_ops.DatasetV2) if (model._in_multi_worker_mode() and dataset.options().experimental_distribute.auto_shard): # If the dataset would be auto-sharded, we should not infer a local # steps_per_epoch due to the possible inbalanced sharding between workers. return None size = K.get_value(cardinality.cardinality(dataset)) if size == cardinality.INFINITE and steps is None: raise ValueError('When passing an infinitely repeating dataset, you ' 'must specify the `%s` argument.' % (steps_name,)) if size >= 0: if steps is not None and steps * epochs > size: if epochs > 1: raise ValueError('The dataset you passed contains %s batches, but you ' 'passed `epochs=%s` and `%s=%s`, which is a total of ' '%s steps. We cannot draw that many steps from this ' 'dataset. We suggest to set `%s=%s`.' % (size, epochs, steps_name, steps, steps * epochs, steps_name, size // epochs)) else: raise ValueError('The dataset you passed contains %s batches, but you ' 'passed `%s=%s`. We cannot draw that many steps from ' 'this dataset. We suggest to set `%s=%s`.' % (size, steps_name, steps, steps_name, size)) if steps is None: if size >= 0: return size return None return steps class ModelInputs(object): """Encapsulates model inputs. Allows for transforming model inputs while keeping the same structure. """ def __init__(self, inputs): self._inputs = inputs self._is_dict = isinstance(self._inputs, dict) self._is_single_input = not isinstance(self._inputs, (list, tuple, dict)) self._flattened_inputs = [] self._input_names = [] if self._is_dict: for k in sorted(self._inputs.keys()): self._flattened_inputs.append(self._inputs[k]) self._input_names.append(k) else: self._flattened_inputs = nest.flatten(self._inputs) self._input_names = [ 'input_%d' % (i + 1) for i in range(len(self._flattened_inputs)) ] def get_input_names(self): """Returns keys to name inputs by. In case inputs provided were a list, tuple or single entry, we make up a key 'input_%d'. For dictionary case, we return a sorted list of keys. """ return self._input_names def get_symbolic_inputs(self, return_single_as_list=False): """Returns inputs to be set as self.inputs for a model.""" # TODO(karmel): There is a side-effect here where what you get # with as_list and as_dict depends on whether you have called this # method first, since it modifies in place. for i, (k, v) in enumerate(zip(self._input_names, self._flattened_inputs)): if isinstance(v, (list, float, int)): v = np.asarray(v) if v.ndim == 1: v = np.expand_dims(v, 1) if isinstance(v, (np.ndarray, ops.EagerTensor)): # We fix the placeholder shape except the batch size. # This is suboptimal, but it is the best we can do with the info # we have. The user should call `model._set_inputs(placeholders)` # to specify custom placeholders if the need arises. 
shape = (None,) + tuple(v.shape[1:]) if shape == (None,): shape = (None, 1) dtype = dtypes.as_dtype(v.dtype) if dtype.is_floating: dtype = K.floatx() v = K.placeholder(shape=shape, name=k, dtype=dtype) elif isinstance(v, tensor_spec.TensorSpec): shape = (None,) + tuple(v.shape.as_list()[1:]) if shape == (None,): shape = (None, 1) v = K.placeholder(shape=shape, name=k, dtype=v.dtype) self._flattened_inputs[i] = v if self._is_dict: return dict(zip(self._input_names, self._flattened_inputs)) if self._is_single_input and not return_single_as_list: return self._flattened_inputs[0] return self._flattened_inputs def as_dict(self): """An iterable over a dictionary version of inputs.""" for k, v in zip(self._input_names, self._flattened_inputs): yield k, v def as_list(self): """Returning the inputs as a list.""" return self._flattened_inputs # Allow use of methods not exposed to the user. # pylint: disable=protected-access def get_input_shape_and_dtype(layer): """Retrieves input shape and input dtype of layer if applicable. Args: layer: Layer (or model) instance. Returns: Tuple (input_shape, input_dtype). Both could be None if the layer does not have a defined input shape. Raises: ValueError: in case an empty Sequential or Functional model is passed. """ def _is_graph_model(layer): return ((hasattr(layer, '_is_graph_network') and layer._is_graph_network) or layer.__class__.__name__ == 'Sequential') # In case of nested models: recover the first layer # of the deepest model to infer input shape and dtype. # Subclassed Models may not have been built so can't be checked. while _is_graph_model(layer): if not layer.layers: raise ValueError('An empty Model cannot be used as a Layer.') layer = layer.layers[0] if hasattr(layer, '_batch_input_shape'): return layer._batch_input_shape, layer.dtype return None, None # pylint: enable=protected-access def get_static_batch_size(layer): """Gets the static batch size of a Layer. Arguments: layer: a `Layer` instance. Returns: The static batch size of a Layer. """ batch_input_shape, _ = get_input_shape_and_dtype(layer) if batch_input_shape is not None: return tensor_shape.as_dimension(batch_input_shape[0]).value return None def generic_output_names(outputs_list): return ['output_%d' % (i + 1) for i in range(len(outputs_list))] def convert_eager_tensors_to_numpy(structure): """Convert every EagerTensor in `structure` to NumPy. Arguments: structure: An arbitrary structure of elements to be converted to NumPy arrays. Returns: An identical structure with EagerTensors converted to NumPy arrays. """ def _convert(element): if isinstance(element, ops.EagerTensor): return element.numpy() return element return nest.map_structure(_convert, structure) def list_to_tuple(maybe_list): """Datasets will stack the list of tensor, so switch them to tuples.""" if isinstance(maybe_list, list): return tuple(maybe_list) return maybe_list def should_run_validation(validation_freq, epoch): """Checks if validation should be run this epoch. Arguments: validation_freq: Integer or list. If an integer, specifies how many training epochs to run before a new validation run is performed. If a list, specifies the epochs on which to run validation. epoch: Integer, the number of the training epoch just completed. Returns: Bool, True if validation should be run. Raises: ValueError: if `validation_freq` is an Integer and less than 1, or if it is neither an Integer nor a Sequence. """ # `epoch` is 0-indexed internally but 1-indexed in the public API. 
one_indexed_epoch = epoch + 1 if isinstance(validation_freq, int): if validation_freq < 1: raise ValueError('`validation_freq` can not be less than 1.') return one_indexed_epoch % validation_freq == 0 if not isinstance(validation_freq, collections_abc.Container): raise ValueError('`validation_freq` must be an Integer or ' '`collections_abc.Container` (e.g. list, tuple, etc.)') return one_indexed_epoch in validation_freq def split_training_and_validation_data(x, y, sample_weights, validation_split): """Split input data into train/eval section based on validation_split.""" if has_symbolic_tensors(x): raise ValueError('If your data is in the form of symbolic tensors, ' 'you cannot use `validation_split`.') if hasattr(x[0], 'shape'): split_at = int(x[0].shape[0] * (1. - validation_split)) else: split_at = int(len(x[0]) * (1. - validation_split)) x, val_x = (generic_utils.slice_arrays(x, 0, split_at), generic_utils.slice_arrays(x, split_at)) y, val_y = (generic_utils.slice_arrays(y, 0, split_at), generic_utils.slice_arrays(y, split_at)) if sample_weights: sample_weights, val_sample_weights = ( generic_utils.slice_arrays(sample_weights, 0, split_at), generic_utils.slice_arrays(sample_weights, split_at), ) else: val_sample_weights = None return x, y, sample_weights, val_x, val_y, val_sample_weights def unpack_validation_data(validation_data): """Unpack validation data based on input type. The validation data is not touched if it is a dataset or dataset iterator. For other types of input (Numpy or tensor), it will be unpacked into a tuple of 3: x, y, and sample weights. Args: validation_data: dataset, dataset iterator, or numpy, tensor tuple. Returns: tuple of 3, (x, y, sample_weights) for numpy and tensor input. """ if (isinstance(validation_data, (iterator_ops.Iterator, iterator_ops.IteratorV2, dataset_ops.DatasetV2))): val_x = validation_data val_y = None val_sample_weight = None elif len(validation_data) == 2: val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence val_sample_weight = None elif len(validation_data) == 3: val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence else: raise ValueError( 'When passing a `validation_data` argument, ' 'it must contain either 2 items (x_val, y_val), ' 'or 3 items (x_val, y_val, val_sample_weights), ' 'or alternatively it could be a dataset or a ' 'dataset iterator. ' 'However we received `validation_data=%s`' % validation_data) return val_x, val_y, val_sample_weight class TrainingLoop(object): """TrainingLoop is a wrapper class around the training logic. This class encapsulates the different logic of fit/eval/predict with regard to different data inputs and model conditions. Note that TrainingLoop is stateless, which means it doesn't contain any internal state and can be reused with different models and inputs.
""" def fit(self, model, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0., validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, **kwargs): """Train the model with the inputs and targets.""" raise NotImplementedError() def evaluate(self, model, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, **kwargs): """Returns the loss value & metrics values for the model in test mode.""" raise NotImplementedError() def predict(self, model, x, batch_size=None, verbose=0, steps=None, callbacks=None, **kwargs): raise NotImplementedError()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/training_utils.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for dynamic control flow behavior with Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import def_function from tensorflow.python.framework import dtypes from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.optimizer_v2 import rmsprop from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.platform import test class ControlFlowLayer1(base_layer.Layer): """Layer with an `if` condition in call.""" def call(self, inputs): if math_ops.reduce_sum(inputs) > 0: return math_ops.sqrt(inputs) else: return math_ops.square(inputs) class ControlFlowLayer2(base_layer.Layer): """Layer with a `for` loop in call.""" def call(self, inputs): samples = tensor_array_ops.TensorArray( dtype=dtypes.float32, size=array_ops.shape(inputs)[0]) i = 0 for sample in inputs: samples = samples.write(i, math_ops.square(sample)) i += 1 return samples.stack() class NestedControlFlowLayer(base_layer.Layer): """Layer nested with a control flow layer.""" def __init__(self, **kwargs): super(NestedControlFlowLayer, self).__init__(**kwargs) self.layer = ControlFlowLayer1() def call(self, inputs): return self.layer(inputs) class ControlFlowModel(keras.Model): """Model with an `if` condition in call.""" def call(self, inputs): if math_ops.reduce_sum(inputs) > 0: return math_ops.sqrt(inputs) else: return math_ops.square(inputs) class NestedControlFlowModel(keras.Model): """Model with an `if` condition in call using a control flow layer.""" def __init__(self, **kwargs): super(NestedControlFlowModel, self).__init__(**kwargs) self.layer = NestedControlFlowLayer() def call(self, inputs): inputs = self.layer(inputs) if math_ops.reduce_sum(inputs) > 0: return math_ops.sqrt(inputs) else: return math_ops.square(inputs) class FunctionControlFlowModel(keras.Model): """Model with control flow where `call` is wrapped in function already.""" @def_function.function def call(self, inputs): if math_ops.reduce_sum(inputs) > 0: return math_ops.sqrt(inputs) else: return math_ops.square(inputs) @keras_parameterized.run_all_keras_modes class AutographWrapperTest(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types @parameterized.named_parameters(('with_if', ControlFlowLayer1), ('with_for', ControlFlowLayer2), ('nested', NestedControlFlowLayer)) def test_control_flow_layer(self, layer_class): model = testing_utils.get_model_from_layers([layer_class()], input_shape=(3,)) model.compile(rmsprop.RMSprop(0.001), loss='mse') 
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3))) @parameterized.named_parameters( ('with_if', ControlFlowModel), ('nested', NestedControlFlowModel), ('wrapped_in_function', FunctionControlFlowModel)) def test_control_flow_model(self, model_class): model = model_class() model.compile(rmsprop.RMSprop(0.001), loss='mse') model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3))) def test_control_flow_in_deferred_sequential_model(self): model = keras.Sequential( [ControlFlowLayer1(), keras.layers.Dense(3), ControlFlowLayer2()]) model.compile(rmsprop.RMSprop(0.001), loss='mse') model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3))) if __name__ == '__main__': test.main()
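# --- Editor's note: illustrative sketch, not part of the original test file. ---
# The layers and models above rely on AutoGraph: when `call` is traced into a
# graph, the Python `if` / `for` statements are rewritten as TF control flow
# ops.  An alternative pattern (not exercised by these tests) is to declare the
# layer `dynamic=True`, so that `call` always runs eagerly and no conversion is
# needed.  A minimal sketch of that pattern, assuming only the public
# `keras.layers.Layer` constructor arguments:
#
#   class EagerBranchLayer(base_layer.Layer):
#
#     def __init__(self, **kwargs):
#       super(EagerBranchLayer, self).__init__(dynamic=True, **kwargs)
#
#     def call(self, inputs):
#       # Runs eagerly, so this is a plain Python branch on concrete values.
#       if math_ops.reduce_sum(inputs) > 0:
#         return math_ops.sqrt(inputs)
#       return math_ops.square(inputs)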
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/control_flow_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for numerical correctness.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test class Bias(keras.layers.Layer): """Layer that add a bias to its inputs.""" def build(self, input_shape): self.bias = self.add_variable('bias', (1,), initializer='zeros') def call(self, inputs): return inputs + self.bias class MultiInputSubclassed(keras.Model): """Subclassed Model that adds its inputs and then adds a bias.""" def __init__(self): super(MultiInputSubclassed, self).__init__() self.add = keras.layers.Add() self.bias = Bias() def call(self, inputs): added = self.add(inputs) return self.bias(added) def multi_input_functional(): """Functional Model that adds its inputs and then adds a bias.""" input_1 = keras.Input(shape=(1,)) input_2 = keras.Input(shape=(1,)) input_3 = keras.Input(shape=(1,)) added = keras.layers.Add()([input_1, input_2, input_3]) output = Bias()(added) return keras.Model([input_1, input_2, input_3], output) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class SimpleBiasTest(keras_parameterized.TestCase): def _get_simple_bias_model(self): model = testing_utils.get_model_from_layers([Bias()], input_shape=(1,)) model.compile( keras.optimizer_v2.gradient_descent.SGD(0.1), 'mae', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) return model def test_simple_bias_fit(self): x = np.array([[0.], [1.], [2.]]) y = np.array([[0.5], [2.], [3.5]]) model = self._get_simple_bias_model() history = model.fit(x, y, batch_size=3, epochs=5) self.assertAllClose(history.history['loss'], [1., 0.9, 0.8, 0.7, 0.6]) def test_simple_bias_evaluate(self): x = np.array([[0.], [1.], [2.]]) y = np.array([[1.], [3.], [5.]]) model = self._get_simple_bias_model() loss = model.evaluate(x, y, batch_size=1) self.assertAlmostEqual(loss, 2.) 
def test_simple_bias_predict(self): x = np.array([[0.], [1.], [2.]]) model = self._get_simple_bias_model() pred = model.predict(x, batch_size=1) self.assertAllClose(x, pred) @keras_parameterized.run_all_keras_modes class MultipleInputTest(keras_parameterized.TestCase): def _get_multiple_input_model(self, subclassed=True): if subclassed: model = MultiInputSubclassed() else: model = multi_input_functional() model.compile( keras.optimizer_v2.gradient_descent.SGD(0.1), 'mae', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) return model @parameterized.named_parameters(('subclassed', True), ('functional', False)) def test_multiple_input_fit(self, subclassed): x = [ np.array([[1.], [2.], [3.]]), np.array([[4.], [5.], [6.]]), np.array([[7.], [8.], [9.]]) ] y = np.array([[12.5], [16.], [19.5]]) model = self._get_multiple_input_model(subclassed) history = model.fit(x, y, batch_size=3, epochs=5) self.assertAllClose(history.history['loss'], [1., 0.9, 0.8, 0.7, 0.6]) @parameterized.named_parameters(('subclassed', True), ('functional', False)) def test_multiple_input_evaluate(self, subclassed): x = [ np.array([[1.], [2.], [3.]]), np.array([[4.], [5.], [6.]]), np.array([[7.], [8.], [9.]]) ] y = np.array([[13.], [17.], [21.]]) model = self._get_multiple_input_model(subclassed) loss = model.evaluate(x, y, batch_size=3) self.assertAlmostEqual(loss, 2.) @parameterized.named_parameters(('subclassed', True), ('functional', False)) def test_multiple_input_predict(self, subclassed): x = [ np.array([[1.], [2.], [3.]]), np.array([[4.], [5.], [6.]]), np.array([[7.], [8.], [9.]]) ] model = self._get_multiple_input_model(subclassed) pred = model.predict(x, batch_size=1) self.assertAllClose(pred, [[12.], [15.], [18.]]) if __name__ == '__main__': test.main()
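# --- Editor's note: not part of the original test file. ---
# Why the expected losses above are [1., 0.9, 0.8, 0.7, 0.6]: in both test
# classes the only trainable weight is the scalar bias, and the whole dataset
# is a single batch.  With the bias initialized to zero, the MAE residuals
# start at a mean of 1.0, and the MAE gradient w.r.t. the bias is
# -mean(sign(residual)) = -1 while every residual stays positive.  SGD with
# learning rate 0.1 therefore raises the bias by 0.1 per epoch, and since the
# loss logged for an epoch is computed before that epoch's single update, it
# drops by 0.1 each epoch.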
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/correctness_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """A `Network` is way to compose layers: the topological form of a `Model`. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import itertools import json import os import threading import numpy as np from six.moves import zip # pylint: disable=redefined-builtin from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import context from tensorflow.python.framework import errors from tensorflow.python.framework import errors_impl from tensorflow.python.framework import func_graph from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend from tensorflow.python.keras import saving from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.engine import node as node_module from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.utils import generic_utils from tensorflow.python.keras.utils import layer_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.keras.utils.io_utils import ask_to_proceed_with_overwrite from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import checkpoint_management from tensorflow.python.training.tracking import base as trackable from tensorflow.python.training.tracking import data_structures from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils from tensorflow.python.training.tracking import tracking from tensorflow.python.training.tracking import util as trackable_utils from tensorflow.python.util import nest from tensorflow.python.util import object_identity from tensorflow.python.util import serialization from tensorflow.python.util import tf_inspect # pylint: disable=g-import-not-at-top try: import h5py except ImportError: h5py = None # pylint: enable=g-import-not-at-top class Network(base_layer.Layer): """A `Network` is a composition of layers. `Network` is the topological form of a "model". A `Model` is simply a `Network` with added training routines. Two types of `Networks` exist: Graph Networks and Subclass Networks. Graph networks are used in the Keras Functional and Sequential APIs. Subclassed networks are used when a user subclasses the `Model` class. In general, more Keras features are supported with Graph Networks than with Subclassed Networks, specifically: - Model cloning (`keras.models.clone`) - Serialization (`model.get_config()/from_config`, `model.to_json()` - Whole-model saving (`model.save()`) A Graph Network can be instantiated by passing two arguments to `__init__`. 
The first argument is the `keras.Input` Tensors that represent the inputs to the Network. The second argument specifies the output Tensors that represent the outputs of this Network. Both arguments can be a nested structure of Tensors. Example: ``` inputs = {'x1': keras.Input(shape=(10,)), 'x2': keras.Input(shape=(1,))} t = keras.layers.Dense(1, activation='relu')(inputs['x1']) outputs = keras.layers.Add()([t, inputs['x2']]) network = Network(inputs, outputs) ``` A Graph Network constructed using the Functional API can also include raw TensorFlow functions, with the exception of functions that create Variables or assign ops. Example: ``` inputs = keras.Input(shape=(10,)) x = keras.layers.Dense(1)(inputs) outputs = tf.nn.relu(x) network = Network(inputs, outputs) ``` Subclassed Networks can be instantiated via `name` and (optional) `dynamic` keyword arguments. Subclassed Networks keep track of their Layers, and their `call` method can be overridden. Subclassed Networks are typically created indirectly, by subclassing the `Model` class. Example: ``` class MyModel(keras.Model): def __init__(self): super(MyModel, self).__init__(name='my_model', dynamic=False) self.layer1 = keras.layers.Dense(10, activation='relu') def call(self, inputs): return self.layer1(inputs) ``` Allowed args in `super().__init__`: name: String name of the model. dynamic: (Subclassed models only) Set this to `True` if your model should only be run eagerly, and should not be used to generate a static computation graph. This attribute is automatically set for Functional API models. trainable: Boolean, whether the model's variables should be trainable. dtype: (Subclassed models only) Default dtype of the model's weights (default of `None` means use the type of the first input). This attribute has no effect on Functional API models, which do not have weights of their own. """ # See tf.Module for the usage of this property. # The key of _layer_call_argspecs is a layer. tf.Module._flatten will fail to # flatten the key since it is trying to convert Trackable/Layer to a string. _TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain( ('_layer_call_argspecs',), base_layer.Layer._TF_MODULE_IGNORED_PROPERTIES )) def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called # Signature detection if (len(args) == 2 or len(args) == 1 and 'outputs' in kwargs or 'inputs' in kwargs and 'outputs' in kwargs): # Graph network self._init_graph_network(*args, **kwargs) else: # Subclassed network self._init_subclassed_network(**kwargs) tf_utils.assert_no_legacy_layers(self.layers) # Several Network methods have "no_automatic_dependency_tracking" # annotations. Since Network does automatic dependency tracking on attribute # assignment, including for common data structures such as lists, by default # we'd have quite a few empty dependencies which users don't care about (or # would need some way to ignore dependencies automatically, which is confusing # when applied to user code). Some attributes, such as _layers, would cause # structural issues (_layers being the place where Layers assigned to tracked # attributes are stored). # # Aside from these aesthetic and structural issues, useless dependencies on # empty lists shouldn't cause issues; adding or removing them will not break # checkpoints, but may cause "all Python objects matched" assertions to fail # (in which case less strict assertions may be substituted if necessary).
@trackable.no_automatic_dependency_tracking def _base_init(self, name=None, **kwargs): # The following are implemented as property functions: # self.trainable_weights # self.non_trainable_weights # self.input_spec # self.losses # self.updates generic_utils.validate_kwargs(kwargs, {'trainable', 'dtype', 'dynamic', 'autocast'}) # Object to store all thread local layer properties. self._thread_local = threading.local() self._init_set_name(name, zero_based=True) self._activity_regularizer = None # This acts just like the `trainable` attribute of any layer instance. self._trainable = kwargs.get('trainable', True) # This attribute has no effect if the model is created using the Functional # API. Instead, `model.dynamic` is determined based on the internal layers. self._dynamic = kwargs.get('dynamic', False) self._is_compiled = False self._layers = [] # This is True for Sequential networks and Functional networks. self._compute_output_and_mask_jointly = False self.supports_masking = False if not hasattr(self, 'optimizer'): # Don't reset optimizer if already set. self.optimizer = None # Private attributes to implement compatibility with Layer. self._maybe_create_attribute('_trainable_weights', []) self._maybe_create_attribute('_non_trainable_weights', []) self._updates = [] # Used in symbolic mode only. self._losses = [] self._callable_losses = [] # A list of metric instances corresponding to the symbolic metric tensors # added using the `add_metric` API. self._metrics = [] self._scope = None # Never used. self._reuse = None # Never used. if context.executing_eagerly(): self._graph = None else: self._graph = ops.get_default_graph() # Used in symbolic mode only. # Both graph and subclassed networks have a dtype policy. For graph # networks, the policy's compute and variable dtypes are ignored, but other # fields, like the loss scale, are used by Models. For subclassed networks, # the compute and variable dtypes are used as like any ordinary layer. self._set_dtype_policy(kwargs.get('dtype', None)) # All layers in order of horizontal graph traversal. # Entries are unique. Includes input and output layers. self._maybe_create_attribute('_layers', []) # Used in symbolic mode only, only in conjunction with graph-networks self._outbound_nodes = [] self._inbound_nodes = [] self._trackable_saver = ( trackable_utils.saver_with_op_caching(self)) @trackable.no_automatic_dependency_tracking def _init_graph_network(self, inputs, outputs, name=None, **kwargs): generic_utils.validate_kwargs( kwargs, {'trainable'}, 'Functional models may only specify `name` and `trainable` keyword ' 'arguments during initialization. Got an unexpected argument:') # Normalize and set self.inputs, self.outputs. if isinstance(inputs, list) and len(nest.flatten(inputs)) == 1: inputs = inputs[0] if isinstance(outputs, list) and len(nest.flatten(outputs)) == 1: outputs = outputs[0] self._nested_outputs = outputs self._nested_inputs = inputs self.inputs = nest.flatten(inputs) self.outputs = nest.flatten(outputs) if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs): base_layer_utils.create_keras_history(self._nested_outputs) self._base_init(name=name, **kwargs) self._validate_graph_inputs_and_outputs() # A Network does not create weights of its own, thus it is already # built. self.built = True self._compute_output_and_mask_jointly = True self._is_graph_network = True # `_expects_training_arg` is True since the `training` argument is always # present in the signature of the `call` method of a graph network. 
self._expects_training_arg = True self._expects_mask_arg = True # A graph network does not autocast inputs, as its layers will cast them # instead. self._autocast = False self._input_layers = [] self._output_layers = [] self._input_coordinates = [] self._output_coordinates = [] # This is for performance optimization when calling the Network on new # inputs. Every time the Network is called on a set on input tensors, # we compute the output tensors, output masks and output shapes in one pass, # then cache them here. When any of these outputs is queried later, we # retrieve it from there instead of recomputing it. self._output_mask_cache = {} self._output_tensor_cache = {} self._output_shape_cache = {} # Build self._output_layers: for x in self.outputs: layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access self._output_layers.append(layer) self._output_coordinates.append((layer, node_index, tensor_index)) # Build self._input_layers: for x in self.inputs: layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access # It's supposed to be an input layer, so only one node # and one tensor output. assert node_index == 0 assert tensor_index == 0 self._input_layers.append(layer) self._input_coordinates.append((layer, node_index, tensor_index)) # Keep track of the network's nodes and layers. nodes, nodes_by_depth, layers, _ = _map_graph_network( self.inputs, self.outputs) self._network_nodes = nodes self._nodes_by_depth = nodes_by_depth self._layers = layers self._layer_call_argspecs = {} for layer in self._layers: self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call) self._track_layers(layers) # Create the node linking internal inputs to internal outputs. node_module.Node( outbound_layer=self, inbound_layers=[], node_indices=[], tensor_indices=[], input_tensors=self._nested_inputs, output_tensors=self._nested_outputs) # Build self.input_names and self.output_names. self._set_output_names() self.input_names = [] self._feed_input_names = [] self._feed_inputs = [] self._feed_input_shapes = [] for i, layer in enumerate(self._input_layers): self.input_names.append(layer.name) if layer.is_placeholder: self._feed_input_names.append(layer.name) # Use batch_input_shape here because non-eager composite tensors may not # have a shape attribute that's meaningful (sparse, for instance, has # a tensor that's non-constant and needs to be fed). This means that # input layers that create placeholders will need to have the # batch_input_shape attr to allow for input shape validation. self._feed_input_shapes.append(layer._batch_input_shape) self._feed_inputs.append(layer.input) def _set_output_names(self): """Assigns unique names to the Network's outputs. Output layers with multiple output tensors would otherwise lead to duplicate names in self.output_names. 
""" uniquified = [] output_names = set() prefix_count = {} for layer in self._output_layers: proposal = layer.name while proposal in output_names: existing_count = prefix_count.get(layer.name, 1) proposal = '{}_{}'.format(layer.name, existing_count) prefix_count[layer.name] = existing_count + 1 output_names.add(proposal) uniquified.append(proposal) self.output_names = uniquified @trackable.no_automatic_dependency_tracking def _init_subclassed_network(self, name=None, **kwargs): self._base_init(name=name, **kwargs) self._is_graph_network = False self._init_call_fn_args() self._autocast = kwargs.get('autocast', base_layer_utils.v2_dtype_behavior_enabled()) self.outputs = [] self.inputs = [] self.built = False @property def dynamic(self): if self._is_graph_network: return any(layer.dynamic for layer in self.layers) return self._dynamic or any(layer.dynamic for layer in self.layers) def _track_layers(self, layers): """Add Trackable dependencies on a list of Layers.""" weight_layer_index = 0 for layer_index, layer in enumerate(layers): try: if layer.weights: # Keep a separate index for layers which have weights. This allows # users to insert Layers without weights anywhere in the network # without breaking checkpoints. self._track_trackable( layer, name='layer_with_weights-%d' % weight_layer_index, overwrite=True) weight_layer_index += 1 except ValueError: # The layer might have weights, but may not be built yet. We just treat # it as layer without weight. pass # Even if it doesn't have weights, we should still track everything in # case it has/will have Trackable dependencies. self._track_trackable( layer, name='layer-%d' % layer_index, overwrite=True) def __setattr__(self, name, value): if not getattr(self, '_self_setattr_tracking', True): super(Network, self).__setattr__(name, value) return if all( isinstance(v, (base_layer.Layer, data_structures.TrackableDataStructure)) or trackable_layer_utils.has_weights(v) for v in nest.flatten(value)): try: self._is_graph_network except AttributeError: raise RuntimeError('It looks like you are subclassing `Model` and you ' 'forgot to call `super(YourClass, self).__init__()`.' ' Always start with this line.') super(Network, self).__setattr__(name, value) # Keep track of metric instance created in subclassed model/layer. # We do this so that we can maintain the correct order of metrics by adding # the instance to the `metrics` list as soon as it is created. from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top if isinstance(value, metrics_module.Metric): self._metrics.append(value) @property def stateful(self): return any((hasattr(layer, 'stateful') and layer.stateful) for layer in self.layers) def reset_states(self): for layer in self.layers: if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False): layer.reset_states() @property def state_updates(self): """Returns the `updates` from all layers that are stateful. This is useful for separating training updates and state updates, e.g. when we need to update a layer's internal state during prediction. Returns: A list of update ops. """ state_updates = [] for layer in self.layers: if getattr(layer, 'stateful', False): if hasattr(layer, 'updates'): state_updates += layer.updates return state_updates @property def weights(self): """Returns the list of all layer variables/weights. Returns: A list of variables. 
""" self._assert_weights_created() weights = [] for layer in self._layers: weights += layer.weights weights += (self._trainable_weights + self._non_trainable_weights) return weights @property @tracking.cached_per_instance def _should_compute_mask(self): return self._is_graph_network and super(Network, self)._should_compute_mask def compute_mask(self, inputs, mask): if not self._is_graph_network: return None # TODO(omalleyt): b/123540974 This function is not really safe to call # by itself because it will duplicate any updates and losses in graph # mode by `call`ing the Layers again. output_tensors = self._run_internal_graph(inputs, mask=mask) return nest.map_structure(lambda t: t._keras_mask, output_tensors) @property def layers(self): return trackable_layer_utils.filter_empty_layer_containers( self._layers) def get_layer(self, name=None, index=None): """Retrieves a layer based on either its name (unique) or index. If `name` and `index` are both provided, `index` will take precedence. Indices are based on order of horizontal graph traversal (bottom-up). Arguments: name: String, name of layer. index: Integer, index of layer. Returns: A layer instance. Raises: ValueError: In case of invalid layer name or index. """ # TODO(fchollet): We could build a dictionary based on layer names # since they are constant, but we have not done that yet. if index is not None: if len(self.layers) <= index: raise ValueError('Was asked to retrieve layer at index ' + str(index) + ' but model only has ' + str(len(self.layers)) + ' layers.') else: return self.layers[index] else: if not name: raise ValueError('Provide either a layer name or layer index.') for layer in self.layers: if layer.name == name: return layer raise ValueError('No such layer: ' + name) @property def trainable_weights(self): self._assert_weights_created() return trackable_layer_utils.gather_trainable_weights( trainable=self.trainable, sub_layers=self._layers, extra_variables=self._trainable_weights) @property def non_trainable_weights(self): self._assert_weights_created() return trackable_layer_utils.gather_non_trainable_weights( trainable=self.trainable, sub_layers=self._layers, extra_variables=self._non_trainable_weights + self._trainable_weights) @property def input_spec(self): """Gets the network's input specs. Returns: A list of `InputSpec` instances (one per input to the model) or a single instance if the model has only one input. """ # If subclassed model, can't assume anything. if not self._is_graph_network: return None specs = [] for layer in self._input_layers: if layer.input_spec is None: specs.append(None) else: if not isinstance(layer.input_spec, list): raise TypeError('Layer ' + layer.name + ' has an input_spec attribute that ' 'is not a list. We expect a list. ' 'Found input_spec = ' + str(layer.input_spec)) specs += layer.input_spec if len(specs) == 1: return specs[0] return specs @base_layer_utils.default def build(self, input_shape): """Builds the model based on input shapes received. This is to be used for subclassed models, which do not know at instantiation time what their inputs look like. This method only exists for users who want to call `model.build()` in a standalone way (as a substitute for calling the model on real data to build it). It will never be called by the framework (and thus it will never throw unexpected errors in an unrelated workflow). Args: input_shape: Single tuple, TensorShape, or list of shapes, where shapes are tuples, integers, or TensorShapes. Raises: ValueError: 1. 
In case of invalid user-provided data (not of type tuple, list, or TensorShape). 2. If the model requires call arguments that are agnostic to the input shapes (positional or kwarg in call signature). 3. If not all layers were properly built. 4. If float type inputs are not supported within the layers. In each of these cases, the user should build their model by calling it on real tensor data. """ if self._is_graph_network: self.built = True return # If subclass network if input_shape is None: raise ValueError('Input shape must be defined when calling build on a ' 'model subclass network.') valid_types = (tuple, list, tensor_shape.TensorShape) if not isinstance(input_shape, valid_types): raise ValueError('Specified input shape is not one of the valid types. ' 'Please specify a batch input shape of type tuple or ' 'list of input shapes. User provided ' 'input type: {}'.format(type(input_shape))) if input_shape and not self.inputs: # We create placeholders for the `None`s in the shape and build the model # in a Graph. Since tf.Variable is compatible with both eager execution # and graph building, the variables created after building the model in # a Graph are still valid when executing eagerly. if context.executing_eagerly(): graph = func_graph.FuncGraph('build_graph') else: graph = backend.get_graph() with graph.as_default(): if isinstance(input_shape, list): x = [base_layer_utils.generate_placeholders_from_shape(shape) for shape in input_shape] else: x = base_layer_utils.generate_placeholders_from_shape(input_shape) kwargs = {} call_signature = tf_inspect.getfullargspec(self.call) call_args = call_signature.args # Exclude `self`, `inputs`, and any argument with a default value. if len(call_args) > 2: if call_signature.defaults: call_args = call_args[2:-len(call_signature.defaults)] else: call_args = call_args[2:] for arg in call_args: if arg == 'training': # Case where `training` is a positional arg with no default. kwargs['training'] = False else: # Has invalid call signature with unknown positional arguments. raise ValueError( 'Currently, you cannot build your model if it has ' 'positional or keyword arguments that are not ' 'inputs to the model, but are required for its ' '`call` method. Instead, in order to instantiate ' 'and build your model, `call` your model on real ' 'tensor data with all expected call arguments.') elif len(call_args) < 2: # Signature without `inputs`. raise ValueError('You can only call `build` on a model if its `call` ' 'method accepts an `inputs` argument.') try: self.call(x, **kwargs) except (errors.InvalidArgumentError, TypeError): raise ValueError('You cannot build your model by calling `build` ' 'if your layers do not support float type inputs. ' 'Instead, in order to instantiate and build your ' 'model, `call` your model on real tensor data (of ' 'the correct dtype).') if self._layers: self._track_layers(self._layers) self.built = True def call(self, inputs, training=None, mask=None): """Calls the model on new inputs. In this case `call` just reapplies all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs). Arguments: inputs: A tensor or list of tensors. training: Boolean or boolean scalar tensor, indicating whether to run the `Network` in training mode or inference mode. mask: A mask or list of masks. A mask can be either a tensor or None (no mask). Returns: A tensor if there is a single output, or a list of tensors if there are more than one outputs. 
""" if not self._is_graph_network: raise NotImplementedError('When subclassing the `Model` class, you should' ' implement a `call` method.') return self._run_internal_graph(inputs, training=training, mask=mask) def compute_output_shape(self, input_shape): if not self._is_graph_network: return super(Network, self).compute_output_shape(input_shape) # Convert any shapes in tuple format to TensorShapes. input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False) if len(nest.flatten(input_shape)) != len(nest.flatten(self._input_layers)): raise ValueError('Invalid input_shape argument ' + str(input_shape) + ': model has ' + str(len(self._input_layers)) + ' tensor inputs.') cache_key = generic_utils.object_list_uid(input_shape) if cache_key in self._output_shape_cache: # Cache hit. Return shapes as TensorShapes. return self._output_shape_cache[cache_key] layers_to_output_shapes = {} for layer, shape in zip(self._input_layers, nest.flatten(input_shape)): # It's an input layer: then `compute_output_shape` is identity, # and there is only one node and one tensor.. shape_key = layer.name + '_0_0' layers_to_output_shapes[shape_key] = shape depth_keys = list(self._nodes_by_depth.keys()) depth_keys.sort(reverse=True) # Iterate over nodes, by depth level. if len(depth_keys) > 1: for depth in depth_keys: nodes = self._nodes_by_depth[depth] for node in nodes: # This is always a single layer, never a list. layer = node.outbound_layer if layer in self._input_layers: # We've already covered the input layers # a few lines above. continue # Potentially redundant list, # same size as node.input_tensors. layer_input_shapes = [] for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound(): input_layer_key = inbound_layer.name + '_%s_%s' % (node_id, tensor_id) layer_input_shapes.append(layers_to_output_shapes[input_layer_key]) layer_input_shapes = nest.pack_sequence_as(node.inbound_layers, layer_input_shapes) # Layers expect shapes to be tuples for `compute_output_shape`. layer_input_shapes = tf_utils.convert_shapes( layer_input_shapes, to_tuples=True) layer_output_shapes = layer.compute_output_shape(layer_input_shapes) # Convert back to TensorShapes. layer_output_shapes = tf_utils.convert_shapes( layer_output_shapes, to_tuples=False) node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access for j, shape in enumerate(nest.flatten(layer_output_shapes)): shape_key = layer.name + '_%s_%s' % (node_index, j) layers_to_output_shapes[shape_key] = shape # Read final output shapes from layers_to_output_shapes. output_shapes = [] for i in range(len(self._output_layers)): layer, node_index, tensor_index = self._output_coordinates[i] shape_key = layer.name + '_%s_%s' % (node_index, tensor_index) output_shapes.append(layers_to_output_shapes[shape_key]) output_shapes = nest.pack_sequence_as(self._nested_outputs, output_shapes) # Store in cache. self._output_shape_cache[cache_key] = output_shapes # Return shapes as TensorShapes. return output_shapes def _run_internal_graph(self, inputs, training=None, mask=None): """Computes output tensors for new inputs. # Note: - Can be run on non-Keras tensors. Arguments: inputs: Tensor or nested structure of Tensors. training: Boolean learning phase. mask: (Optional) Tensor or nested structure of Tensors. Returns: Two lists: output_tensors, output_masks """ # Note: masking support is relevant mainly for Keras. # It cannot be factored out without having the fully reimplement the network # calling logic on the Keras side. 
We choose to incorporate it in # Network because 1) it may be useful to fully support in tf.layers in # the future and 2) Keras is a major user of Network. If you don't # use masking, it does not interfere with regular behavior at all and you # can ignore it. inputs = nest.flatten(inputs) if mask is None: masks = [None for _ in range(len(inputs))] else: masks = nest.flatten(mask) for input_t, mask in zip(inputs, masks): input_t._keras_mask = mask # Dictionary mapping reference tensors to computed tensors. tensor_dict = {} for x, y in zip(self.inputs, inputs): tensor_dict[str(id(x))] = y depth_keys = list(self._nodes_by_depth.keys()) depth_keys.sort(reverse=True) # Ignore the InputLayers when computing the graph. depth_keys = depth_keys[1:] for depth in depth_keys: nodes = self._nodes_by_depth[depth] for node in nodes: # This is always a single layer, never a list. layer = node.outbound_layer if all( str(id(tensor)) in tensor_dict for tensor in nest.flatten(node.input_tensors)): # Call layer (reapplying ops to new inputs). computed_tensors = nest.map_structure( lambda t: tensor_dict[str(id(t))], node.input_tensors) # Ensure `training` arg propagation if applicable. kwargs = copy.copy(node.arguments) if node.arguments else {} argspec = self._layer_call_argspecs[layer].args if 'training' in argspec: kwargs.setdefault('training', training) if (type(kwargs['training']) is ops.Tensor and # pylint: disable=unidiomatic-typecheck any([kwargs['training'] is x for x in backend._GRAPH_LEARNING_PHASES.values()])): kwargs['training'] = training # Materialize placeholder. # Map Keras tensors in kwargs to their computed value. def _map_tensor_if_from_keras_layer(t): if isinstance(t, ops.Tensor) and hasattr(t, '_keras_history'): t_id = str(id(t)) return tensor_dict[t_id] return t kwargs = nest.map_structure(_map_tensor_if_from_keras_layer, kwargs) # Compute outputs. output_tensors = layer(computed_tensors, **kwargs) # Update tensor_dict. for x, y in zip( nest.flatten(node.output_tensors), nest.flatten(output_tensors)): tensor_dict[str(id(x))] = y output_tensors = [] output_shapes = [] for x in self.outputs: assert str(id(x)) in tensor_dict, 'Could not compute output ' + str(x) tensor = tensor_dict[str(id(x))] output_shapes.append(x.shape) output_tensors.append(tensor) if output_shapes is not None: input_shapes = [x.shape for x in inputs] cache_key = generic_utils.object_list_uid(input_shapes) self._output_shape_cache[cache_key] = nest.pack_sequence_as( self._nested_outputs, output_shapes) output_tensors = nest.pack_sequence_as(self._nested_outputs, output_tensors) return output_tensors def get_config(self): if not self._is_graph_network: raise NotImplementedError config = { 'name': self.name, } node_conversion_map = {} for layer in self.layers: kept_nodes = 1 if _should_skip_first_node(layer) else 0 for original_node_index, node in enumerate(layer._inbound_nodes): node_key = _make_node_key(layer.name, original_node_index) if node_key in self._network_nodes: node_conversion_map[node_key] = kept_nodes kept_nodes += 1 layer_configs = [] for layer in self.layers: # From the earliest layers on. layer_class_name = layer.__class__.__name__ layer_config = layer.get_config() filtered_inbound_nodes = [] for original_node_index, node in enumerate(layer._inbound_nodes): node_key = _make_node_key(layer.name, original_node_index) if node_key in self._network_nodes: # The node is relevant to the model: # add to filtered_inbound_nodes. 
if node.arguments: kwargs = _serialize_tensors(node.arguments) try: json.dumps(kwargs) except TypeError: logging.warning( 'Layer ' + layer.name + ' was passed non-serializable keyword arguments: ' + str(node.arguments) + '. They will not be included ' 'in the serialized model (and thus will be missing ' 'at deserialization time).') kwargs = {} else: kwargs = {} if node.inbound_layers: node_data = [] for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound(): node_key = _make_node_key(inbound_layer.name, node_id) new_node_index = node_conversion_map.get(node_key, 0) node_data.append( tf_utils.ListWrapper( [inbound_layer.name, new_node_index, tensor_id, kwargs])) node_data = nest.pack_sequence_as(node.input_tensors, node_data) if not nest.is_sequence(node_data): node_data = [node_data] # Convert ListWrapper to list for backwards compatible configs. node_data = tf_utils.convert_inner_node_data(node_data) filtered_inbound_nodes.append(node_data) layer_configs.append({ 'name': layer.name, 'class_name': layer_class_name, 'config': layer_config, 'inbound_nodes': filtered_inbound_nodes, }) config['layers'] = layer_configs # Gather info about inputs and outputs. model_inputs = [] for i in range(len(self._input_layers)): layer, node_index, tensor_index = self._input_coordinates[i] node_key = _make_node_key(layer.name, node_index) if node_key not in self._network_nodes: continue new_node_index = node_conversion_map[node_key] model_inputs.append( tf_utils.ListWrapper([layer.name, new_node_index, tensor_index])) model_inputs = nest.pack_sequence_as(self._nested_inputs, model_inputs) # Preserve external Keras compat for Models with single input. if not nest.is_sequence(model_inputs): model_inputs = [model_inputs] model_inputs = tf_utils.convert_inner_node_data(model_inputs) config['input_layers'] = model_inputs model_outputs = [] for i in range(len(self._output_layers)): layer, node_index, tensor_index = self._output_coordinates[i] node_key = _make_node_key(layer.name, node_index) if node_key not in self._network_nodes: continue new_node_index = node_conversion_map[node_key] model_outputs.append( tf_utils.ListWrapper([layer.name, new_node_index, tensor_index])) model_outputs = nest.pack_sequence_as(self._nested_outputs, model_outputs) # Preserve external Keras compat for Models with single output. if not nest.is_sequence(model_outputs): model_outputs = [model_outputs] model_outputs = tf_utils.convert_inner_node_data(model_outputs) config['output_layers'] = model_outputs return copy.deepcopy(config) @classmethod def from_config(cls, config, custom_objects=None): """Instantiates a Model from its config (output of `get_config()`). Arguments: config: Model config dictionary. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A model instance. Raises: ValueError: In case of improperly formatted config dict. """ # Layer instances created during the graph reconstruction process. created_layers = collections.OrderedDict() # Dictionary mapping layer instances to # node data that specifies a layer call. # It acts as a queue that maintains any unprocessed # layer call until it becomes possible to process it # (i.e. until the input tensors to the call all exist). unprocessed_nodes = {} def add_unprocessed_node(layer, node_data): if layer not in unprocessed_nodes: unprocessed_nodes[layer] = [node_data] else: unprocessed_nodes[layer].append(node_data) def process_node(layer, node_data): """Deserialize a node. 
Arguments: layer: layer instance. node_data: Nested structure of `ListWrapper`. Raises: ValueError: In case of improperly formatted `node_data`. """ input_tensors = [] for input_data in nest.flatten(node_data): input_data = input_data.as_list() inbound_layer_name = input_data[0] inbound_node_index = input_data[1] inbound_tensor_index = input_data[2] if len(input_data) == 3: kwargs = {} elif len(input_data) == 4: kwargs = input_data[3] kwargs = _deserialize_keras_tensors(kwargs, created_layers) else: raise ValueError('Improperly formatted model config.') inbound_layer = created_layers[inbound_layer_name] if len(inbound_layer._inbound_nodes) <= inbound_node_index: add_unprocessed_node(layer, node_data) return inbound_node = inbound_layer._inbound_nodes[inbound_node_index] input_tensors.append( nest.flatten(inbound_node.output_tensors)[inbound_tensor_index]) input_tensors = nest.pack_sequence_as(node_data, input_tensors) # Call layer on its inputs, thus creating the node # and building the layer if needed. if input_tensors is not None: # Preserve compatibility with older configs flat_input_tensors = nest.flatten(input_tensors) # If this is a single element but not a dict, unwrap. If this is a dict, # assume the first layer expects a dict (as is the case with a # DenseFeatures layer); pass through. if not isinstance(input_tensors, dict) and len(flat_input_tensors) == 1: input_tensors = flat_input_tensors[0] layer(input_tensors, **kwargs) def process_layer(layer_data): """Deserializes a layer, then call it on appropriate inputs. Arguments: layer_data: layer config dict. Raises: ValueError: In case of improperly formatted `layer_data` dict. """ layer_name = layer_data['name'] # Instantiate layer. from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top layer = deserialize_layer(layer_data, custom_objects=custom_objects) created_layers[layer_name] = layer # Gather layer inputs and convert to `ListWrapper` objects. inbound_nodes_data = layer_data['inbound_nodes'] inbound_nodes_data = tf_utils.convert_inner_node_data( inbound_nodes_data, wrap=True) for node_data in inbound_nodes_data: # We don't process nodes (i.e. make layer calls) # on the fly because the inbound node may not yet exist, # in case of layer shared at different topological depths # (e.g. a model such as A(B(A(B(x))))) add_unprocessed_node(layer, node_data) # First, we create all layers and enqueue nodes to be processed for layer_data in config['layers']: process_layer(layer_data) # Then we process nodes in order of layer depth. # Nodes that cannot yet be processed (if the inbound node # does not yet exist) are re-enqueued, and the process # is repeated until all nodes are processed. 
while unprocessed_nodes: for layer_data in config['layers']: layer = created_layers[layer_data['name']] if layer in unprocessed_nodes: for node_data in unprocessed_nodes.pop(layer): process_node(layer, node_data) name = config.get('name') input_tensors = [] output_tensors = [] input_layers = tf_utils.convert_inner_node_data( config['input_layers'], wrap=True) for layer_data in nest.flatten(input_layers): layer_name, node_index, tensor_index = layer_data.as_list() assert layer_name in created_layers layer = created_layers[layer_name] layer_output_tensors = layer._inbound_nodes[node_index].output_tensors input_tensors.append(nest.flatten(layer_output_tensors)[tensor_index]) output_layers = tf_utils.convert_inner_node_data( config['output_layers'], wrap=True) for layer_data in nest.flatten(output_layers): layer_name, node_index, tensor_index = layer_data.as_list() assert layer_name in created_layers layer = created_layers[layer_name] layer_output_tensors = layer._inbound_nodes[node_index].output_tensors output_tensors.append(nest.flatten(layer_output_tensors)[tensor_index]) input_tensors = nest.pack_sequence_as(input_layers, input_tensors) output_tensors = nest.pack_sequence_as(output_layers, output_tensors) model = cls(inputs=input_tensors, outputs=output_tensors, name=name) # Layers not connected to outputs, such as those added in `add_loss`. ancillary_layers = [ layer for layer in created_layers.values() if layer not in model.layers ] if ancillary_layers: relevant_nodes = nest.flatten([ layer.inbound_nodes[1:] if _should_skip_first_node(layer) else layer.inbound_nodes for layer in created_layers.values() ]) model._insert_layers(ancillary_layers, relevant_nodes) return model def save(self, filepath, overwrite=True, include_optimizer=True, save_format=None, signatures=None): """Saves the model to Tensorflow SavedModel or a single HDF5 file. The savefile includes: - The model architecture, allowing to re-instantiate the model. - The model weights. - The state of the optimizer, allowing to resume training exactly where you left off. This allows you to save the entirety of the state of a model in a single file. Saved models can be reinstantiated via `keras.models.load_model`. The model returned by `load_model` is a compiled model ready to be used (unless the saved model was never compiled in the first place). Arguments: filepath: String, path to SavedModel or H5 file to save the model. overwrite: Whether to silently overwrite any existing file at the target location, or provide the user with a manual prompt. include_optimizer: If True, save optimizer's state together. save_format: Either 'tf' or 'h5', indicating whether to save the model to Tensorflow SavedModel or HDF5. The default is currently 'h5', but will switch to 'tf' in TensorFlow 2.0. The 'tf' option is currently disabled (use `tf.keras.experimental.export_saved_model` instead). signatures: Signatures to save with the SavedModel. Applicable to the 'tf' format only. Please see the `signatures` argument in `tf.saved_model.save` for details. Example: ```python from keras.models import load_model model.save('my_model.h5') # creates a HDF5 file 'my_model.h5' del model # deletes the existing model # returns a compiled model # identical to the previous one model = load_model('my_model.h5') ``` """ saving.save_model(self, filepath, overwrite, include_optimizer, save_format, signatures) def save_weights(self, filepath, overwrite=True, save_format=None): """Saves all layer weights. 
Either saves in HDF5 or in TensorFlow format based on the `save_format` argument. When saving in HDF5 format, the weight file has: - `layer_names` (attribute), a list of strings (ordered names of model layers). - For every layer, a `group` named `layer.name` - For every such layer group, a group attribute `weight_names`, a list of strings (ordered names of weights tensor of the layer). - For every weight in the layer, a dataset storing the weight value, named after the weight tensor. When saving in TensorFlow format, all objects referenced by the network are saved in the same format as `tf.train.Checkpoint`, including any `Layer` instances or `Optimizer` instances assigned to object attributes. For networks constructed from inputs and outputs using `tf.keras.Model(inputs, outputs)`, `Layer` instances used by the network are tracked/saved automatically. For user-defined classes which inherit from `tf.keras.Model`, `Layer` instances must be assigned to object attributes, typically in the constructor. See the documentation of `tf.train.Checkpoint` and `tf.keras.Model` for details. While the formats are the same, do not mix `save_weights` and `tf.train.Checkpoint`. Checkpoints saved by `Model.save_weights` should be loaded using `Model.load_weights`. Checkpoints saved using `tf.train.Checkpoint.save` should be restored using the corresponding `tf.train.Checkpoint.restore`. Prefer `tf.train.Checkpoint` over `save_weights` for training checkpoints. The TensorFlow format matches objects and variables by starting at a root object, `self` for `save_weights`, and greedily matching attribute names. For `Model.save` this is the `Model`, and for `Checkpoint.save` this is the `Checkpoint` even if the `Checkpoint` has a model attached. This means saving a `tf.keras.Model` using `save_weights` and loading into a `tf.train.Checkpoint` with a `Model` attached (or vice versa) will not match the `Model`'s variables. See the [guide to training checkpoints](https://www.tensorflow.org/alpha/guide/checkpoints) for details on the TensorFlow format. Arguments: filepath: String, path to the file to save the weights to. When saving in TensorFlow format, this is the prefix used for checkpoint files (multiple files are generated). Note that the '.h5' suffix causes weights to be saved in HDF5 format. overwrite: Whether to silently overwrite any existing file at the target location, or provide the user with a manual prompt. save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or '.keras' will default to HDF5 if `save_format` is `None`. Otherwise `None` defaults to 'tf'. Raises: ImportError: If h5py is not available when attempting to save in HDF5 format. ValueError: For invalid/unknown format arguments. """ self._assert_weights_created() filepath_is_h5 = _is_hdf5_filepath(filepath) if save_format is None: if filepath_is_h5: save_format = 'h5' else: save_format = 'tf' else: user_format = save_format.lower().strip() if user_format in ('tensorflow', 'tf'): save_format = 'tf' elif user_format in ('hdf5', 'h5', 'keras'): save_format = 'h5' else: raise ValueError( 'Unknown format "%s". Was expecting one of {"tf", "h5"}.' % ( save_format,)) if save_format == 'tf' and filepath_is_h5: raise ValueError( ('save_weights got save_format="tf"/"tensorflow", but the ' 'filepath ("%s") looks like an HDF5 file. 
Omit the ".h5"/".keras" ' 'when saving in TensorFlow format.') % filepath) if save_format == 'h5' and h5py is None: raise ImportError( '`save_weights` requires h5py when saving in hdf5.') if save_format == 'tf': check_filepath = filepath + '.index' else: check_filepath = filepath # If file exists and should not be overwritten: if not overwrite and os.path.isfile(check_filepath): proceed = ask_to_proceed_with_overwrite(check_filepath) if not proceed: return if save_format == 'h5': with h5py.File(filepath, 'w') as f: saving.save_weights_to_hdf5_group(f, self.layers) else: if context.executing_eagerly(): session = None else: session = backend.get_session() optimizer = getattr(self, 'optimizer', None) if (optimizer and not isinstance(optimizer, trackable.Trackable)): logging.warning( ('This model was compiled with a Keras optimizer (%s) but is being ' 'saved in TensorFlow format with `save_weights`. The model\'s ' 'weights will be saved, but unlike with TensorFlow optimizers in ' 'the TensorFlow format the optimizer\'s state will not be ' 'saved.\n\nConsider using a TensorFlow optimizer from `tf.train`.') % (optimizer,)) self._trackable_saver.save(filepath, session=session) # Record this checkpoint so it's visible from tf.train.latest_checkpoint. checkpoint_management.update_checkpoint_state_internal( save_dir=os.path.dirname(filepath), model_checkpoint_path=filepath, save_relative_paths=True, all_model_checkpoint_paths=[filepath]) def load_weights(self, filepath, by_name=False): """Loads all layer weights, either from a TensorFlow or an HDF5 weight file. If `by_name` is False weights are loaded based on the network's topology. This means the architecture should be the same as when the weights were saved. Note that layers that don't have weights are not taken into account in the topological ordering, so adding or removing layers is fine as long as they don't have weights. If `by_name` is True, weights are loaded into layers only if they share the same name. This is useful for fine-tuning or transfer-learning models where some of the layers have changed. Only topological loading (`by_name=False`) is supported when loading weights from the TensorFlow format. Note that topological loading differs slightly between TensorFlow and HDF5 formats for user-defined classes inheriting from `tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the TensorFlow format loads based on the object-local names of attributes to which layers are assigned in the `Model`'s constructor. Arguments: filepath: String, path to the weights file to load. For weight files in TensorFlow format, this is the file prefix (the same as was passed to `save_weights`). by_name: Boolean, whether to load weights by name or by topological order. Only topological loading is supported for weight files in TensorFlow format. Returns: When loading a weight file in TensorFlow format, returns the same status object as `tf.train.Checkpoint.restore`. When graph building, restore ops are run automatically as soon as the network is built (on first call for user-defined classes inheriting from `Model`, immediately if it is already built). When loading weights in HDF5 format, returns `None`. Raises: ImportError: If h5py is not available and the weight file is in HDF5 format. """ if _is_hdf5_filepath(filepath): save_format = 'h5' else: try: pywrap_tensorflow.NewCheckpointReader(filepath) save_format = 'tf' except errors_impl.DataLossError: # The checkpoint is not readable in TensorFlow format. Try HDF5. 
save_format = 'h5' if save_format == 'tf': status = self._trackable_saver.restore(filepath) if by_name: raise NotImplementedError( 'Weights may only be loaded based on topology into Models when ' 'loading TensorFlow-formatted weights (got by_name=True to ' 'load_weights).') if not context.executing_eagerly(): session = backend.get_session() # Restore existing variables (if any) immediately, and set up a # streaming restore for any variables created in the future. trackable_utils.streaming_restore(status=status, session=session) status.assert_nontrivial_match() return status if h5py is None: raise ImportError( '`load_weights` requires h5py when loading weights from HDF5.') if self._is_graph_network and not self.built: raise NotImplementedError( 'Unable to load weights saved in HDF5 format into a subclassed ' 'Model which has not created its variables yet. Call the Model ' 'first, then load the weights.') self._assert_weights_created() with h5py.File(filepath, 'r') as f: if 'layer_names' not in f.attrs and 'model_weights' in f: f = f['model_weights'] if by_name: saving.load_weights_from_hdf5_group_by_name(f, self.layers) else: saving.load_weights_from_hdf5_group(f, self.layers) def _updated_config(self): """Util shared between different serialization methods. Returns: Model config with Keras version information added. """ from tensorflow.python.keras import __version__ as keras_version # pylint: disable=g-import-not-at-top config = self.get_config() model_config = { 'class_name': self.__class__.__name__, 'config': config, 'keras_version': keras_version, 'backend': backend.backend() } return model_config def to_json(self, **kwargs): """Returns a JSON string containing the network configuration. To load a network from a JSON save file, use `keras.models.model_from_json(json_string, custom_objects={})`. Arguments: **kwargs: Additional keyword arguments to be passed to `json.dumps()`. Returns: A JSON string. """ model_config = self._updated_config() return json.dumps( model_config, default=serialization.get_json_type, **kwargs) def to_yaml(self, **kwargs): """Returns a yaml string containing the network configuration. Note: Since TF 1.15.5+nv21.09, this method is no longer supported and will raise a RuntimeError. To load a network from a yaml save file, use `keras.models.model_from_yaml(yaml_string, custom_objects={})`. `custom_objects` should be a dictionary mapping the names of custom losses / layers / etc to the corresponding functions / classes. Arguments: **kwargs: Additional keyword arguments to be passed to `yaml.dump()`. Returns: A YAML string. Raises: RuntimeError: announces that the method poses a security risk """ raise RuntimeError( 'Method `model.to_yaml()` has been removed due to security risk of ' 'arbitrary code execution. Please use `model.to_json()` instead.' ) def summary(self, line_length=None, positions=None, print_fn=None): """Prints a string summary of the network. Arguments: line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to `[.33, .55, .67, 1.]`. print_fn: Print function to use. Defaults to `print`. It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. Raises: ValueError: if `summary()` is called before the model is built. """ if not self.built: raise ValueError('This model has not yet been built. 
' 'Build the model first by calling `build()` or calling ' '`fit()` with some data, or specify ' 'an `input_shape` argument in the first layer(s) for ' 'automatic build.') layer_utils.print_summary(self, line_length=line_length, positions=positions, print_fn=print_fn) def _validate_graph_inputs_and_outputs(self): """Validates the inputs and outputs of a Graph Network.""" # Check for redundancy in inputs. if len(object_identity.ObjectIdentitySet(self.inputs)) != len(self.inputs): raise ValueError('The list of inputs passed to the model ' 'is redundant. ' 'All inputs should only appear once.' ' Found: ' + str(self.inputs)) for x in self.inputs: # Check that x has appropriate `_keras_history` metadata. if not hasattr(x, '_keras_history'): cls_name = self.__class__.__name__ raise ValueError('Input tensors to a ' + cls_name + ' ' + 'must come from `tf.keras.Input`. ' 'Received: ' + str(x) + ' (missing previous layer metadata).') # Check that x is an input tensor. # pylint: disable=protected-access layer = x._keras_history.layer if len(layer._inbound_nodes) > 1 or ( layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers): cls_name = self.__class__.__name__ logging.warning(cls_name + ' inputs must come from ' '`tf.keras.Input` (thus holding past layer metadata), ' 'they cannot be the output of ' 'a previous non-Input layer. ' 'Here, a tensor specified as ' 'input to "' + self.name + '" was not an Input tensor, ' 'it was generated by layer ' + layer.name + '.\n' 'Note that input tensors are ' 'instantiated via `tensor = tf.keras.Input(shape)`.\n' 'The tensor that caused the issue was: ' + str(x.name)) # Check compatibility of batch sizes of Input Layers. input_batch_sizes = [ training_utils.get_static_batch_size(x._keras_history.layer) for x in self.inputs ] consistent_batch_size = None for batch_size in input_batch_sizes: if batch_size is not None: if (consistent_batch_size is not None and batch_size != consistent_batch_size): raise ValueError('The specified batch sizes of the Input Layers' ' are incompatible. Found batch sizes: {}'.format( input_batch_sizes)) consistent_batch_size = batch_size for x in self.outputs: if not hasattr(x, '_keras_history'): cls_name = self.__class__.__name__ raise ValueError('Output tensors to a ' + cls_name + ' must be ' 'the output of a TensorFlow `Layer` ' '(thus holding past layer metadata). Found: ' + str(x)) def _insert_layers(self, layers, relevant_nodes=None): """Inserts Layers into the Network after Network creation. This is only valid for Keras Graph Networks. Layers added via this function will be included in the `call` computation and `get_config` of this Network. They will not be added to the Network's outputs. Arguments: layers: Arbitrary nested structure of Layers. Layers must be reachable from one or more of the `keras.Input` Tensors that correspond to this Network's inputs. relevant_nodes: Nodes from the Layers that should be considered part of this Network. If `None`, all Nodes will be considered part of this Network. Raises: ValueError: If the layers depend on `Input`s not found in this Model. """ layers = nest.flatten(layers) tf_utils.assert_no_legacy_layers(layers) node_to_depth = {} for depth, nodes in self._nodes_by_depth.items(): node_to_depth.update({node: depth for node in nodes}) # The nodes of these Layers that are relevant to this Network. 
If not # provided, assume all Nodes are relevant if not relevant_nodes: relevant_nodes = nest.flatten([layer._inbound_nodes for layer in layers]) network_nodes = set(relevant_nodes + list(node_to_depth.keys())) def _get_min_depth(node): """Gets the minimum depth at which node can be computed.""" min_depth = 0 for layer, node_id, _, _ in node.iterate_inbound(include_arguments=True): inbound_node = layer._inbound_nodes[node_id] if inbound_node in node_to_depth: min_depth = min(min_depth, node_to_depth[inbound_node]) elif inbound_node not in network_nodes: continue else: # Previous relevant nodes haven't been processed yet. return None # New node is one shallower than its shallowest input. return min_depth - 1 # Insert nodes into `_nodes_by_depth` and other node attrs. unprocessed_nodes = copy.copy(relevant_nodes) i = 0 while unprocessed_nodes: i += 1 # Do a sanity check. This can occur if `Input`s from outside this Model # are being relied on. if i > 10000: raise ValueError('Layers could not be added due to missing ' 'dependencies.') node = unprocessed_nodes.pop(0) depth = _get_min_depth(node) if depth is None: # Defer until inbound nodes are processed. unprocessed_nodes.append(node) continue node_key = _make_node_key(node.outbound_layer.name, node.outbound_layer._inbound_nodes.index(node)) if node_key not in self._network_nodes: node_to_depth[node] = depth self._network_nodes.add(node_key) self._nodes_by_depth[depth].append(node) # Insert layers and update other layer attrs. layer_set = set(self._layers) for layer in layers: if layer not in layer_set: self._layers.append(layer) self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call) layer_set.add(layer) def _assert_weights_created(self): """Asserts that all the weights for the network have been created. For a non-dynamic network, the weights must already be created after the layer has been called. For a dynamic network, the exact list of weights can never be known for certain since it may change at any time during execution. We run this check right before accessing weights or getting the Numpy value for the current weights. Otherwise, if the layer has never been called, the user would just get an empty list, which is misleading. Raises: ValueError: if the weights of the network has not yet been created. """ if self.dynamic: return if (not self._is_graph_network and 'build' in self.__class__.__dict__ and not self.built): # For any model that has customized build() method but hasn't # been invoked yet, this will cover both sequential and subclass model. raise ValueError('Weights for model %s have not yet been created. ' 'Weights are created when the Model is first called on ' 'inputs or `build()` is called with an `input_shape`.' % self.name) @property def _object_identifier(self): return '_tf_keras_network' def _graph_network_add_loss(self, symbolic_loss): new_nodes, new_layers = _map_subgraph_network(self.inputs, [symbolic_loss]) # Losses must be keyed on inputs no matter what in order to be supported in # DistributionStrategy. 
add_loss_layer = base_layer.AddLoss(unconditional=False) add_loss_layer(symbolic_loss) new_nodes.extend(add_loss_layer.inbound_nodes) new_layers.append(add_loss_layer) self._insert_layers(new_layers, new_nodes) def _graph_network_add_metric(self, value, aggregation, name): new_nodes, new_layers = _map_subgraph_network(self.inputs, [value]) add_metric_layer = base_layer.AddMetric(aggregation, name) add_metric_layer(value) new_nodes.extend(add_metric_layer.inbound_nodes) new_layers.append(add_metric_layer) self._insert_layers(new_layers, new_nodes) def _is_hdf5_filepath(filepath): return (filepath.endswith('.h5') or filepath.endswith('.keras') or filepath.endswith('.hdf5')) def _make_node_key(layer_name, node_index): return layer_name + '_ib-' + str(node_index) def _map_graph_network(inputs, outputs): """Validates a network's topology and gather its layers and nodes. Arguments: inputs: List of input tensors. outputs: List of outputs tensors. Returns: A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`. - nodes: list of Node instances. - nodes_by_depth: dict mapping ints (depth) to lists of node instances. - layers: list of Layer instances. - layers_by_depth: dict mapping ints (depth) to lists of layer instances. Raises: ValueError: In case the network is not valid (e.g. disconnected graph). """ # Network_nodes: set of nodes included in the graph of layers # (not all nodes included in the layers are relevant to the current graph). network_nodes = set() # ids of all nodes relevant to the Network nodes_depths = {} # dict {node: depth value} layers_depths = {} # dict {layer: depth value} layer_indices = {} # dict {layer: index in traversal} nodes_in_decreasing_depth = [] def build_map(tensor, finished_nodes, nodes_in_progress, layer, node_index, tensor_index): """Builds a map of the graph of layers. This recursively updates the map `layer_indices`, the list `nodes_in_decreasing_depth` and the set `network_nodes`. Arguments: tensor: Some tensor in a graph. finished_nodes: Set of nodes whose subgraphs have been traversed completely. Useful to prevent duplicated work. nodes_in_progress: Set of nodes that are currently active on the recursion stack. Useful to detect cycles. layer: Layer from which `tensor` comes from. If not provided, will be obtained from `tensor._keras_history`. node_index: Node index from which `tensor` comes from. tensor_index: Tensor_index from which `tensor` comes from. Raises: ValueError: if a cycle is detected. """ node = layer._inbound_nodes[node_index] # pylint: disable=protected-access # Prevent cycles. if node in nodes_in_progress: raise ValueError('The tensor ' + str(tensor) + ' at layer "' + layer.name + '" is part of a cycle.') # Don't repeat work for shared subgraphs if node in finished_nodes: return node_key = _make_node_key(layer.name, node_index) # Update network_nodes. network_nodes.add(node_key) # Store the traversal order for layer sorting. if layer not in layer_indices: layer_indices[layer] = len(layer_indices) nodes_in_progress.add(node) # Propagate to all previous tensors connected to this node. 
for layer, node_index, tensor_index, tensor in node.iterate_inbound( include_arguments=True): build_map(tensor, finished_nodes, nodes_in_progress, layer, node_index, tensor_index) finished_nodes.add(node) nodes_in_progress.remove(node) nodes_in_decreasing_depth.append(node) finished_nodes = set() nodes_in_progress = set() for x in outputs: layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access build_map(x, finished_nodes, nodes_in_progress, layer=layer, node_index=node_index, tensor_index=tensor_index) for node in reversed(nodes_in_decreasing_depth): # If the depth is not set, the node has no outbound nodes (depth 0). depth = nodes_depths.setdefault(node, 0) # Update the depth of the corresponding layer previous_depth = layers_depths.get(node.outbound_layer, 0) # If we've seen this layer before at a higher depth, # we should use that depth instead of the node depth. # This is necessary for shared layers that have inputs at different # depth levels in the graph. depth = max(depth, previous_depth) layers_depths[node.outbound_layer] = depth nodes_depths[node] = depth # Update the depth of inbound nodes. # The "depth" of a node is the max of the depths # of all nodes it is connected to + 1. for node_dep in node._get_all_node_dependencies(): previous_depth = nodes_depths.get(node_dep, 0) nodes_depths[node_dep] = max(depth + 1, previous_depth) # Handle inputs that are not connected to outputs. # We do not error out here because the inputs may be used to compute losses # and metrics. for input_t in inputs: input_layer = input_t._keras_history[0] if input_layer not in layers_depths: layers_depths[input_layer] = 0 layer_indices[input_layer] = -1 nodes_depths[input_layer._inbound_nodes[0]] = 0 network_nodes.add(_make_node_key(input_layer.name, 0)) # Build a dict {depth: list of nodes with this depth} nodes_by_depth = collections.defaultdict(list) for node, depth in nodes_depths.items(): nodes_by_depth[depth].append(node) # Build a dict {depth: list of layers with this depth} layers_by_depth = collections.defaultdict(list) for layer, depth in layers_depths.items(): layers_by_depth[depth].append(layer) # Get sorted list of layer depths. depth_keys = list(layers_by_depth.keys()) depth_keys.sort(reverse=True) # Set self.layers ordered by depth. layers = [] for depth in depth_keys: layers_for_depth = layers_by_depth[depth] # Network.layers needs to have a deterministic order: # here we order them by traversal order. layers_for_depth.sort(key=lambda x: layer_indices[x]) layers.extend(layers_for_depth) # Get sorted list of node depths. depth_keys = list(nodes_by_depth.keys()) depth_keys.sort(reverse=True) # Check that all tensors required are computable. # computable_tensors: all tensors in the graph # that can be computed from the inputs provided. computable_tensors = object_identity.ObjectIdentitySet() for x in inputs: computable_tensors.add(x) layers_with_complete_input = [] # To provide a better error msg. for depth in depth_keys: for node in nodes_by_depth[depth]: layer = node.outbound_layer if layer: for x in nest.flatten(node.input_tensors): if x not in computable_tensors: raise ValueError('Graph disconnected: ' 'cannot obtain value for tensor ' + str(x) + ' at layer "' + layer.name + '". 
' 'The following previous layers ' 'were accessed without issue: ' + str(layers_with_complete_input)) for x in nest.flatten(node.output_tensors): computable_tensors.add(x) layers_with_complete_input.append(layer.name) # Ensure name unicity, which will be crucial for serialization # (since serialized nodes refer to layers by their name). all_names = [layer.name for layer in layers] for name in all_names: if all_names.count(name) != 1: raise ValueError('The name "' + name + '" is used ' + str(all_names.count(name)) + ' times in the model. ' 'All layer names should be unique.') return network_nodes, nodes_by_depth, layers, layers_by_depth def _map_subgraph_network(inputs, outputs): """Returns the nodes and layers in the topology from `inputs` to `outputs`. Args: inputs: List of input tensors. outputs: List of output tensors. Returns: A tuple of List{Node] and List[Layer]. """ base_layer_utils.create_keras_history(outputs) # Keep only nodes and layers in the topology betweeen inputs and outputs. _, nodes_by_depth, layers, _ = _map_graph_network(inputs, outputs) return nest.flatten([nodes for nodes in nodes_by_depth.values()]), layers def _should_skip_first_node(layer): """Returns True if the first layer node should not be saved or loaded.""" # Networks start with a pre-existing node linking their input to output. return issubclass(layer.__class__, Network) and layer._is_graph_network def _serialize_tensors(kwargs): """Serializes Tensors passed to `call`.""" def _serialize_keras_tensor(t): """Serializes a single Tensor passed to `call`.""" if hasattr(t, '_keras_history'): kh = t._keras_history return [kh.layer.name, kh.node_index, kh.tensor_index] if isinstance(t, np.ndarray): return t.tolist() if isinstance(t, ops.Tensor): return backend.get_value(t).tolist() return t return nest.map_structure(_serialize_keras_tensor, kwargs) def _deserialize_keras_tensors(kwargs, layer_map): """Deserializes Keras Tensors passed to `call`..""" def _deserialize_keras_tensor(t): """Deserializes a single Keras Tensor passed to `call`.""" if isinstance(t, tf_utils.ListWrapper): t = t.as_list() layer_name = t[0] node_index = t[1] tensor_index = t[2] layer = layer_map[layer_name] node = layer._inbound_nodes[node_index] return nest.flatten(node.output_tensors)[tensor_index] return t kwargs = tf_utils.convert_inner_node_data(kwargs, wrap=True) return nest.map_structure(_deserialize_keras_tensor, kwargs)
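# ----------------------------------------------------------------------------
# Illustrative usage sketch for the `save_weights` / `load_weights` contract
# documented above: the format is inferred from the filepath ('.h5'/'.keras'
# selects HDF5, anything else selects the TensorFlow checkpoint format, which
# writes several files under a prefix), and name-based loading is only
# available for HDF5 files. This is a minimal sketch, not part of the module
# above; the model, layer names and file paths are placeholders, and the HDF5
# branch assumes h5py is installed.
import tensorflow as tf


def _build_example_model():
  # `input_shape` on the first layer makes the variables get created at
  # construction time, so the weights exist before saving.
  return tf.keras.Sequential([
      tf.keras.layers.Dense(4, input_shape=(3,), name='dense_a'),
      tf.keras.layers.Dense(1, name='dense_b'),
  ])


example_model = _build_example_model()

# TensorFlow checkpoint format: `filepath` acts as a prefix and multiple
# files are produced (e.g. '/tmp/example_ckpt.index' plus data shards).
example_model.save_weights('/tmp/example_ckpt')

# HDF5 format, selected automatically by the '.h5' suffix (equivalently,
# pass save_format='h5' explicitly).
example_model.save_weights('/tmp/example_weights.h5')

# Topological loading (`by_name=False`): the architecture must match the one
# that produced the checkpoint.
restored_model = _build_example_model()
restored_model.load_weights('/tmp/example_ckpt')

# Name-based loading is supported for HDF5 only; layers are matched by their
# `name`, which is useful for transfer learning with partially changed models.
restored_model.load_weights('/tmp/example_weights.h5', by_name=True)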
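# ----------------------------------------------------------------------------
# A small sketch of the architecture-serialization methods described above:
# `to_json()` captures only the configuration (no weights), and the string
# round-trips through `tf.keras.models.model_from_json`; `summary()` requires
# a built model, otherwise it raises the ValueError quoted in its docstring.
# The model below is a placeholder; note that in this build `to_yaml()` is
# documented above to raise a RuntimeError, so JSON is the supported path.
import tensorflow as tf

example_inputs = tf.keras.Input(shape=(8,), name='features')
example_outputs = tf.keras.layers.Dense(2, name='logits')(example_inputs)
json_model = tf.keras.Model(example_inputs, example_outputs)

architecture_json = json_model.to_json()  # JSON string, architecture only.
rebuilt_model = tf.keras.models.model_from_json(architecture_json)

# Functional (graph network) models are built on construction, so `summary()`
# can print the layer table immediately; weights still need to be restored
# separately, e.g. via `load_weights`.
rebuilt_model.summary()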
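# ----------------------------------------------------------------------------
# A hedged sketch of two checks performed by `_map_graph_network` /
# `_validate_graph_inputs_and_outputs` above: an output that cannot be
# computed from the declared inputs raises a "Graph disconnected" ValueError,
# and duplicate layer names are rejected because serialized nodes refer to
# layers by name. Tensor and layer names here are placeholders.
import tensorflow as tf

in_a = tf.keras.Input(shape=(4,), name='in_a')
in_b = tf.keras.Input(shape=(4,), name='in_b')  # Deliberately not passed below.
joined = tf.keras.layers.Dense(1)(tf.keras.layers.concatenate([in_a, in_b]))

try:
  tf.keras.Model(inputs=in_a, outputs=joined)  # `joined` also depends on in_b.
except ValueError as e:
  assert 'Graph disconnected' in str(e)

dup_in = tf.keras.Input(shape=(4,))
hidden = tf.keras.layers.Dense(4, name='dense_shared_name')(dup_in)
dup_out = tf.keras.layers.Dense(1, name='dense_shared_name')(hidden)

try:
  tf.keras.Model(dup_in, dup_out)  # Two distinct layers share one name.
except ValueError as e:
  assert 'All layer names should be unique' in str(e)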
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/network.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training routines.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import io import logging import sys from absl.testing import parameterized import numpy as np import six from tensorflow.python import keras from tensorflow.python import tf2 from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import function from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import losses from tensorflow.python.keras import metrics as metrics_module from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.callbacks import Callback from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training.rmsprop import RMSPropOptimizer try: import scipy.sparse as scipy_sparse # pylint: disable=g-import-not-at-top except ImportError: scipy_sparse = None class CompileTest(keras_parameterized.TestCase): def _get_multi_output_model(self): input_a = keras.layers.Input(shape=(3,), name='input_a') output_a = keras.layers.Dense(1, name='dense_1')(input_a) output_b = keras.layers.Dense(1, name='dense_2')(input_a) return keras.models.Model(input_a, [output_a, output_b]) def _do_test_compile_with_model_and_single_loss(self, model, loss): model.compile( optimizer='adam', loss=loss, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(model.loss, loss) loss = losses.get(loss) if not isinstance(loss, list): loss_list = [loss] * len(model.outputs) self.assertEqual(len(model.loss_functions), len(loss_list)) for i in range(len(loss_list)): self.assertIsInstance(model.loss_functions[i], losses.LossFunctionWrapper) if not isinstance(loss_list[i], losses.LossFunctionWrapper): self.assertEqual(model.loss_functions[i].fn, loss_list[i]) self.assertAllEqual(model._loss_weights_list, [1.] 
* len(loss_list)) def test_respect_run_functions_eagerly(self): with context.eager_mode(): model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=2, input_dim=3) model.compile('sgd', 'mse') def_function.run_functions_eagerly(True) self.assertTrue(model.run_eagerly) def_function.run_functions_eagerly(False) self.assertFalse(model.run_eagerly) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters(('loss_string', 'mse'), ('loss_function', losses.mean_squared_error), ('loss_instance', losses.MeanSquaredError())) def test_compile_with_single_output(self, loss): model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=2, input_dim=3) self._do_test_compile_with_model_and_single_loss(model, loss) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters(('loss_string', 'mse'), ('loss_function', losses.mean_squared_error), ('loss_instance', losses.MeanSquaredError())) def test_compile_with_multi_output(self, loss): model = self._get_multi_output_model() self._do_test_compile_with_model_and_single_loss(model, loss) @keras_parameterized.run_all_keras_modes def test_compile_with_multi_output_and_multi_loss(self): model = self._get_multi_output_model() # Test loss is a list. loss = ['mse', 'mae'] model.compile( optimizer='adam', loss=loss, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(model.loss_functions[0].fn, losses.mean_squared_error) self.assertEqual(model.loss_functions[1].fn, losses.mean_absolute_error) self.assertAllEqual(model._loss_weights_list, [1., 1.]) # Test loss is a dict. loss = {'dense_1': 'mae', 'dense_2': 'mse'} model.compile( optimizer='adam', loss=loss, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(model.loss_functions[0].fn, losses.mean_absolute_error) self.assertEqual(model.loss_functions[1].fn, losses.mean_squared_error) self.assertAllEqual(model._loss_weights_list, [1., 1.]) @keras_parameterized.run_all_keras_modes def test_compile_with_multi_output_and_loss_weights_list(self): model = self._get_multi_output_model() loss_weights = [1., 2.] 
model.compile( optimizer='adam', loss='mse', loss_weights=loss_weights, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertAllEqual(model._loss_weights_list, [1., 2.]) def test_compile_with_multi_output_and_loss_weights_dict(self): with context.graph_mode(): model = self._get_multi_output_model() loss_weights = {'dense_1': 1., 'dense_2': 2.} model.compile(optimizer='adam', loss='mse', loss_weights=loss_weights) self.assertAllEqual(model._loss_weights_list, [1., 2.]) input_np = np.random.random((10, 3)) output_a_np = np.random.random((10, 1)) output_b_np = np.random.random((10, 1)) with self.cached_session() as sess: sess.run(variables_lib.global_variables_initializer()) total_loss, y_preds = sess.run( [model.total_loss, model.outputs], feed_dict={ 'input_a:0': input_np, 'dense_1_target:0': output_a_np, 'dense_2_target:0': output_b_np }) self.assertAllClose( total_loss, np.mean( np.add((output_a_np - y_preds[0])**2, 2 * (output_b_np - y_preds[1])**2))) @keras_parameterized.run_all_keras_modes def test_compile_with_incorrect_loss_size(self): model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=2, input_dim=3) with self.assertRaisesRegexp(ValueError, 'The model has 1 outputs'): model.compile( optimizer='adam', loss=['mse', 'mae'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) @keras_parameterized.run_all_keras_modes def test_compile_with_incorrect_loss_key(self): model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=2, input_dim=3) with self.assertRaisesRegexp( ValueError, r'Unknown entries in loss dictionary: \[\'unknown_output\'\]. ' r'Only expected following keys: \[\'dense_1\'\]'): model.compile( optimizer='adam', loss={'unknown_output': 'mse'}, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) @keras_parameterized.run_all_keras_modes def test_compile_with_incorrect_loss_weights_size(self): model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=2, input_dim=3) with self.assertRaisesRegexp(ValueError, 'it should have one entry per model output'): model.compile( optimizer='adam', loss='mse', loss_weights=[1., 2.], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) @keras_parameterized.run_all_keras_modes def test_compile_with_incorrect_loss_weights_key(self): model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=2, input_dim=3) with self.assertRaisesRegexp( ValueError, r'Unknown entries in loss_weights dictionary: \[\'unknown_output\'\]. ' r'Only expected following keys: \[\'dense_1\'\]'): model.compile( optimizer='adam', loss='mse', loss_weights={'unknown_output': 1.}, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) @keras_parameterized.run_all_keras_modes def test_compile_with_incorrect_sample_weight_mode(self): model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=2, input_dim=3) with self.assertRaisesRegexp( ValueError, r'Unknown entries in sample_weight_mode dictionary: \[\'unknown\'\]. 
' r'Only expected following keys: \[\'dense_1\'\]'): model.compile( optimizer='adam', loss='mse', sample_weight_mode={'unknown': 'temporal'}, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) class TrainingTest(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_fit_training_arg(self): class ReturnTraining(keras.layers.Layer): def call(self, inputs, training): if training: return inputs + array_ops.constant([100], 'float32') else: return inputs + array_ops.constant([0], 'float32') model = keras.Sequential([ReturnTraining()]) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) hist = model.fit(x=np.array([0.]), y=np.array([0.])) self.assertAllClose(hist.history['loss'][0], 10000) @keras_parameterized.run_all_keras_modes def test_fit_and_validate_learning_phase(self): class ReturnTraining(keras.layers.Layer): def call(self, inputs): return keras.backend.in_train_phase( lambda: array_ops.ones_like(inputs), lambda: array_ops.zeros_like(inputs)) model = keras.Sequential([ReturnTraining(input_shape=(2,))]) model.compile( 'sgd', loss='mae', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) inputs = np.ones((40, 2), dtype=np.float32) targets = np.ones((40, 1), dtype=np.float32) # Test correctness with `steps_per_epoch`. train_dataset = dataset_ops.Dataset.from_tensor_slices( (inputs, targets)).batch(10) val_dataset = dataset_ops.Dataset.from_tensor_slices( (inputs, targets)).batch(10) history = model.fit( train_dataset, epochs=2, verbose=1, validation_data=val_dataset) # The training loss should be 0.0 self.assertAllClose(history.history['loss'][0], 0.0) # The validation loss should be 1.0. self.assertAllClose(history.history['val_loss'][0], 1.0) @keras_parameterized.run_all_keras_modes def test_fit_and_validate_training_arg(self): class ReturnTraining(keras.layers.Layer): def call(self, inputs, training=None): return keras.backend.in_train_phase( lambda: array_ops.ones_like(inputs), lambda: array_ops.zeros_like(inputs), training=training) model = keras.Sequential([ReturnTraining(input_shape=(2,))]) model.compile( 'sgd', loss='mae', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) inputs = np.ones((40, 2), dtype=np.float32) targets = np.ones((40, 1), dtype=np.float32) # Test correctness with `steps_per_epoch`. train_dataset = dataset_ops.Dataset.from_tensor_slices( (inputs, targets)).batch(10) val_dataset = dataset_ops.Dataset.from_tensor_slices( (inputs, targets)).batch(10) history = model.fit( train_dataset, epochs=2, verbose=1, validation_data=val_dataset) # The training loss should be 0.0 self.assertAllClose(history.history['loss'][0], 0.0) # The validation loss should be 1.0. 
self.assertAllClose(history.history['val_loss'][0], 1.0) @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types def test_target_dtype_matches_output(self): def _loss_fn(labels, preds): self.assertEqual(labels.dtype, preds.dtype) return labels - preds layers = [keras.layers.Dense(10, dtype=np.float64), keras.layers.Dense(10, dtype=np.float64)] model = testing_utils.get_model_from_layers(layers, input_shape=(1,)) inputs = np.ones(10, dtype=np.float64) targets = np.ones(10, dtype=np.float64) model.compile( 'sgd', loss=_loss_fn, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(inputs, targets) model.test_on_batch(inputs, targets) self.assertEqual(model.predict(inputs).dtype, np.float64) @keras_parameterized.run_all_keras_modes def test_fit_and_validate_nested_training_arg(self): class NestedReturnTraining(keras.layers.Layer): def call(self, inputs, training=None): return keras.backend.in_train_phase( lambda: array_ops.ones_like(inputs), lambda: array_ops.zeros_like(inputs), training=training) class ReturnTraining(keras.layers.Layer): def __init__(self, input_shape=None, **kwargs): super(ReturnTraining, self).__init__(input_shape=input_shape, **kwargs) self._nested_layer = None def build(self, input_shape): self._nested_layer = NestedReturnTraining() self.built = True def call(self, inputs): return self._nested_layer(inputs) model = keras.Sequential([ReturnTraining(input_shape=(2,))]) model.compile( 'sgd', loss='mae', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) inputs = np.ones((40, 2), dtype=np.float32) targets = np.ones((40, 1), dtype=np.float32) # Test correctness with `steps_per_epoch`. train_dataset = dataset_ops.Dataset.from_tensor_slices( (inputs, targets)).batch(10) val_dataset = dataset_ops.Dataset.from_tensor_slices( (inputs, targets)).batch(10) history = model.fit( train_dataset, epochs=2, verbose=1, validation_data=val_dataset) # The training loss should be 0.0 self.assertAllClose(history.history['loss'][0], 0.0) # The validation loss should be 1.0. 
self.assertAllClose(history.history['val_loss'][0], 1.0) @keras_parameterized.run_with_all_model_types(exclude_models='sequential') @keras_parameterized.run_all_keras_modes def test_fit_on_arrays(self): input_a = keras.layers.Input(shape=(3,), name='input_a') input_b = keras.layers.Input(shape=(3,), name='input_b') dense = keras.layers.Dense(4, name='dense') dropout = keras.layers.Dropout(0.5, name='dropout') branch_a = [input_a, dense] branch_b = [input_b, dense, dropout] model = testing_utils.get_multi_io_model(branch_a, branch_b) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' loss_weights = [1., 0.5] model.compile( optimizer, loss, metrics=[metrics_module.CategoricalAccuracy(), 'mae'], loss_weights=loss_weights, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_d_np = np.random.random((10, 4)) output_e_np = np.random.random((10, 4)) # Test fit at different verbosity model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5, verbose=0) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5, verbose=1) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=2, batch_size=5, verbose=2) model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np]) # Test with validation data model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], validation_data=([input_a_np, input_b_np], [output_d_np, output_e_np]), epochs=1, batch_size=5, verbose=0) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], validation_data=([input_a_np, input_b_np], [output_d_np, output_e_np]), epochs=2, batch_size=5, verbose=1) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], validation_data=([input_a_np, input_b_np], [output_d_np, output_e_np]), epochs=2, batch_size=5, verbose=2) # Test with validation split model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=2, batch_size=5, verbose=0, validation_split=0.2) if testing_utils.get_model_type() == 'functional': # Test with dictionary inputs model.fit( { 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }, epochs=1, batch_size=5, verbose=0) model.fit( { 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }, epochs=1, batch_size=5, verbose=1) model.fit( { 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }, validation_data=({ 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }), epochs=1, batch_size=5, verbose=0) model.train_on_batch({ 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }) # Test with lists for loss, metrics loss = ['mae', 'mse'] model.compile( optimizer, loss, metrics=[metrics_module.CategoricalAccuracy(), 'mae'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5, verbose=0) # Test with dictionaries for loss, metrics, loss weights if testing_utils.get_model_type() == 'functional': loss = {'dense': 'mse', 'dropout': 'mae'} loss_weights = {'dense': 1., 'dropout': 0.5} metrics = { 'dense': 'mse', 'dropout': metrics_module.CategoricalAccuracy() } model.compile( optimizer, loss, 
metrics=metrics, loss_weights=loss_weights, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5, verbose=0) # Invalid use cases with self.assertRaises(ValueError): model.train_on_batch({'input_a': input_a_np}, [output_d_np, output_e_np]) with self.assertRaises(ValueError): model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, validation_data=([input_a_np, input_b_np], 0, 0), verbose=0) with self.assertRaises(ValueError): model.train_on_batch([input_a_np], [output_d_np, output_e_np]) with self.assertRaises(ValueError): model.train_on_batch(1, [output_d_np, output_e_np]) with self.assertRaises(ValueError): model.train_on_batch(input_a_np, [output_d_np, output_e_np]) with self.assertRaises(ValueError): bad_input = np.random.random((11, 3)) model.train_on_batch([bad_input, input_b_np], [output_d_np, output_e_np]) with self.assertRaises(ValueError): bad_target = np.random.random((11, 4)) model.train_on_batch([input_a_np, input_b_np], [bad_target, output_e_np]) # Build single-input model x = keras.layers.Input(shape=(3,), name='input_a') y = keras.layers.Dense(4)(x) model = keras.models.Model(x, y) model.compile( optimizer, loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # This will work model.fit([input_a_np], output_d_np, epochs=1) # TODO(gsundeep) Test only works in eager, file ticket if testing_utils.should_run_eagerly() and context.executing_eagerly(): with self.assertRaises(ValueError): model.fit([input_a_np, input_a_np], output_d_np, epochs=1) # Test model on a list of floats input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 4)) model.fit([np.ndarray.tolist(input_a_np)], [np.ndarray.tolist(input_b_np)], epochs=2, batch_size=5, verbose=2) @keras_parameterized.run_all_keras_modes def test_evaluate_predict_on_arrays(self): a = keras.layers.Input(shape=(3,), name='input_a') b = keras.layers.Input(shape=(3,), name='input_b') dense = keras.layers.Dense(4, name='dense') c = dense(a) d = dense(b) e = keras.layers.Dropout(0.5, name='dropout')(c) model = keras.models.Model([a, b], [d, e]) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' loss_weights = [1., 0.5] model.compile( optimizer, loss, metrics=['mae', metrics_module.CategoricalAccuracy()], loss_weights=loss_weights, sample_weight_mode=None, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_d_np = np.random.random((10, 4)) output_e_np = np.random.random((10, 4)) # Test evaluate at different verbosity out = model.evaluate( [input_a_np, input_b_np], [output_d_np, output_e_np], batch_size=5, verbose=0) self.assertEqual(len(out), 7) out = model.evaluate( [input_a_np, input_b_np], [output_d_np, output_e_np], batch_size=5, verbose=1) self.assertEqual(len(out), 7) out = model.evaluate( [input_a_np, input_b_np], [output_d_np, output_e_np], batch_size=5, verbose=2) self.assertEqual(len(out), 7) out = model.test_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np]) self.assertEqual(len(out), 7) # Test evaluate with dictionary inputs model.evaluate( { 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }, batch_size=5, verbose=0) model.evaluate( { 'input_a': 
input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }, batch_size=5, verbose=1) # Test predict out = model.predict([input_a_np, input_b_np], batch_size=5) self.assertEqual(len(out), 2) out = model.predict({'input_a': input_a_np, 'input_b': input_b_np}) self.assertEqual(len(out), 2) out = model.predict_on_batch({ 'input_a': input_a_np, 'input_b': input_b_np }) self.assertEqual(len(out), 2) @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types def test_activity_regularizer_fit(self): loss = {} for reg in [None, 'l2']: layers = [ keras.layers.Dense( 10, activation='relu', activity_regularizer=reg, kernel_initializer='ones', use_bias=False), keras.layers.Dense( 1, activation='sigmoid', kernel_initializer='ones', use_bias=False), ] model = testing_utils.get_model_from_layers( layers, input_shape=(10,)) x = np.ones((10, 10), 'float32') y = np.ones((10, 1), 'float32') optimizer = RMSPropOptimizer(learning_rate=0.001) model.compile( optimizer, 'binary_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, batch_size=2, epochs=5) loss[reg] = model.evaluate(x, y) self.assertLess(loss[None], loss['l2']) @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types def test_activity_regularizer_loss_value(self): layer = keras.layers.Dense( 1, kernel_initializer=keras.initializers.zeros(), bias_initializer=keras.initializers.ones(), activity_regularizer='l2') model = testing_utils.get_model_from_layers([layer], input_shape=(10,)) x = np.ones((10, 10), 'float32') y = np.ones((10, 1), 'float32') optimizer = RMSPropOptimizer(learning_rate=0.001) model.compile( optimizer, 'binary_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) loss = model.test_on_batch(x, y) self.assertAlmostEqual(0.01, loss, places=4) @keras_parameterized.run_all_keras_modes def test_activity_regularizer_batch_independent(self): inputs = keras.layers.Input(shape=(10,)) x = keras.layers.Dense( 10, activation='relu', activity_regularizer='l2')( inputs) outputs = keras.layers.Dense(1, activation='sigmoid')(x) model = keras.Model(inputs, outputs) optimizer = RMSPropOptimizer(learning_rate=0.001) model.compile( optimizer, 'binary_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones((10, 10), 'float32') y = np.ones((10, 1), 'float32') loss_small_batch = model.test_on_batch(x, y) x2 = np.ones((20, 10), 'float32') y2 = np.ones((20, 1), 'float32') loss_big_batch = model.test_on_batch(x2, y2) self.assertAlmostEqual(loss_small_batch, loss_big_batch, places=4) @keras_parameterized.run_all_keras_modes def test_activity_regularizer_in_model_call(self): class MyModel(keras.Model): def call(self, inputs): self.add_loss(inputs) return inputs x = ops.convert_to_tensor(1.) 
model = MyModel() _ = model(x) self.assertEqual(1, len(model.losses)) @keras_parameterized.run_all_keras_modes def test_custom_mapping_in_config(self): class MyModel(keras.Model): def call(self, inputs): return inputs def get_config(self): self.a = {} return {'a': self.a} model = MyModel() self.assertIn('{"a": {}}', model.to_json()) @keras_parameterized.run_all_keras_modes def test_training_on_sparse_data_with_dense_placeholders(self): # TODO(kaftan) Test seems to not work, file ticket if testing_utils.should_run_eagerly() and context.executing_eagerly(): self.skipTest('Skipping running model eagerly.') if scipy_sparse is None: return test_inputs = [ scipy_sparse.random(6, 3, density=0.25).tocsr() for _ in range(2) ] test_outputs = [ scipy_sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5) ] in1 = keras.layers.Input(shape=(3,)) in2 = keras.layers.Input(shape=(3,)) out1 = keras.layers.Dropout(0.5, name='dropout')(in1) out2 = keras.layers.Dense(4, name='dense_1')(in2) model = keras.Model([in1, in2], [out1, out2]) model.predict(test_inputs, batch_size=2) optimizer = RMSPropOptimizer(learning_rate=0.001) model.compile( optimizer, 'mse', metrics=['mae', metrics_module.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(test_inputs, test_outputs, epochs=1, batch_size=2, validation_split=0.5) model.evaluate(test_inputs, test_outputs, batch_size=2) @keras_parameterized.run_all_keras_modes def test_compile_with_sparse_placeholders(self): # TODO(kaftan) Test seems to not work, file ticket if testing_utils.should_run_eagerly() and context.executing_eagerly(): self.skipTest('Skipping running model eagerly.') input_layer = keras.layers.Input(shape=(10,), sparse=True) weights = variables_lib.Variable( np.ones((10, 1)).astype(np.float32), name='weights') weights_mult = lambda x: sparse_ops.sparse_tensor_dense_matmul(x, weights) output_layer = keras.layers.Lambda(weights_mult)(input_layer) model = keras.Model([input_layer], output_layer) model.compile( loss='binary_crossentropy', optimizer=keras.optimizers.Adam(lr=0.0001), metrics=['accuracy'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) @keras_parameterized.run_all_keras_modes def test_that_trainable_disables_updates(self): val_a = np.random.random((10, 4)) val_out = np.random.random((10, 4)) a = keras.layers.Input(shape=(4,)) layer = keras.layers.BatchNormalization(input_shape=(4,)) b = layer(a) model = keras.Model(a, b) model.trainable = False assert not model.updates model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) assert not model.updates x1 = model.predict(val_a) model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) self.assertAllClose(x1, x2, atol=1e-7) model.trainable = True model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) assert model.updates model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) assert np.abs(np.sum(x1 - x2)) > 1e-5 layer.trainable = False model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) assert not model.updates x1 = model.predict(val_a) model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) self.assertAllClose(x1, x2, atol=1e-7) 
@keras_parameterized.run_all_keras_modes def test_weight_deduplication(self): class WatchingLayer(keras.layers.Layer): def __init__(self, dense_to_track): # This will cause the kernel and bias to be double counted, effectively # doubling the learning rate if weights are not deduped. self._kernel = dense_to_track.kernel self._bias = dense_to_track.bias super(WatchingLayer, self).__init__() inp = keras.layers.Input(shape=(1,)) dense_layer = keras.layers.Dense(1) dense_output = dense_layer(inp) # This will build the dense kernel # Deterministically set weights to make the test repeatable. dense_layer.set_weights([np.ones((1, 1)), np.zeros((1,))]) output = WatchingLayer(dense_layer)(dense_output) model = keras.models.Model(inp, output) # 0.25 is the edge of the radius of convergence for the double apply case. # At lr=0.24, the double apply case will very slowly descend while the # correct case will drop very quickly. model.compile(loss='mse', optimizer=gradient_descent.SGD(0.24), run_eagerly=testing_utils.should_run_eagerly()) x = np.ones((64 * 2,)) y = 4.5 * x - 3. history = model.fit(x, y, batch_size=64, epochs=2, verbose=2) # If the gradient apply is duplicated then the loss after 2 epochs will # be ~0.15, compared to the correct answer of O(1e-7). self.assertLess(history.history['loss'][-1], 1e-6) def test_logs_passed_to_callbacks(self): with self.cached_session(): input_dim = 5 num_classes = 1 class TestCallback(Callback): def __init__(self): super(TestCallback, self).__init__() self.epoch_end_logs = None self.batch_end_logs = None self.epoch_end_call_count = 0 self.batch_end_call_count = 0 def on_epoch_end(self, epoch, logs=None): self.epoch_end_logs = logs self.epoch_end_call_count += 1 def on_batch_end(self, batch, logs=None): self.batch_end_logs = logs self.batch_end_call_count += 1 model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=num_classes, input_dim=input_dim) model.compile( loss='binary_crossentropy', metrics=['acc'], weighted_metrics=['mae'], optimizer=RMSPropOptimizer(learning_rate=0.01)) np.random.seed(1337) (x_train, y_train), (_, _) = testing_utils.get_test_data( train_samples=10, test_samples=10, input_shape=(input_dim,), num_classes=num_classes) test_callback = TestCallback() model.fit( x_train, y_train, batch_size=2, epochs=2, verbose=0, callbacks=[test_callback], validation_data=(x_train, y_train)) self.assertEqual(test_callback.batch_end_call_count, 10) self.assertEqual(test_callback.epoch_end_call_count, 2) weighted_metric = ('mae' if tf2.enabled() else 'weighted_mean_absolute_error') self.assertSetEqual( set(test_callback.batch_end_logs.keys()), set(['batch', 'size', 'acc', 'loss', weighted_metric])) self.assertSetEqual( set(test_callback.epoch_end_logs.keys()), set([ 'acc', 'loss', weighted_metric, 'val_acc', 'val_loss', 'val_' + weighted_metric ])) @keras_parameterized.run_all_keras_modes def test_mismatched_output_shape_and_target_shape(self): model = keras.Sequential([ keras.layers.Dense(2, input_shape=(3, 4)), keras.layers.Dense(5), ]) model.compile( RMSPropOptimizer(learning_rate=0.001), loss='sparse_categorical_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # Test with Numpy data x_train = np.random.random((10, 3, 4)) y_train = np.random.randint(0, 5, size=(10, 3)) model.fit(x_train, y_train, batch_size=5, epochs=1) # Test with iterator dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train)) dataset = dataset.repeat(10) dataset = 
dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2) if context.executing_eagerly(): # Test with eager execution model.compile(RMSPropOptimizer(learning_rate=0.001), loss='sparse_categorical_crossentropy', run_eagerly=True) model.fit(x_train, y_train, batch_size=5, epochs=1) # Test with eager execution and iterator model.fit(dataset, epochs=1, steps_per_epoch=2) def test_losses_in_defun(self): with context.eager_mode(): layer = keras.layers.Dense(1, kernel_regularizer='l1') layer(array_ops.ones([1, 10])) @function.defun def get_losses(): return layer.losses self.assertAllEqual( self.evaluate(layer.losses), self.evaluate(get_losses())) @keras_parameterized.run_all_keras_modes def test_logging(self): mock_stdout = io.BytesIO() if six.PY2 else io.StringIO() model = keras.models.Sequential() model.add(keras.layers.Dense(10, activation='relu')) model.add(keras.layers.Dense(1, activation='sigmoid')) model.compile( RMSPropOptimizer(learning_rate=0.001), loss='binary_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) with test.mock.patch.object(sys, 'stdout', mock_stdout): model.fit( np.ones((10, 10), 'float32'), np.ones((10, 1), 'float32'), epochs=10) self.assertTrue('Epoch 5/10' in mock_stdout.getvalue()) @tf_test_util.run_in_graph_and_eager_modes def test_training_with_loss_instance(self): a = keras.layers.Input(shape=(3,), name='input_a') b = keras.layers.Input(shape=(3,), name='input_b') dense = keras.layers.Dense(4, name='dense') c = dense(a) d = dense(b) e = keras.layers.Dropout(0.5, name='dropout')(c) model = keras.models.Model([a, b], [d, e]) loss_weights = [1., 0.5] model.compile( RMSPropOptimizer(learning_rate=0.001), loss=keras.losses.MeanSquaredError(), metrics=[metrics_module.CategoricalAccuracy(), 'mae'], loss_weights=loss_weights) input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_d_np = np.random.random((10, 4)) output_e_np = np.random.random((10, 4)) model.fit([input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5) @tf_test_util.run_in_graph_and_eager_modes def test_static_batch_in_input_layer(self): class Counter(keras.callbacks.Callback): def __init__(self): self.batches = 0 def on_batch_end(self, batch, logs=None): self.batches += 1 x, y = np.ones((64, 10), 'float32'), np.ones((64, 1), 'float32') for batch_size, expected_batches in [(None, 2), (4, 16)]: inputs = keras.Input(batch_size=batch_size, shape=(10,)) outputs = keras.layers.Dense(1, activation='sigmoid')(inputs) model = keras.Model(inputs, outputs) model.compile(keras.optimizer_v2.adam.Adam(0.001), 'binary_crossentropy') counter = Counter() model.fit(x, y, callbacks=[counter]) self.assertEqual(counter.batches, expected_batches) model = keras.Sequential( [keras.layers.Dense(1, batch_input_shape=(batch_size, 10))]) model.compile(keras.optimizer_v2.adam.Adam(0.001), 'binary_crossentropy') counter = Counter() model.fit(x, y, callbacks=[counter]) self.assertEqual(counter.batches, expected_batches) @tf_test_util.run_in_graph_and_eager_modes def test_static_batch_in_input_layer_consistency_checks(self): x, y = np.ones((64, 10), 'float32'), np.ones((64, 1), 'float32') inputs = keras.Input(batch_size=2, shape=(10,)) outputs = keras.layers.Dense(1, activation='sigmoid')(inputs) model = keras.Model(inputs, outputs) model.compile(keras.optimizer_v2.adam.Adam(0.001), 'binary_crossentropy') with self.assertRaisesRegexp(ValueError, 'incompatible with the specified batch size'): 
model.fit(x, y, batch_size=4) @tf_test_util.run_in_graph_and_eager_modes def test_compatible_batch_size_functional_model(self): class MyLayer(keras.layers.Layer): def call(self, inputs): return array_ops.concat(inputs, axis=0) input1 = keras.Input(batch_size=2, shape=(10,)) input2 = keras.Input(batch_size=3, shape=(10,)) outputs = MyLayer()([input1, input2]) with self.assertRaisesRegexp(ValueError, 'specified batch sizes of the Input Layers'): keras.Model([input1, input2], outputs) @tf_test_util.run_in_graph_and_eager_modes def test_calling_subclass_model_on_different_datasets(self): class SubclassedModel(keras.models.Model): def call(self, inputs): return inputs * 2 model = SubclassedModel() dataset_one = dataset_ops.Dataset.range(2).batch(2) dataset_two = dataset_ops.Dataset.range(3, 10).batch(2) self.assertAllEqual([[0], [2]], model.predict(dataset_one, steps=1)) self.assertAllEqual([[6], [8], [10], [12]], model.predict(dataset_two, steps=2)) def test_training_on_sparse_categorical_crossentropy_loss_with_softmax(self): with context.eager_mode(): np.random.seed(1337) train_x = np.ones((100, 4)) train_y = np.random.randint(0, 1, size=(100, 1)) reference_model = testing_utils.get_small_sequential_mlp(16, 2, input_dim=4) reference_model.compile(loss='sparse_categorical_crossentropy', optimizer=RMSPropOptimizer(learning_rate=0.001), run_eagerly=True) fixed_weights = reference_model.get_weights() reference_model_loss = reference_model.train_on_batch(train_x, train_y) test_model = testing_utils.get_small_sequential_mlp(16, 2, input_dim=4) test_model.compile(loss='sparse_categorical_crossentropy', optimizer=RMSPropOptimizer(learning_rate=0.001), run_eagerly=False) test_model.set_weights(fixed_weights) test_model_loss = test_model.train_on_batch(train_x, train_y) self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4) def test_training_on_categorical_crossentropy_loss_with_softmax(self): with context.eager_mode(): np.random.seed(1337) train_x = np.ones((100, 4)) train_y = keras.utils.to_categorical(np.random.randint(0, 1, size=(100, 1)), 2) reference_model = testing_utils.get_small_sequential_mlp(16, 2, input_dim=4) reference_model.compile(loss='categorical_crossentropy', optimizer=RMSPropOptimizer(learning_rate=0.001), run_eagerly=True) fixed_weights = reference_model.get_weights() reference_model_loss = reference_model.train_on_batch(train_x, train_y) test_model = testing_utils.get_small_sequential_mlp(16, 2, input_dim=4) test_model.compile(loss='categorical_crossentropy', optimizer=RMSPropOptimizer(learning_rate=0.001), run_eagerly=False) test_model.set_weights(fixed_weights) test_model_loss = test_model.train_on_batch(train_x, train_y) self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4) def test_training_on_binary_crossentropy_loss(self): with context.eager_mode(): train_x = np.ones((100, 4), dtype=np.float32) train_y = np.ones((100, 1), dtype=np.float32) reference_model = testing_utils.get_small_sequential_mlp(16, 1, input_dim=4) reference_model.compile(loss='binary_crossentropy', optimizer=RMSPropOptimizer(learning_rate=0.001), run_eagerly=True) fixed_weights = reference_model.get_weights() reference_model_loss = reference_model.train_on_batch(train_x, train_y) test_model = testing_utils.get_small_sequential_mlp(16, 1, input_dim=4) test_model.compile(loss='binary_crossentropy', optimizer=RMSPropOptimizer(learning_rate=0.001), run_eagerly=False) test_model.set_weights(fixed_weights) test_model_loss = test_model.train_on_batch(train_x, train_y) 
self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes @parameterized.named_parameters( ('default', 1, 4), ('integer_two', 2, 2), ('integer_four', 4, 1), ('simple_list', [1, 3, 4], 3), ('duplicated_list', [4, 2, 2], 2)) def test_validation_freq(self, validation_freq, expected_runs): x, y = np.ones((10, 10)), np.ones((10, 1)) model = testing_utils.get_small_mlp(2, 1, 10) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) class ValCounter(keras.callbacks.Callback): def __init__(self): self.val_runs = 0 def on_test_begin(self, logs=None): self.val_runs += 1 val_counter = ValCounter() model.fit( x, y, epochs=4, validation_data=(x, y), validation_freq=validation_freq, callbacks=[val_counter]) self.assertEqual(val_counter.val_runs, expected_runs) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_validation_steps_without_data(self): x, y = np.ones((10, 10)), np.ones((10, 1)) model = testing_utils.get_small_mlp(2, 1, 10) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) with self.assertRaisesRegexp( ValueError, '`validation_steps` should not be specified if ' '`validation_data` is None.'): model.fit(x, y, epochs=4, validation_data=None, validation_steps=3) @keras_parameterized.run_all_keras_modes def test_add_loss_correctness(self): class Bias(keras.layers.Layer): def build(self, input_shape): self.bias = self.add_variable('bias', (1,), initializer='zeros') def call(self, inputs): return inputs + self.bias inputs = keras.Input(shape=(1,)) targets = keras.Input(shape=(1,)) outputs = Bias()(inputs) model = keras.Model([inputs, targets], outputs) model.add_loss(2 * math_ops.reduce_mean( keras.losses.mean_absolute_error(targets, outputs))) model.add_loss(keras.losses.MeanAbsoluteError()(targets, outputs)) model.compile( keras.optimizer_v2.gradient_descent.SGD(0.025), loss=keras.losses.MeanAbsoluteError(), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.array([[0.], [1.], [2.]]) y = np.array([[0.5], [2.], [3.5]]) history = model.fit([x, y], y, batch_size=3, epochs=5) self.assertAllClose(history.history['loss'], [4., 3.6, 3.2, 2.8, 2.4], 1e-3) @keras_parameterized.run_all_keras_modes def test_unconditional_add_loss_correctness(self): class MyLayer(keras.layers.Layer): def call(self, inputs, training=None): # Reachable from the inputs but marked as unconditional. 
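        # Because `add_loss` is called without the `inputs` argument, the loss
        # is recorded as unconditional even though its value depends on the
        # inputs. A minimal sketch of the same pattern (hypothetical layer,
        # not used by this test):
        #
        #   class L1Activity(keras.layers.Layer):
        #     def call(self, inputs):
        #       self.add_loss(0.01 * math_ops.reduce_sum(math_ops.abs(inputs)))
        #       return inputs
        #
        # The assertions below check that such a loss still appears in
        # `model.losses` and is folded into the training loss.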
        self.add_loss(math_ops.reduce_sum(inputs))
        return inputs

    inputs = keras.Input((3,))
    layer = MyLayer()
    outputs = layer(inputs)
    model = keras.Model(inputs, outputs)
    self.assertEqual(len(model.losses), 1)

    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(loss, 2 * 3)

  @keras_parameterized.run_all_keras_modes
  def test_clear_losses(self):

    class LayerWithSharedNestedLossLayer(keras.layers.Layer):

      def __init__(self):
        super(LayerWithSharedNestedLossLayer, self).__init__()
        self.loss_layer = keras.layers.ActivityRegularization(l2=0.001)
        self.add_weight(shape=(1,), regularizer='l2')

      def call(self, x):
        x = self.loss_layer(x)
        return self.loss_layer(x)

    inputs = keras.Input(shape=(1,))
    outputs = LayerWithSharedNestedLossLayer()(inputs)
    model = keras.Model(inputs, outputs)

    # Weight loss + 2 activity losses.
    self.assertEqual(len(model.losses), 3)

    x = array_ops.ones((1, 1))
    model(x)
    y = array_ops.ones((1, 1))
    model(y)
    if context.executing_eagerly():
      # Eager losses are cleared every `__call__`.
      self.assertEqual(len(model.losses), 3)
    else:
      self.assertEqual(len(model.get_losses_for(x)), 2)
      self.assertEqual(len(model.get_losses_for(y)), 2)
      self.assertEqual(len(model.get_losses_for(None)), 1)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_layer_with_variable_output(self):

    class VariableOutputLayer(keras.layers.Layer):

      def build(self, input_shape):
        self.v = self.add_weight(
            'output_var', shape=(2, 5), initializer='ones')

      def call(self, inputs):
        return self.v

    model = testing_utils.get_model_from_layers(
        [VariableOutputLayer(), keras.layers.Dense(1)], input_shape=(10,))
    # TODO(omalleyt): Make this work with `run_eagerly=True`.
    model.compile('sgd', 'mse', run_eagerly=False)
    model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2, epochs=5)

    self.assertLen(model.trainable_variables, 3)

  # TODO(b/131372221): Make this work with subclassed models.
  @keras_parameterized.run_with_all_model_types(exclude_models=['subclass'])
  @keras_parameterized.run_all_keras_modes
  @testing_utils.enable_v2_dtype_behavior
  def test_model_dtype(self):

    class AssertTypeLayer(keras.layers.Layer):

      def call(self, inputs):
        assert inputs.dtype.name == self.dtype, (
            'Input tensor has type %s which does not match assert type %s' %
            (inputs.dtype.name, self.dtype))
        return inputs + 1.
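    # The loop below builds one model per floating point dtype and relies on
    # v2 dtype behavior to cast the inputs before they reach the layer; the
    # dtype is passed straight through the constructor, e.g.
    # `AssertTypeLayer(dtype='float64')`.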
for dtype in ('float16', 'float32', 'float64'): model = testing_utils.get_model_from_layers( [AssertTypeLayer(dtype=dtype)], input_shape=(10,)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones((10, 10)) y = np.ones((10, 10)) model.fit(x, y) model.test_on_batch(x, y) model(x) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_subclassed_model_with_training_arg(self): class LayerWithTrainingArg(keras.layers.Layer): def call(self, inputs, training=None): self.training = training return inputs class ModelWithTrainingArg(keras.Model): def __init__(self): super(ModelWithTrainingArg, self).__init__() self.l1 = LayerWithTrainingArg() def call(self, inputs, training=None): self.training = training inputs = self.l1(inputs, training=training) return inputs x = np.zeros((1, 2)) model = ModelWithTrainingArg() model.compile( loss='mse', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, x, epochs=1) if (testing_utils.should_run_eagerly() or testing_utils.should_run_tf_function()): expected_training_arg = True else: expected_training_arg = keras.backend.symbolic_learning_phase() self.assertIs(model.training, expected_training_arg) self.assertIs(model.l1.training, expected_training_arg) @keras_parameterized.run_all_keras_modes def test_error_when_model_is_not_compiled(self): inputs = keras.Input(shape=(1,)) outputs = keras.layers.Dense(1)(inputs) model = keras.Model(inputs, outputs) with self.assertRaisesRegex(RuntimeError, 'must compile your model'): model.fit(np.ones((1, 1)), np.ones((1, 1))) class MyModel(keras.Model): def call(self, x): self.add_loss(math_ops.reduce_sum(x)) return x model = MyModel() with self.assertRaisesRegex(RuntimeError, 'must compile your model'): model.fit(np.random.random((32, 1)), epochs=2) class TestExceptionsAndWarnings(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_invalid_batch_dimension(self): def custom_reshape(inputs): return keras.backend.reshape(inputs, (-1, 8, 8, 3)) layer_1 = keras.layers.Lambda(custom_reshape) layer_2 = keras.layers.Conv2D(32, (3, 3)) model = testing_utils.get_model_from_layers([layer_1, layer_2], input_shape=(8, 8, 6)) model.compile('sgd', loss='mse') with self.assertRaisesRegex( ValueError, 'Mismatch between expected batch size and model output batch size. ' r'Output shape = \(20, 6, 6, 32\), expected output shape = ' r'shape \(10, 6, 6, 32\)'): model.predict(np.ones((10, 8, 8, 6)), batch_size=10) @keras_parameterized.run_all_keras_modes def test_invalid_loss(self): num_classes = 5 train_samples = 1000 test_samples = 1000 input_dim = 5 model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=num_classes, input_dim=input_dim) optimizer = RMSPropOptimizer(learning_rate=0.001) model.compile(optimizer, loss='categorical_crossentropy') np.random.seed(1337) (x_train, y_train), (_, _) = testing_utils.get_test_data( train_samples=train_samples, test_samples=test_samples, input_shape=(input_dim,), num_classes=num_classes) with self.assertRaises(ValueError): model.fit(x_train, np.concatenate([y_train, y_train], axis=-1)) if not context.executing_eagerly(): # TODO(psv): Investigate these use cases in eager mode. 
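      # With `categorical_crossentropy` the targets must be one-hot encoded
      # (e.g. via `keras.utils.to_categorical(y_train, num_classes)`), so
      # feeding the raw integer labels below should fail shape validation in
      # graph mode.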
with self.assertRaises(ValueError): model.fit(x_train, y_train) with self.assertRaises(ValueError): model.compile( optimizer, loss=None, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) @keras_parameterized.run_all_keras_modes def test_compile_warning_for_loss_missing_output(self): with self.cached_session(): inp = keras.layers.Input(shape=(16,), name='input_a') out_1 = keras.layers.Dense(8, name='dense_1')(inp) out_2 = keras.layers.Dense(3, activation='softmax', name='dense_2')(out_1) model = keras.models.Model(inputs=[inp], outputs=[out_1, out_2]) optimizer = RMSPropOptimizer(learning_rate=0.001) with test.mock.patch.object(logging, 'warning') as mock_log: model.compile( optimizer, loss={ 'dense_2': 'categorical_crossentropy', }, metrics={ 'dense_2': 'categorical_accuracy', 'dense_1': metrics_module.CategoricalAccuracy(), }, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) msg = ('Output dense_1 missing from loss dictionary. We assume this ' 'was done on purpose. The fit and evaluate APIs will not be ' 'expecting any data to be passed to dense_1.') self.assertRegexpMatches(str(mock_log.call_args), msg) @keras_parameterized.run_all_keras_modes def test_invalid_steps_per_epoch_usage(self): x = keras.layers.Input(shape=(1,)) y = keras.layers.Dense(1)(x) model = keras.Model(x, y) model.compile( 'sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=False) err_msg = 'When passing input data as arrays, do not specify' if testing_utils.should_run_eagerly(): with self.assertRaisesRegex(ValueError, err_msg): model.fit(x=np.zeros((100, 1)), y=np.ones((100, 1)), steps_per_epoch=4) with self.assertRaisesRegex(ValueError, err_msg): model.evaluate(x=np.zeros((100, 1)), y=np.ones((100, 1)), steps=4) with self.assertRaisesRegex(ValueError, err_msg): model.predict(np.zeros((100, 1)), steps=4) else: with test.mock.patch.object(logging, 'warning') as mock_log: model._standardize_user_data( np.zeros((100, 1)), np.ones((100, 1)), check_steps=True, steps=4) self.assertRegexpMatches(str(mock_log.call_args), err_msg) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_invalid_batch_size_argument_with_sequence_input(self): class DummySequence(keras.utils.Sequence): def __getitem__(self, idx): return np.zeros([10, 2]), np.ones([10, 4]) def __len__(self): return 10 model = testing_utils.get_small_mlp( num_hidden=10, num_classes=1, input_dim=10) model.compile( 'adam', 'binary_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) with self.assertRaisesRegexp( ValueError, 'The `batch_size` argument must not be specified'): model.fit(DummySequence(), batch_size=2, epochs=2) with self.assertRaisesRegexp( ValueError, 'The `batch_size` argument must not be specified'): model.evaluate(DummySequence(), batch_size=2) with self.assertRaisesRegexp( ValueError, 'The `batch_size` argument must not be specified'): model.predict(DummySequence(), batch_size=2) class LossWeightingTest(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes def test_class_weights(self): num_classes = 5 batch_size = 5 epochs = 10 weighted_class = 3 weight = 10. 
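    # Class 3 is given a 10x weight; the `class_weight` dict constructed below
    # is equivalent to {0: 1., 1: 1., 2: 1., 3: 10., 4: 1.}, so samples of the
    # weighted class should end up with a lower loss than the test set as a
    # whole.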
train_samples = 1000 test_samples = 1000 input_dim = 5 learning_rate = 0.001 model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=num_classes, input_dim=input_dim) model.compile( loss='categorical_crossentropy', metrics=['acc', metrics_module.CategoricalAccuracy()], weighted_metrics=['mae', metrics_module.CategoricalAccuracy()], optimizer=RMSPropOptimizer(learning_rate=learning_rate), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) np.random.seed(1337) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=train_samples, test_samples=test_samples, input_shape=(input_dim,), num_classes=num_classes) int_y_test = y_test.copy() int_y_train = y_train.copy() # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) test_ids = np.where(int_y_test == np.array(weighted_class))[0] class_weight = dict([(i, 1.) for i in range(num_classes)]) class_weight[weighted_class] = weight model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs // 3, verbose=0, class_weight=class_weight, validation_data=(x_train, y_train)) model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs // 2, verbose=0, class_weight=class_weight) model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs // 2, verbose=0, class_weight=class_weight, validation_split=0.1) model.train_on_batch( x_train[:batch_size], y_train[:batch_size], class_weight=class_weight) ref_score = model.evaluate(x_test, y_test, verbose=0) score = model.evaluate( x_test[test_ids, :], y_test[test_ids, :], verbose=0) self.assertLess(score[0], ref_score[0]) @keras_parameterized.run_all_keras_modes def test_sample_weights(self): num_classes = 5 batch_size = 5 epochs = 10 weighted_class = 3 weight = 10. 
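    # Same idea as the class_weight test above, but the 10x emphasis on class
    # 3 is expressed per sample: a vector of ones with
    # `sample_weight[int_y_train == weighted_class] = weight`, as built below.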
train_samples = 1000 test_samples = 1000 input_dim = 5 learning_rate = 0.001 model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=num_classes, input_dim=input_dim) model.compile( RMSPropOptimizer(learning_rate=learning_rate), metrics=['acc', metrics_module.CategoricalAccuracy()], weighted_metrics=['mae', metrics_module.CategoricalAccuracy()], loss='categorical_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) np.random.seed(43) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=train_samples, test_samples=test_samples, input_shape=(input_dim,), num_classes=num_classes) int_y_test = y_test.copy() int_y_train = y_train.copy() # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) test_ids = np.where(int_y_test == np.array(weighted_class))[0] sample_weight = np.ones((y_train.shape[0])) sample_weight[int_y_train == weighted_class] = weight model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs // 3, verbose=0, sample_weight=sample_weight) model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs // 3, verbose=0, sample_weight=sample_weight, validation_split=0.1) model.train_on_batch( x_train[:batch_size], y_train[:batch_size], sample_weight=sample_weight[:batch_size]) model.test_on_batch( x_train[:batch_size], y_train[:batch_size], sample_weight=sample_weight[:batch_size]) ref_score = model.evaluate( x_test, y_test, verbose=0, sample_weight=sample_weight) score = model.evaluate( x_test[test_ids, :], y_test[test_ids, :], verbose=0, sample_weight=sample_weight[test_ids]) self.assertLess(score[0], ref_score[0]) @keras_parameterized.run_all_keras_modes def test_temporal_sample_weights(self): num_classes = 5 batch_size = 5 epochs = 10 weighted_class = 3 weight = 10. 
train_samples = 1000 test_samples = 1000 input_dim = 5 timesteps = 3 learning_rate = 0.001 with self.cached_session(): model = keras.models.Sequential() model.add( keras.layers.TimeDistributed( keras.layers.Dense(num_classes), input_shape=(timesteps, input_dim))) model.add(keras.layers.Activation('softmax')) np.random.seed(1337) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=train_samples, test_samples=test_samples, input_shape=(input_dim,), num_classes=num_classes) int_y_test = y_test.copy() int_y_train = y_train.copy() # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) test_ids = np.where(int_y_test == np.array(weighted_class))[0] sample_weight = np.ones((y_train.shape[0])) sample_weight[int_y_train == weighted_class] = weight temporal_x_train = np.reshape(x_train, (len(x_train), 1, x_train.shape[1])) temporal_x_train = np.repeat(temporal_x_train, timesteps, axis=1) temporal_x_test = np.reshape(x_test, (len(x_test), 1, x_test.shape[1])) temporal_x_test = np.repeat(temporal_x_test, timesteps, axis=1) temporal_y_train = np.reshape(y_train, (len(y_train), 1, y_train.shape[1])) temporal_y_train = np.repeat(temporal_y_train, timesteps, axis=1) temporal_y_test = np.reshape(y_test, (len(y_test), 1, y_test.shape[1])) temporal_y_test = np.repeat(temporal_y_test, timesteps, axis=1) temporal_sample_weight = np.reshape(sample_weight, (len(sample_weight), 1)) temporal_sample_weight = np.repeat( temporal_sample_weight, timesteps, axis=1) model.compile( RMSPropOptimizer(learning_rate=learning_rate), loss='categorical_crossentropy', metrics=['acc', metrics_module.CategoricalAccuracy()], weighted_metrics=['mae', metrics_module.CategoricalAccuracy()], sample_weight_mode='temporal', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit( temporal_x_train, temporal_y_train, batch_size=batch_size, epochs=epochs // 3, verbose=0, sample_weight=temporal_sample_weight) model.fit( temporal_x_train, temporal_y_train, batch_size=batch_size, epochs=epochs // 3, verbose=0, sample_weight=temporal_sample_weight, validation_split=0.1) model.train_on_batch( temporal_x_train[:batch_size], temporal_y_train[:batch_size], sample_weight=temporal_sample_weight[:batch_size]) model.test_on_batch( temporal_x_train[:batch_size], temporal_y_train[:batch_size], sample_weight=temporal_sample_weight[:batch_size]) ref_score = model.evaluate(temporal_x_test, temporal_y_test, verbose=0) if not context.executing_eagerly(): score = model.evaluate( temporal_x_test[test_ids], temporal_y_test[test_ids], verbose=0) self.assertLess(score[0], ref_score[0]) @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types(exclude_models='sequential') def test_fit_with_incorrect_weights(self): input_a = keras.layers.Input(shape=(3,), name='input_a') input_b = keras.layers.Input(shape=(3,), name='input_b') dense = keras.layers.Dense(2, name='output_1') dropout = keras.layers.Dropout(0.5, name='output_2') branch_a = [input_a, dense] branch_b = [input_b, dense, dropout] model = testing_utils.get_multi_io_model(branch_a, branch_b) model.compile( optimizer='adam', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.random.random((10, 3)) y = np.random.random((10, 2)) with self.assertRaisesRegexp( ValueError, r'Unknown entries in 
sample_weight dictionary: \[\'unknown\'\]. ' r'Only expected following keys: \[\'output_1\', \'output_2\'\]'): model.fit([x, x], [y, y], epochs=1, sample_weight={'unknown': 'something'}) with self.assertRaisesRegexp( ValueError, r'Unknown entries in class_weight dictionary: \[\'unknown\'\]. ' r'Only expected following keys: \[\'output_1\', \'output_2\'\]'): model.fit([x, x], [y, y], epochs=1, class_weight={'unknown': 'something'}) @keras_parameterized.run_all_keras_modes def test_class_weight_invalid_use_case(self): num_classes = 5 train_samples = 1000 test_samples = 1000 input_dim = 5 timesteps = 3 learning_rate = 0.001 with self.cached_session(): model = keras.models.Sequential() model.add( keras.layers.TimeDistributed( keras.layers.Dense(num_classes), input_shape=(timesteps, input_dim))) model.add(keras.layers.Activation('softmax')) optimizer = RMSPropOptimizer(learning_rate=learning_rate) model.compile( optimizer, loss='binary_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) (x_train, y_train), _ = testing_utils.get_test_data( train_samples=train_samples, test_samples=test_samples, input_shape=(input_dim,), num_classes=num_classes) # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) class_weight = dict([(i, 1.) for i in range(num_classes)]) del class_weight[1] with self.assertRaises(ValueError): model.fit(x_train, y_train, epochs=0, verbose=0, class_weight=class_weight) with self.assertRaises(ValueError): model.compile( optimizer, loss='binary_crossentropy', sample_weight_mode=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # Build multi-output model x = keras.Input((3,)) y1 = keras.layers.Dense(4, name='1')(x) y2 = keras.layers.Dense(4, name='2')(x) model = keras.models.Model(x, [y1, y2]) model.compile( optimizer, loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x_np = np.random.random((10, 3)) y_np = np.random.random((10, 4)) w_np = np.random.random((10,)) # This will work model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': w_np}) # These will not with self.assertRaises(ValueError): model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=[w_np]) with self.assertRaises(TypeError): model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=w_np) with self.assertRaises(ValueError): bad_w_np = np.random.random((11,)) model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np}) with self.assertRaises(ValueError): bad_w_np = np.random.random((10, 2)) model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np}) with self.assertRaises(ValueError): bad_w_np = np.random.random((10, 2, 2)) model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np}) @keras_parameterized.run_all_keras_modes def test_default_sample_weight(self): """Verifies that fit works without having to set sample_weight.""" num_classes = 5 input_dim = 5 timesteps = 3 learning_rate = 0.001 with self.cached_session(): model = keras.models.Sequential() model.add( keras.layers.TimeDistributed( keras.layers.Dense(num_classes), input_shape=(timesteps, input_dim))) x = np.random.random((10, timesteps, input_dim)) y = np.random.random((10, timesteps, num_classes)) optimizer = RMSPropOptimizer(learning_rate=learning_rate) # sample_weight_mode is a list and mode value is None model.compile( optimizer, loss='mse', 
sample_weight_mode=[None], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, epochs=1, batch_size=10) # sample_weight_mode is a list and mode value is `temporal` model.compile( optimizer, loss='mse', sample_weight_mode=['temporal'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, epochs=1, batch_size=10) # sample_weight_mode is a dict and mode value is None model.compile( optimizer, loss='mse', sample_weight_mode={'time_distributed': None}, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, epochs=1, batch_size=10) # sample_weight_mode is a dict and mode value is `temporal` model.compile( optimizer, loss='mse', sample_weight_mode={'time_distributed': 'temporal'}, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, epochs=1, batch_size=10) # sample_weight_mode is a not a list/dict and mode value is None model.compile( optimizer, loss='mse', sample_weight_mode=None, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, epochs=1, batch_size=10) # sample_weight_mode is a not a list/dict and mode value is `temporal` model.compile( optimizer, loss='mse', sample_weight_mode='temporal', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, epochs=1, batch_size=10) def test_sample_weight_tensor(self): """Tests that sample weight may be defined as a tensor in the graph.""" with context.graph_mode(): # Create a simple pass-through model input_layer = keras.layers.Input(shape=1, name='input_layer') model = keras.Model(inputs=input_layer, outputs=input_layer) model.compile( loss='mean_absolute_error', optimizer='adam') # Prepare sample weights iterator tensor sample_weights = array_ops.constant( [[0, .4, 1, 1], [2, .4, .3, 1]]) dataset = dataset_ops.Dataset.from_tensor_slices(sample_weights) sample_weights = dataset_ops.make_one_shot_iterator(dataset).get_next() sample_weights = training_utils.standardize_sample_weights( sample_weights, model.output_names) # Update model loss with sample weight tensor. 
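      # With the identity model above, every sample has an absolute error of
      # |0 - 1| = 1, so the weighted loss reduces to the mean of the sample
      # weights themselves: (0 + .4 + 1 + 1) / 4 for the first batch and
      # (2 + .4 + .3 + 1) / 4 for the second, which is what the assertions
      # below check.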
model._compile_weights_loss_and_weighted_metrics(sample_weights) feeds = {'input_layer:0': [[0], [0], [0], [0]], 'input_layer_target:0': [[1], [1], [1], [1]]} with self.cached_session() as sess: self.assertAllClose( (.4 + 1 + 1) / 4, sess.run(model.total_loss, feed_dict=feeds)) self.assertAllClose( (2+ .4 + .3 + 1) / 4, sess.run(model.total_loss, feed_dict=feeds)) def test_prepare_sample_weights(self): # pylint:disable=anomalous-backslash-in-string input_layer = keras.layers.Input(shape=1, name='input_layer') model = keras.Model(inputs=input_layer, outputs=[input_layer, input_layer]) sample_weights = array_ops.constant([0, .4, 1, 1]) temporal_weights = array_ops.constant([[1, 2], [3, 4], [5, 6]]) model.compile( loss='mean_absolute_error', optimizer='adam', sample_weight_mode=None) with self.assertRaises(AssertionError): model._prepare_sample_weights([sample_weights, sample_weights]) model.compile(loss='mean_absolute_error', optimizer='adam', sample_weight_mode='temporal') model._prepare_sample_weights([temporal_weights, temporal_weights]) with self.assertRaisesRegexp(ValueError, 'Expected shape \[None, None\]'): model._prepare_sample_weights([sample_weights, sample_weights]) with self.assertRaisesRegexp(ValueError, 'sample weights must have same length as the ' 'number of outputs'): model._prepare_sample_weights([temporal_weights]) model.compile(loss='mean_absolute_error', optimizer='adam', sample_weight_mode='samplewise') model._prepare_sample_weights([sample_weights, sample_weights]) with self.assertRaisesRegexp(ValueError, 'Expected shape \[None\]'): model._prepare_sample_weights([temporal_weights, temporal_weights]) # pylint:enable=anomalous-backslash-in-string @keras_parameterized.run_all_keras_modes class MaskingTest(keras_parameterized.TestCase): def _get_model(self, input_shape=None): layers = [ keras.layers.Masking(mask_value=0), keras.layers.TimeDistributed( keras.layers.Dense(1, kernel_initializer='one')) ] model = testing_utils.get_model_from_layers(layers, input_shape) model.compile( loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) return model @keras_parameterized.run_with_all_model_types def test_masking(self): model = self._get_model(input_shape=(2, 1)) x = np.array([[[1], [1]], [[0], [0]]]) y = np.array([[[1], [1]], [[1], [1]]]) loss = model.train_on_batch(x, y) self.assertEqual(loss, 0) @keras_parameterized.run_with_all_model_types(exclude_models='functional') def test_masking_deferred(self): model = self._get_model() x = np.array([[[1], [1]], [[0], [0]]]) y = np.array([[[1], [1]], [[1], [1]]]) loss = model.train_on_batch(x, y) self.assertEqual(loss, 0) def test_mask_argument_in_layer(self): # Test that the mask argument gets correctly passed to a layer in the # functional API. 
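    # `keras.layers.Masking(mask_value=0)` emits a boolean mask (False where
    # all features equal the mask value) that the functional API forwards to
    # downstream layers declaring `supports_masking`, which is why the custom
    # layer below can assert that its `mask` argument is not None.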
class CustomMaskedLayer(keras.layers.Layer): def __init__(self): super(CustomMaskedLayer, self).__init__() self.supports_masking = True def call(self, inputs, mask=None): assert mask is not None return inputs def compute_output_shape(self, input_shape): return input_shape x = np.random.random((5, 3)) inputs = keras.layers.Input((3,)) masked = keras.layers.Masking(mask_value=0)(inputs) outputs = CustomMaskedLayer()(masked) model = keras.Model(inputs, outputs) model.compile( loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) y = np.random.random((5, 3)) model.train_on_batch(x, y) @keras_parameterized.run_all_keras_modes class TestDynamicTrainability(keras_parameterized.TestCase): def test_trainable_warning(self): x = np.random.random((5, 3)) y = np.random.random((5, 2)) model = keras.models.Sequential() model.add(keras.layers.Dense(2, input_dim=3)) model.trainable = False model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.trainable = True model.train_on_batch(x, y) self.assertRaises(Warning) def test_trainable_argument(self): with self.cached_session(): x = np.random.random((5, 3)) y = np.random.random((5, 2)) model = keras.models.Sequential() model.add(keras.layers.Dense(2, input_dim=3, trainable=False)) model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) out = model.predict(x) model.train_on_batch(x, y) out_2 = model.predict(x) self.assertAllClose(out, out_2) # test with nesting inputs = keras.layers.Input(shape=(3,)) output = model(inputs) model = keras.models.Model(inputs, output) model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) out = model.predict(x) model.train_on_batch(x, y) out_2 = model.predict(x) self.assertAllClose(out, out_2) def test_layer_trainability_switch(self): # with constructor argument, in Sequential model = keras.models.Sequential() model.add(keras.layers.Dense(2, trainable=False, input_dim=1)) self.assertListEqual(model.trainable_weights, []) # by setting the `trainable` argument, in Sequential model = keras.models.Sequential() layer = keras.layers.Dense(2, input_dim=1) model.add(layer) self.assertListEqual(model.trainable_weights, layer.trainable_weights) layer.trainable = False self.assertListEqual(model.trainable_weights, []) # with constructor argument, in Model x = keras.layers.Input(shape=(1,)) y = keras.layers.Dense(2, trainable=False)(x) model = keras.models.Model(x, y) self.assertListEqual(model.trainable_weights, []) # by setting the `trainable` argument, in Model x = keras.layers.Input(shape=(1,)) layer = keras.layers.Dense(2) y = layer(x) model = keras.models.Model(x, y) self.assertListEqual(model.trainable_weights, layer.trainable_weights) layer.trainable = False self.assertListEqual(model.trainable_weights, []) def test_model_trainability_switch(self): # a non-trainable model has no trainable weights x = keras.layers.Input(shape=(1,)) y = keras.layers.Dense(2)(x) model = keras.models.Model(x, y) model.trainable = False self.assertListEqual(model.trainable_weights, []) # same for Sequential model = keras.models.Sequential() model.add(keras.layers.Dense(2, input_dim=1)) model.trainable = False 
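    # Toggling `trainable` on the enclosing model is enough: the Dense layer's
    # kernel and bias stop being reported under `trainable_weights` and show
    # up under `non_trainable_weights` instead, without touching the
    # underlying variables.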
self.assertListEqual(model.trainable_weights, []) def test_nested_model_trainability(self): # a Sequential inside a Model inner_model = keras.models.Sequential() inner_model.add(keras.layers.Dense(2, input_dim=1)) x = keras.layers.Input(shape=(1,)) y = inner_model(x) outer_model = keras.models.Model(x, y) self.assertListEqual(outer_model.trainable_weights, inner_model.trainable_weights) inner_model.trainable = False self.assertListEqual(outer_model.trainable_weights, []) inner_model.trainable = True inner_model.layers[-1].trainable = False self.assertListEqual(outer_model.trainable_weights, []) # a Sequential inside a Sequential inner_model = keras.models.Sequential() inner_model.add(keras.layers.Dense(2, input_dim=1)) outer_model = keras.models.Sequential() outer_model.add(inner_model) self.assertListEqual(outer_model.trainable_weights, inner_model.trainable_weights) inner_model.trainable = False self.assertListEqual(outer_model.trainable_weights, []) inner_model.trainable = True inner_model.layers[-1].trainable = False self.assertListEqual(outer_model.trainable_weights, []) # a Model inside a Model x = keras.layers.Input(shape=(1,)) y = keras.layers.Dense(2)(x) inner_model = keras.models.Model(x, y) x = keras.layers.Input(shape=(1,)) y = inner_model(x) outer_model = keras.models.Model(x, y) self.assertListEqual(outer_model.trainable_weights, inner_model.trainable_weights) inner_model.trainable = False self.assertListEqual(outer_model.trainable_weights, []) inner_model.trainable = True inner_model.layers[-1].trainable = False self.assertListEqual(outer_model.trainable_weights, []) # a Model inside a Sequential x = keras.layers.Input(shape=(1,)) y = keras.layers.Dense(2)(x) inner_model = keras.models.Model(x, y) outer_model = keras.models.Sequential() outer_model.add(inner_model) self.assertListEqual(outer_model.trainable_weights, inner_model.trainable_weights) inner_model.trainable = False self.assertListEqual(outer_model.trainable_weights, []) inner_model.trainable = True inner_model.layers[-1].trainable = False self.assertListEqual(outer_model.trainable_weights, []) def test_gan_workflow(self): shared_layer = keras.layers.BatchNormalization() inputs1 = keras.Input(10) outputs1 = shared_layer(inputs1) model1 = keras.Model(inputs1, outputs1) shared_layer.trainable = False model1.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) inputs2 = keras.Input(10) outputs2 = shared_layer(inputs2) model2 = keras.Model(inputs2, outputs2) shared_layer.trainable = True model2.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x, y = np.ones((10, 10)), np.ones((10, 10)) out1_0 = model1.predict_on_batch(x) model1.train_on_batch(x, y) out1_1 = model1.predict_on_batch(x) self.assertAllClose(out1_0, out1_1) out2_0 = model2.predict_on_batch(x) model2.train_on_batch(x, y) out2_1 = model2.predict_on_batch(x) self.assertNotAllClose(out2_0, out2_1) class TestTrainingWithDataTensors(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes def test_training_and_eval_methods_on_symbolic_tensors_single_io(self): # TODO(kaftan) Test seems to not work, file ticket if context.executing_eagerly(): self.skipTest('Skipping eager execution.') x = keras.layers.Input(shape=(3,), name='input') y = keras.layers.Dense(4, name='dense')(x) model = keras.Model(x, y) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' 
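    # The inputs and targets below are symbolic `keras.backend.zeros` tensors
    # rather than numpy arrays, so every fit/evaluate/predict call has to be
    # driven by `steps`-style arguments instead of a batch size.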
model.compile( optimizer, loss, metrics=['mae', metrics_module.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) inputs = keras.backend.zeros(shape=(10, 3)) targets = keras.backend.zeros(shape=(10, 4)) model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0) model.evaluate(inputs, targets, steps=2, verbose=0) model.predict(inputs, steps=2) model.train_on_batch(inputs, targets) model.test_on_batch(inputs, targets) model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0, validation_data=(inputs, targets), validation_steps=2) # Test with dynamic shape inputs = array_ops.placeholder_with_default( np.zeros((2, 3)), shape=tensor_shape.TensorShape([None, 3])) targets = array_ops.placeholder_with_default( np.zeros((2, 4)), shape=tensor_shape.TensorShape([None, 4])) self.assertEqual(inputs.shape.dims[0].value, None) model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0) model.evaluate(inputs, targets, steps=2, verbose=0) model.predict(inputs, steps=2) model.train_on_batch(inputs, targets) model.test_on_batch(inputs, targets) model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0, validation_data=(inputs, targets), validation_steps=2) @keras_parameterized.run_all_keras_modes def test_training_and_eval_methods_on_symbolic_tensors_multi_io(self): # TODO(kaftan) Test seems to not work, file ticket if context.executing_eagerly(): self.skipTest('Skipping eager execution.') a = keras.layers.Input(shape=(3,), name='input_a') b = keras.layers.Input(shape=(3,), name='input_b') dense = keras.layers.Dense(4, name='dense') c = dense(a) d = dense(b) e = keras.layers.Dropout(0.5, name='dropout')(c) model = keras.models.Model([a, b], [d, e]) optimizer = 'rmsprop' loss = 'mse' loss_weights = [1., 0.5] model.compile( optimizer, loss, metrics=['mae', metrics_module.CategoricalAccuracy()], loss_weights=loss_weights, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) input_a_tf = keras.backend.zeros(shape=(10, 3)) input_b_tf = keras.backend.zeros(shape=(10, 3)) output_d_tf = keras.backend.zeros(shape=(10, 4)) output_e_tf = keras.backend.zeros(shape=(10, 4)) model.fit( [input_a_tf, input_b_tf], [output_d_tf, output_e_tf], epochs=1, steps_per_epoch=2, verbose=0) with self.assertRaisesRegexp(ValueError, 'should specify the `steps_per_epoch`'): model.fit( [input_a_tf, input_b_tf], [output_d_tf, output_e_tf], epochs=1, batch_size=5, verbose=0) model.train_on_batch([input_a_tf, input_b_tf], [output_d_tf, output_e_tf]) # Test with dictionary inputs model.fit( {'input_a': input_a_tf, 'input_b': input_b_tf}, {'dense': output_d_tf, 'dropout': output_e_tf}, epochs=1, steps_per_epoch=2, verbose=0) model.fit( {'input_a': input_a_tf, 'input_b': input_b_tf}, {'dense': output_d_tf, 'dropout': output_e_tf}, validation_data=({'input_a': input_a_tf, 'input_b': input_b_tf}, {'dense': output_d_tf, 'dropout': output_e_tf}), epochs=1, steps_per_epoch=2, validation_steps=2, verbose=0) model.train_on_batch( {'input_a': input_a_tf, 'input_b': input_b_tf}, {'dense': output_d_tf, 'dropout': output_e_tf}) # Test with validation data model.fit( [input_a_tf, input_b_tf], [output_d_tf, output_e_tf], validation_data=([input_a_tf, input_b_tf], [output_d_tf, output_e_tf]), epochs=1, steps_per_epoch=2, validation_steps=2, verbose=0) # Test with validation split with self.assertRaisesRegexp(ValueError, 'you cannot use `validation_split`'): model.fit( 
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf], epochs=2, steps_per_epoch=2, verbose=0, validation_split=0.2, validation_steps=2) # Test evaluation / prediction methods model.evaluate([input_a_tf, input_b_tf], [output_d_tf, output_e_tf], steps=2, verbose=0) model.predict([input_a_tf, input_b_tf], steps=2) model.test_on_batch([input_a_tf, input_b_tf], [output_d_tf, output_e_tf]) @tf_test_util.run_deprecated_v1 def test_model_with_input_feed_tensor(self): """We test building a model with a TF variable as input. We should be able to call fit, evaluate, predict, by only passing them data for the placeholder inputs in the model. """ with self.cached_session(): input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_a_np = np.random.random((10, 4)) output_b_np = np.random.random((10, 3)) input_v = keras.backend.variables_module.Variable( input_a_np, dtype='float32') self.evaluate(variables_lib.variables_initializer([input_v])) a = keras.Input(tensor=input_v) b = keras.Input(shape=(3,), name='input_b') a_2 = keras.layers.Dense(4, name='dense_1')(a) dp = keras.layers.Dropout(0.5, name='dropout') b_2 = dp(b) model = keras.models.Model([a, b], [a_2, b_2]) model.summary() optimizer = 'rmsprop' loss = 'mse' loss_weights = [1., 0.5] model.compile(optimizer, loss, metrics=['mean_squared_error'], loss_weights=loss_weights, sample_weight_mode=None) # test train_on_batch out = model.train_on_batch(input_b_np, [output_a_np, output_b_np]) out = model.train_on_batch({'input_b': input_b_np}, [output_a_np, output_b_np]) out = model.test_on_batch({'input_b': input_b_np}, [output_a_np, output_b_np]) out = model.predict_on_batch({'input_b': input_b_np}) # test fit out = model.fit({'input_b': input_b_np}, [output_a_np, output_b_np], epochs=1, batch_size=10) out = model.fit(input_b_np, [output_a_np, output_b_np], epochs=1, batch_size=10) # test evaluate out = model.evaluate({'input_b': input_b_np}, [output_a_np, output_b_np], batch_size=10) out = model.evaluate(input_b_np, [output_a_np, output_b_np], batch_size=10) # test predict out = model.predict({'input_b': input_b_np}, batch_size=10) out = model.predict(input_b_np, batch_size=10) self.assertEqual(len(out), 2) # Now test a model with a single input # i.e. we don't pass any data to fit the model. self.evaluate(variables_lib.variables_initializer([input_v])) a = keras.Input(tensor=input_v) a_2 = keras.layers.Dense(4, name='dense_1')(a) a_2 = keras.layers.Dropout(0.5, name='dropout')(a_2) model = keras.models.Model(a, a_2) model.summary() optimizer = 'rmsprop' loss = 'mse' model.compile(optimizer, loss, metrics=['mean_squared_error']) # test train_on_batch out = model.train_on_batch(None, output_a_np) out = model.train_on_batch(None, output_a_np) out = model.test_on_batch(None, output_a_np) out = model.predict_on_batch(None) out = model.train_on_batch([], output_a_np) out = model.train_on_batch({}, output_a_np) # test fit _ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3) _ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3) # test evaluate _ = model.evaluate(None, output_a_np, steps=3) _ = model.evaluate(None, output_a_np, steps=3) # test predict out = model.predict(None, steps=3) out = model.predict(None, steps=3) self.assertEqual(out.shape, (10 * 3, 4)) # Same, without learning phase # i.e. we don't pass any data to fit the model. 
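      # As above, the model's only input is the `input_v` variable captured
      # via `keras.Input(tensor=input_v)`, so fit/evaluate/predict are driven
      # purely by step counts and no feed data is passed. Without the Dropout
      # layer there is also no learning-phase placeholder to feed.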
self.evaluate(variables_lib.variables_initializer([input_v])) a = keras.Input(tensor=input_v) a_2 = keras.layers.Dense(4, name='dense_1')(a) model = keras.models.Model(a, a_2) model.summary() optimizer = 'rmsprop' loss = 'mse' model.compile(optimizer, loss, metrics=['mean_squared_error']) # test train_on_batch out = model.train_on_batch(None, output_a_np) out = model.train_on_batch(None, output_a_np) out = model.test_on_batch(None, output_a_np) out = model.predict_on_batch(None) out = model.train_on_batch([], output_a_np) out = model.train_on_batch({}, output_a_np) # test fit _ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10) _ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10) # test evaluate _ = model.evaluate(None, output_a_np, steps=10) _ = model.evaluate(None, output_a_np, steps=10) # test predict out = model.predict(None, steps=3) out = model.predict(None, steps=3) self.assertEqual(out.shape, (10 * 3, 4)) def test_model_with_partial_loss(self): with self.cached_session(): a = keras.Input(shape=(3,), name='input_a') a_2 = keras.layers.Dense(4, name='dense_1')(a) dp = keras.layers.Dropout(0.5, name='dropout') a_3 = dp(a_2) model = keras.models.Model(a, [a_2, a_3]) optimizer = 'rmsprop' loss = {'dropout': 'mse'} model.compile(optimizer, loss, metrics=['mae']) input_a_np = np.random.random((10, 3)) output_a_np = np.random.random((10, 4)) # test train_on_batch _ = model.train_on_batch(input_a_np, output_a_np) _ = model.test_on_batch(input_a_np, output_a_np) # fit _ = model.fit(input_a_np, [output_a_np]) # evaluate _ = model.evaluate(input_a_np, [output_a_np]) # Same without dropout. a = keras.Input(shape=(3,), name='input_a') a_2 = keras.layers.Dense(4, name='dense_1')(a) a_3 = keras.layers.Dense(4, name='dense_2')(a_2) model = keras.models.Model(a, [a_2, a_3]) optimizer = 'rmsprop' loss = {'dense_2': 'mse'} model.compile(optimizer, loss, metrics={'dense_1': 'mae'}) # test train_on_batch _ = model.train_on_batch(input_a_np, output_a_np) _ = model.test_on_batch(input_a_np, output_a_np) # fit _ = model.fit(input_a_np, [output_a_np]) # evaluate _ = model.evaluate(input_a_np, [output_a_np]) @tf_test_util.run_deprecated_v1 def test_model_with_external_loss(self): with self.cached_session(): # None loss, only regularization loss. a = keras.Input(shape=(3,), name='input_a') a_2 = keras.layers.Dense(4, name='dense_1', kernel_regularizer='l1', bias_regularizer='l2')(a) dp = keras.layers.Dropout(0.5, name='dropout') a_3 = dp(a_2) model = keras.models.Model(a, [a_2, a_3]) optimizer = 'rmsprop' loss = None model.compile(optimizer, loss, metrics=['mae']) input_a_np = np.random.random((10, 3)) # test train_on_batch out = model.train_on_batch(input_a_np, None) out = model.test_on_batch(input_a_np, None) # fit out = model.fit(input_a_np, None) # evaluate out = model.evaluate(input_a_np, None) # No dropout, external loss. a = keras.Input(shape=(3,), name='input_a') a_2 = keras.layers.Dense(4, name='dense_1')(a) a_3 = keras.layers.Dense(4, name='dense_2')(a) model = keras.models.Model(a, [a_2, a_3]) model.add_loss(keras.backend.mean(a_3 + a_2)) optimizer = 'rmsprop' loss = None model.compile(optimizer, loss, metrics=['mae']) # test train_on_batch out = model.train_on_batch(input_a_np, None) out = model.test_on_batch(input_a_np, None) # fit out = model.fit(input_a_np, None) # evaluate out = model.evaluate(input_a_np, None) # Test model with no external data at all. 
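      # Here the only input is the `input_v` variable and the only loss is the
      # one registered through `model.add_loss`, so the batch_size-based calls
      # below are expected to raise while the steps-based calls succeed.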
input_v = keras.backend.variables_module.Variable( input_a_np, dtype='float32') self.evaluate(variables_lib.variables_initializer([input_v])) a = keras.Input(tensor=input_v) a_2 = keras.layers.Dense(4, name='dense_1')(a) a_2 = keras.layers.Dropout(0.5, name='dropout')(a_2) model = keras.models.Model(a, a_2) model.add_loss(keras.backend.mean(a_2)) model.compile(optimizer='rmsprop', loss=None, metrics=['mean_squared_error']) # test train_on_batch out = model.train_on_batch(None, None) out = model.test_on_batch(None, None) out = model.predict_on_batch(None) # test fit with self.assertRaises(ValueError): out = model.fit(None, None, epochs=1, batch_size=10) out = model.fit(None, None, epochs=1, steps_per_epoch=1) # test fit with validation data with self.assertRaises(ValueError): out = model.fit(None, None, epochs=1, steps_per_epoch=None, validation_steps=2) out = model.fit(None, None, epochs=1, steps_per_epoch=2, validation_steps=2) # test evaluate with self.assertRaises(ValueError): out = model.evaluate(None, None, batch_size=10) out = model.evaluate(None, None, steps=3) # test predict with self.assertRaises(ValueError): out = model.predict(None, batch_size=10) out = model.predict(None, steps=3) self.assertEqual(out.shape, (10 * 3, 4)) # Test multi-output model with no external data at all. self.evaluate(variables_lib.variables_initializer([input_v])) a = keras.Input(tensor=input_v) a_1 = keras.layers.Dense(4, name='dense_1')(a) a_2 = keras.layers.Dropout(0.5, name='dropout')(a_1) model = keras.models.Model(a, [a_1, a_2]) model.add_loss(keras.backend.mean(a_2)) model.compile(optimizer='rmsprop', loss=None, metrics=['mean_squared_error']) # test train_on_batch out = model.train_on_batch(None, None) out = model.test_on_batch(None, None) out = model.predict_on_batch(None) # test fit with self.assertRaises(ValueError): out = model.fit(None, None, epochs=1, batch_size=10) out = model.fit(None, None, epochs=1, steps_per_epoch=1) # test evaluate with self.assertRaises(ValueError): out = model.evaluate(None, None, batch_size=10) out = model.evaluate(None, None, steps=3) # test predict with self.assertRaises(ValueError): out = model.predict(None, batch_size=10, verbose=1) out = model.predict(None, steps=3) self.assertEqual(len(out), 2) self.assertEqual(out[0].shape, (10 * 3, 4)) self.assertEqual(out[1].shape, (10 * 3, 4)) def test_target_tensors(self): with self.cached_session(): # single-output, as list model = keras.models.Sequential() model.add(keras.layers.Dense(4, input_shape=(4,), name='dense')) input_val = np.random.random((10, 4)) target_val = np.random.random((10, 4)) target = keras.backend.variable(target_val) model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target]) model.train_on_batch(input_val, None) # single-output, as single tensor model.compile(optimizer='rmsprop', loss='mse', target_tensors=target) model.train_on_batch(input_val, None) # single-output, as dict model.compile(optimizer='rmsprop', loss='mse', target_tensors={'dense': target}) model.train_on_batch(input_val, None) # test invalid arguments with self.assertRaises(TypeError): model.compile(optimizer='rmsprop', loss='mse', target_tensors=set()) with self.assertRaises(ValueError): model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target, target]) with self.assertRaises(ValueError): model.compile(optimizer='rmsprop', loss='mse', target_tensors={'dense2': None}) with self.assertRaises(ValueError): model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target]) model.train_on_batch(input_val, 
target_val) # multi-output, as list input_val = np.random.random((10, 4)) target_val_a = np.random.random((10, 4)) target_val_b = np.random.random((10, 4)) target_a = keras.backend.variable(target_val_a) target_b = keras.backend.variable(target_val_b) inputs = keras.layers.Input(shape=(4,)) output_a = keras.layers.Dense(4, name='dense_a')(inputs) output_b = keras.layers.Dense(4, name='dense_b')(inputs) model = keras.models.Model(inputs, [output_a, output_b]) model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target_a, target_b]) model.train_on_batch(input_val, None) # multi-output, as dict model.compile(optimizer='rmsprop', loss='mse', target_tensors={'dense_a': target_a, 'dense_b': target_b}) model.train_on_batch(input_val, None) # test with sample weights model.compile( optimizer='rmsprop', loss='mse', metrics=['mae', metrics_module.CategoricalAccuracy()], target_tensors=[target_a, target_b]) model.train_on_batch(input_val, None, sample_weight={'dense_a': np.random.random((10,))}) @tf_test_util.run_deprecated_v1 def test_model_custom_target_tensors(self): with self.cached_session(): a = keras.Input(shape=(3,), name='input_a') b = keras.Input(shape=(3,), name='input_b') a_2 = keras.layers.Dense(4, name='dense_1')(a) dp = keras.layers.Dropout(0.5, name='dropout') b_2 = dp(b) y = keras.backend.placeholder([10, 4], name='y') y1 = keras.backend.placeholder([10, 3], name='y1') y2 = keras.backend.placeholder([7, 5], name='y2') model = keras.models.Model([a, b], [a_2, b_2]) optimizer = 'rmsprop' loss = 'mse' loss_weights = [1., 0.5] # test list of target tensors with self.assertRaises(ValueError): model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights, sample_weight_mode=None, target_tensors=[y, y1, y2]) model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights, sample_weight_mode=None, target_tensors=[y, y1]) input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_a_np = np.random.random((10, 4)) output_b_np = np.random.random((10, 3)) _ = model.train_on_batch([input_a_np, input_b_np], [output_a_np, output_b_np], { 'dense_1': np.random.random((10,)), 'dropout': np.random.random((10,)) }) # test dictionary of target_tensors with self.assertRaises(ValueError): model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights, sample_weight_mode=None, target_tensors={'does_not_exist': y2}) # test dictionary of target_tensors model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights, sample_weight_mode=None, target_tensors={'dense_1': y, 'dropout': y1}) _ = model.train_on_batch([input_a_np, input_b_np], [output_a_np, output_b_np], { 'dense_1': np.random.random((10,)), 'dropout': np.random.random((10,)) }) # test with custom TF placeholder as target pl_target_a = keras.backend.array_ops.placeholder('float32', shape=(None, 4)) model.compile(optimizer='rmsprop', loss='mse', target_tensors={'dense_1': pl_target_a}) model.train_on_batch([input_a_np, input_b_np], [output_a_np, output_b_np]) class TestTrainingWithMetrics(keras_parameterized.TestCase): """Training tests related to metrics.""" @keras_parameterized.run_all_keras_modes def test_metrics_names(self): a = keras.layers.Input(shape=(3,), name='input_a') b = keras.layers.Input(shape=(3,), name='input_b') dense = keras.layers.Dense(4, name='dense') c = dense(a) d = dense(b) e = keras.layers.Dropout(0.5, name='dropout')(c) model = keras.models.Model([a, b], [d, e]) optimizer = RMSPropOptimizer(learning_rate=0.001) metrics = ['mse', metrics_module.BinaryAccuracy()] 
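    # For multi-output models Keras prefixes each metric with the name of the
    # output it tracks (e.g. 'dense_mse', 'dropout_binary_accuracy') and adds
    # a per-output '<name>_loss' entry; the reference list below spells out
    # the expected order.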
model.compile( optimizer, loss='mae', metrics=metrics, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) mse_metric = 'mse' if tf2.enabled() else 'mean_squared_error' reference_metric_names = [ 'loss', 'dense_loss', 'dropout_loss', 'dense_' + mse_metric, 'dense_binary_accuracy', 'dropout_' + mse_metric, 'dropout_binary_accuracy' ] self.assertEqual(reference_metric_names, model.metrics_names) # Verify that model metric names are not altered during training. input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_d_np = np.random.random((10, 4)) output_e_np = np.random.random((10, 4)) model.fit([input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5) self.assertEqual(reference_metric_names, model.metrics_names) @keras_parameterized.run_all_keras_modes def test_metric_state_reset_between_fit_and_evaluate(self): model = keras.Sequential() model.add(keras.layers.Dense(3, activation='relu', input_dim=4)) model.add(keras.layers.Dense(1, activation='sigmoid')) acc_obj = metrics_module.BinaryAccuracy() model.compile( loss='mae', metrics=[acc_obj], optimizer=RMSPropOptimizer(learning_rate=0.001), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x_train = np.random.random((100, 4)) y_train = np.random.random((100, 1)) model.fit(x_train, y_train, batch_size=5, epochs=2) self.assertEqual(self.evaluate(acc_obj.count), 100) x_test = np.random.random((10, 4)) y_test = np.random.random((10, 1)) model.evaluate(x_test, y_test, batch_size=5) self.assertEqual(self.evaluate(acc_obj.count), 10) @keras_parameterized.run_with_all_model_types(exclude_models=['sequential']) @keras_parameterized.run_all_keras_modes def test_metrics_valid_compile_input_formats(self): inp_1 = keras.layers.Input(shape=(1,), name='input_1') inp_2 = keras.layers.Input(shape=(1,), name='input_2') x = keras.layers.Dense(3, kernel_initializer='ones', trainable=False) out_1 = keras.layers.Dense( 1, kernel_initializer='ones', name='output_1', trainable=False) out_2 = keras.layers.Dense( 1, kernel_initializer='ones', name='output_2', trainable=False) branch_a = [inp_1, x, out_1] branch_b = [inp_2, x, out_2] model = testing_utils.get_multi_io_model(branch_a, branch_b) # list of metrics. model.compile( optimizer='rmsprop', loss='mse', metrics=[keras.metrics.MeanSquaredError()], weighted_metrics=[keras.metrics.MeanSquaredError()], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # list of list of metrics. model.compile( optimizer='rmsprop', loss='mse', metrics=[ keras.metrics.MeanSquaredError(), [keras.metrics.MeanSquaredError(), keras.metrics.Accuracy()] ], weighted_metrics=[ keras.metrics.MeanSquaredError(), [keras.metrics.MeanSquaredError(), keras.metrics.Accuracy()] ], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # dict of metrics. 
model.compile( optimizer='rmsprop', loss='mse', metrics={ 'output_1': keras.metrics.MeanSquaredError(), 'output_2': [ keras.metrics.MeanSquaredError(), keras.metrics.Accuracy() ], }, weighted_metrics={ 'output_1': keras.metrics.MeanSquaredError(), 'output_2': [ keras.metrics.MeanSquaredError(), keras.metrics.Accuracy() ], }, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) @keras_parameterized.run_all_keras_modes def test_invalid_metrics(self): num_classes = 5 input_dim = 5 model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=num_classes, input_dim=input_dim) with self.assertRaisesRegexp( TypeError, 'Type of `metrics` argument not understood. ' 'Expected a list or dictionary, found: '): model.compile( RMSPropOptimizer(learning_rate=0.001), loss='categorical_crossentropy', metrics=metrics_module.CategoricalAccuracy(), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) inp = keras.layers.Input(shape=(1,)) x = keras.layers.Dense(3, activation='relu')(inp) out_1 = keras.layers.Dense(1, activation='sigmoid', name='output_1')(x) out_2 = keras.layers.Dense(1, activation='sigmoid', name='output_2')(x) model = keras.models.Model(inp, [out_1, out_2]) with self.assertRaisesRegex( ValueError, 'When passing a list of lists as `metrics`, ' 'it should have one entry per model output. ' 'The model has 2 outputs, but you passed metrics='): model.compile('rmsprop', loss='mse', metrics=[['mse']]) with self.assertRaisesRegex( ValueError, r'Unknown entries in metrics dictionary: \[\'output_3\'\]. Only ' r'expected following keys: \[\'output_1\', \'output_2\'\]'): model.compile( optimizer='rmsprop', loss='mse', metrics={ 'output_1': 'mse', 'output_3': 'mse', }, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) with self.assertRaisesRegex( ValueError, r'Unknown entries in metrics dictionary: \[\'output_3\'\]. Only ' r'expected following keys: \[\'output_1\', \'output_2\'\]'): model.compile( optimizer='rmsprop', loss='mse', weighted_metrics={ 'output_1': 'mse', 'output_3': 'mse', }, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) @keras_parameterized.run_all_keras_modes def test_metrics_masking(self): if testing_utils.should_run_eagerly(): self.skipTest('b/120495761') with self.cached_session(): np.random.seed(1337) model = keras.models.Sequential() model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1))) model.add( keras.layers.TimeDistributed( keras.layers.Dense(1, kernel_initializer='ones'))) model.compile( RMSPropOptimizer(learning_rate=0.001), loss='mse', weighted_metrics=['accuracy'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # verify that masking is applied. x = np.array([[[1], [1]], [[1], [1]], [[0], [0]]]) y = np.array([[[1], [1]], [[0], [1]], [[1], [1]]]) scores = model.train_on_batch(x, y) self.assertArrayNear(scores, [0.25, 0.75], 0.1) # verify that masking is combined with sample weights. 
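      # The third sample is fully masked (all zeros), so it contributes
      # nothing to the loss or the weighted metrics; the remaining samples are
      # scaled by their entries in `w`, which shifts the scores relative to
      # the unweighted batch above.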
w = np.array([3, 2, 4]) scores = model.train_on_batch(x, y, sample_weight=w) self.assertArrayNear(scores, [0.3328, 0.8], 0.001) @keras_parameterized.run_all_keras_modes def test_add_metric_with_tensor_on_model(self): x = keras.layers.Input(shape=(1,)) y = keras.layers.Dense(1, kernel_initializer='ones')(x) model = keras.models.Model(x, y) model.add_metric( math_ops.reduce_sum(y), name='metric_1', aggregation='mean') if context.executing_eagerly(): # This is not a use case in v1 graph mode. mean_result = metrics_module.Mean()(y) with self.assertRaisesRegex( ValueError, 'Expected a symbolic Tensor for the metric value'): model.add_metric(mean_result, name='metric_2') with self.assertRaisesRegex( ValueError, 'Using the result of calling a `Metric` object '): with keras.backend.get_graph().as_default(): model.add_metric(metrics_module.Mean(name='metric_2')(y)) model.compile( 'sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) inputs = np.ones(shape=(10, 1)) targets = np.ones(shape=(10, 1)) history = model.fit( inputs, targets, epochs=2, batch_size=5, validation_data=(inputs, targets)) self.assertEqual(history.history['metric_1'][-1], 5) self.assertEqual(history.history['val_metric_1'][-1], 5) eval_results = model.evaluate(inputs, targets, batch_size=5) self.assertEqual(eval_results[-1], 5) model.predict(inputs, batch_size=5) model.train_on_batch(inputs, targets) model.test_on_batch(inputs, targets) @keras_parameterized.run_all_keras_modes def test_add_metric_in_model_call(self): class TestModel(keras.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = keras.layers.Dense(2, kernel_initializer='ones') self.mean = metrics_module.Mean(name='metric_1') def call(self, x): self.add_metric( math_ops.reduce_sum(x), name='metric_2', aggregation='mean') # Provide same name as in the instance created in __init__ # for eager mode self.add_metric(self.mean(x), name='metric_1') return self.dense1(x) model = TestModel() model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) self.assertAlmostEqual(history.history['metric_1'][-1], 1, 0) self.assertAlmostEqual(history.history['val_metric_1'][-1], 1, 0) self.assertAlmostEqual(history.history['metric_2'][-1], 5, 0) self.assertAlmostEqual(history.history['val_metric_2'][-1], 5, 0) eval_results = model.evaluate(x, y, batch_size=5) self.assertAlmostEqual(eval_results[1], 1, 0) self.assertAlmostEqual(eval_results[2], 5, 0) model.predict(x, batch_size=5) model.train_on_batch(x, y) model.test_on_batch(x, y) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_add_metric_in_layer_call(self): class TestLayer(keras.layers.Layer): def build(self, input_shape): self.a = self.add_variable( 'a', (1, 1), initializer='ones', trainable=False) self.built = True def call(self, inputs): self.add_metric( math_ops.reduce_sum(inputs), name='metric_1', aggregation='mean') return inputs + 1 layers = [ TestLayer(input_shape=(1,)), keras.layers.Dense(2, kernel_initializer='ones') ] model = testing_utils.get_model_from_layers(layers, input_shape=(1,)) model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly(), 
experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) self.assertEqual(history.history['metric_1'][-1], 5) self.assertAlmostEqual(history.history['val_metric_1'][-1], 5, 0) @keras_parameterized.run_all_keras_modes def test_model_metrics_list(self): class LayerWithAddMetric(keras.layers.Layer): def __init__(self): super(LayerWithAddMetric, self).__init__() self.dense = keras.layers.Dense(1, kernel_initializer='ones') def __call__(self, inputs): outputs = self.dense(inputs) self.add_metric( math_ops.reduce_sum(outputs), name='metric_1', aggregation='mean') return outputs class LayerWithNestedAddMetricLayer(keras.layers.Layer): def __init__(self): super(LayerWithNestedAddMetricLayer, self).__init__() self.layer = LayerWithAddMetric() def call(self, inputs): outputs = self.layer(inputs) self.add_metric( math_ops.reduce_sum(outputs), name='metric_2', aggregation='mean') return outputs x = keras.layers.Input(shape=(1,)) y = LayerWithNestedAddMetricLayer()(x) model = keras.models.Model(x, y) model.add_metric( math_ops.reduce_sum(y), name='metric_3', aggregation='mean') if context.executing_eagerly(): # This is not a use case in v1 graph mode. mean_result = metrics_module.Mean()(y) with self.assertRaisesRegex( ValueError, 'Expected a symbolic Tensor for the metric value'): model.add_metric(mean_result, name='metric_4') with self.assertRaisesRegex( ValueError, 'Using the result of calling a `Metric` object '): with keras.backend.get_graph().as_default(): model.add_metric(metrics_module.Mean(name='metric_4')(y)) model.compile( 'sgd', loss='mse', metrics=[metrics_module.Accuracy('metric_4')], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # Verify that the metrics added using `compile` and `add_metric` API are # included self.assertEqual([m.name for m in model._compile_metrics], ['metric_4']) self.assertEqual([m.name for m in model.metrics], ['metric_4', 'metric_2', 'metric_1', 'metric_3']) @keras_parameterized.run_all_keras_modes def test_model_metrics_list_in_call(self): class TestModel(keras.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = keras.layers.Dense(2, kernel_initializer='ones') def call(self, x): self.add_metric( math_ops.reduce_sum(x), name='metric_1', aggregation='mean') return self.dense1(x) model = TestModel() model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), metrics=[metrics_module.Accuracy('acc')], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) self.assertEqual([m.name for m in model._compile_metrics], ['acc']) self.assertEqual([m.name for m in model.metrics], ['acc', 'metric_1']) @keras_parameterized.run_all_keras_modes def test_multiple_add_metric_calls(self): class TestModel(keras.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = keras.layers.Dense(2, kernel_initializer='ones') self.mean1 = metrics_module.Mean(name='metric_1') self.mean2 = metrics_module.Mean(name='metric_2') def call(self, x): self.add_metric(self.mean2(x), name='metric_2') self.add_metric(self.mean1(x), name='metric_1') self.add_metric( math_ops.reduce_sum(x), name='metric_3', aggregation='mean') return self.dense1(x) 
model = TestModel() model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) self.assertAlmostEqual(history.history['metric_1'][-1], 1, 0) self.assertAlmostEqual(history.history['metric_2'][-1], 1, 0) self.assertAlmostEqual(history.history['metric_3'][-1], 5, 0) eval_results = model.evaluate(x, y, batch_size=5) self.assertArrayNear(eval_results[1:4], [1, 1, 5], 0.1) model.predict(x, batch_size=5) model.train_on_batch(x, y) model.test_on_batch(x, y) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_invalid_metric_tensor(self): class TestLayer(keras.layers.Layer): def build(self, input_shape): self.built = True def call(self, inputs): self.add_metric(math_ops.reduce_mean(inputs), name='metric_1') return inputs + 1 layers = [TestLayer(input_shape=(1,))] layers.append(keras.layers.Dense(2, kernel_initializer='ones')) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) with self.assertRaisesRegexp( ValueError, 'We do not support adding an aggregated metric result tensor that is ' 'not the output of a `tf.keras.metrics.Metric` metric instance.'): model = testing_utils.get_model_from_layers(layers, input_shape=(1,)) model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) @keras_parameterized.run_all_keras_modes def test_duplicate_metric_name_in_add_metric(self): class TestModel(keras.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = keras.layers.Dense(2, kernel_initializer='ones') self.mean = metrics_module.Mean(name='metric_1') self.mean2 = metrics_module.Mean(name='metric_1') def call(self, x): self.add_metric(self.mean(x), name='metric_1') return self.dense1(x) model = TestModel() model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) with self.assertRaisesRegexp( ValueError, 'Please provide different names for the metrics you have added. 
' 'We found 2 metrics with the name: "metric_1"'): model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) @keras_parameterized.run_all_keras_modes def test_add_metric_without_name(self): class TestModel(keras.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = keras.layers.Dense(2, kernel_initializer='ones') def call(self, x): self.add_metric(math_ops.reduce_sum(x), aggregation='mean') return self.dense1(x) model = TestModel() model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) with self.assertRaisesRegex(ValueError, 'Please provide a name for your metric like'): model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) @keras_parameterized.run_all_keras_modes def test_add_metric_correctness(self): inputs = keras.Input(shape=(1,)) targets = keras.Input(shape=(1,)) class Bias(keras.layers.Layer): def build(self, input_shape): self.bias = self.add_variable('bias', (1,), initializer='zeros') self.mae = metrics_module.MeanAbsoluteError(name='mae_1') def call(self, inputs): inputs, targets = inputs outputs = inputs + self.bias self.add_metric(self.mae(targets, outputs), name='mae_1') return outputs outputs = Bias()([inputs, targets]) model = keras.Model([inputs, targets], outputs) model.add_metric( metrics_module.mean_absolute_error(targets, outputs), name='mae_2', aggregation='mean') model.compile( loss='mae', optimizer=keras.optimizer_v2.gradient_descent.SGD(0.1), metrics=[metrics_module.MeanAbsoluteError(name='mae_3')], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.array([[0.], [1.], [2.]]) y = np.array([[0.5], [2.], [3.5]]) history = model.fit([x, y], y, batch_size=3, epochs=5) expected_val = [1., 0.9, 0.8, 0.7, 0.6] for key in ['loss', 'mae_1', 'mae_2', 'mae_3']: self.assertAllClose(history.history[key], expected_val, 1e-3) @keras_parameterized.run_all_keras_modes def test_model_with_nested_compiled_model(self): class LayerWithAddMetric(keras.layers.Layer): def __init__(self): super(LayerWithAddMetric, self).__init__() self.dense = keras.layers.Dense(1, kernel_initializer='ones') def call(self, inputs): outputs = self.dense(inputs) self.add_metric( math_ops.reduce_sum(outputs), name='mean', aggregation='mean') return outputs x = keras.layers.Input(shape=(1,)) y = LayerWithAddMetric()(x) inner_model = keras.models.Model(x, y) inner_model.add_metric( math_ops.reduce_sum(y), name='mean1', aggregation='mean') inner_model.compile( 'sgd', loss='mse', metrics=[metrics_module.Accuracy('acc')], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual([m.name for m in inner_model.metrics], ['acc', 'mean', 'mean1']) x = keras.layers.Input(shape=[1]) y = inner_model(x) outer_model = keras.Model(x, y) outer_model.add_metric( math_ops.reduce_sum(y), name='mean2', aggregation='mean') outer_model.compile( 'sgd', loss='mse', metrics=[metrics_module.Accuracy('acc2')], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual([m.name for m in outer_model.metrics], ['acc2', 'mean', 'mean1', 'mean2']) class BareUpdateLayer(keras.layers.Layer): def build(self, input_shape): self.counter = self.add_weight( 'counter', dtype='int32', shape=(), 
initializer='zeros', trainable=False) def call(self, inputs): state_ops.assign_add(self.counter, 1) return math_ops.cast(self.counter, inputs.dtype) * inputs class LambdaUpdateLayer(keras.layers.Layer): def build(self, input_shape): self.counter = self.add_weight( 'counter', dtype='int32', shape=(), initializer='zeros', trainable=False) def call(self, inputs): # Make sure update isn't run twice. self.add_update(lambda: state_ops.assign_add(self.counter, 1)) return math_ops.cast(self.counter, inputs.dtype) * inputs class NestedUpdateLayer(keras.layers.Layer): def build(self, input_shape): self.layer = BareUpdateLayer() self.layer.build(input_shape) @property def counter(self): return self.layer.counter def call(self, inputs): return self.layer(inputs) class SubgraphUpdateLayer(keras.layers.Layer): def build(self, input_shape): self.counter = self.add_weight( 'counter', dtype='int32', shape=(), initializer='zeros', trainable=False) def call(self, inputs, training=None): if training is None: training = keras.backend.learning_phase() if training: self.counter.assign(self.counter + 1) return inputs @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class TestAutoUpdates(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types @parameterized.named_parameters(('bare_update', BareUpdateLayer()), ('lambda_update', LambdaUpdateLayer()), ('nested_update', NestedUpdateLayer())) def test_updates_in_model(self, layer): x, y = np.ones((10, 10)), np.ones((10, 1)) model = testing_utils.get_model_from_layers( [layer, keras.layers.Dense(1)], input_shape=(10,)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, batch_size=2, epochs=1) self.assertEqual(self.evaluate(layer.counter), 5) @keras_parameterized.run_with_all_model_types def test_lambda_updates_trainable_false(self): x, y = np.ones((10, 10)), np.ones((10, 1)) layer = LambdaUpdateLayer() model = testing_utils.get_model_from_layers( [layer, keras.layers.Dense(1)], input_shape=(10,)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, batch_size=2, epochs=1) self.assertEqual(self.evaluate(layer.counter), 5) layer.trainable = False model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, batch_size=2, epochs=1) self.assertEqual(self.evaluate(layer.counter), 5) @keras_parameterized.run_with_all_model_types def test_subgraph_updates_in_model(self): layer = SubgraphUpdateLayer() x, y = np.ones((10, 10)), np.ones((10, 1)) model = testing_utils.get_model_from_layers( [layer, keras.layers.Dense(1)], input_shape=(10,)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, batch_size=2, epochs=1) self.assertEqual(self.evaluate(layer.counter), 5) @parameterized.named_parameters(('bare_update', BareUpdateLayer()), ('lambda_update', LambdaUpdateLayer()), ('nested_update', NestedUpdateLayer())) def test_updates_standalone_layer(self, layer): y = layer(np.ones((10, 10))) self.evaluate(layer.counter.initializer) self.evaluate(y) self.assertEqual(self.evaluate(layer.counter), 1) def test_trainable_false_standalone_layer(self): layer = LambdaUpdateLayer() y = layer(np.ones((10, 10))) 
self.evaluate(layer.counter.initializer) self.evaluate(y) self.assertEqual(self.evaluate(layer.counter), 1) layer.trainable = False y = layer(np.ones((10, 10))) self.evaluate(y) self.assertEqual(self.evaluate(layer.counter), 1) @keras_parameterized.run_with_all_model_types def test_batchnorm_trainable_false(self): bn = keras.layers.BatchNormalization() model = testing_utils.get_model_from_layers([bn, keras.layers.Dense(1)], input_shape=(10,)) bn.trainable = False model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x, y = np.ones((10, 10)), np.ones((10, 1)) model.fit(x, y, batch_size=2, epochs=1) self.assertAllEqual(self.evaluate(bn.moving_mean), np.zeros((10,))) self.assertAllEqual(self.evaluate(bn.moving_variance), np.ones((10,))) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/training_test.py
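The block of tests above exercises the `add_metric` API from both layers and models. Below is a minimal, self-contained sketch of that pattern written against the public `tf.keras` API; the layer and metric names are illustrative, not taken from the test file.

import numpy as np
import tensorflow as tf


class InputSumLayer(tf.keras.layers.Layer):
  """Identity layer that reports the sum of its inputs as a metric."""

  def call(self, inputs):
    # `aggregation='mean'` averages the per-batch scalar over each epoch.
    self.add_metric(
        tf.reduce_sum(inputs), name='input_sum', aggregation='mean')
    return inputs


inputs = tf.keras.Input(shape=(1,))
outputs = tf.keras.layers.Dense(1)(InputSumLayer()(inputs))
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer='sgd', loss='mse')

history = model.fit(np.ones((10, 1)), np.ones((10, 1)), batch_size=5, epochs=1)
# Metrics added via `add_metric` appear alongside compiled metrics, which is
# what the assertions on `metrics_names` and `history.history` check above.
print(model.metrics_names)
print(history.history['input_sum'])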
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Contains the `Node` class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.keras import backend from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.util import nest class Node(object): """A `Node` describes the connectivity between two layers. Each time a layer is connected to some new input, a node is added to `layer._inbound_nodes`. Each time the output of a layer is used by another layer, a node is added to `layer._outbound_nodes`. Arguments: outbound_layer: the layer that takes `input_tensors` and turns them into `output_tensors` (the node gets created when the `call` method of the layer was called). inbound_layers: a list of layers, the same length as `input_tensors`, the layers from where `input_tensors` originate. node_indices: a list of integers, the same length as `inbound_layers`. `node_indices[i]` is the origin node of `input_tensors[i]` (necessary since each inbound layer might have several nodes, e.g. if the layer is being shared with a different data stream). tensor_indices: a list of integers, the same length as `inbound_layers`. `tensor_indices[i]` is the index of `input_tensors[i]` within the output of the inbound layer (necessary since each inbound layer might have multiple tensor outputs, with each one being independently manipulable). input_tensors: list of input tensors. output_tensors: list of output tensors. arguments: dictionary of keyword arguments that were passed to the `call` method of the layer at the call that created the node. `node_indices` and `tensor_indices` are basically fine-grained coordinates describing the origin of the `input_tensors`. A node from layer A to layer B is added to: - A._outbound_nodes - B._inbound_nodes """ def __init__(self, outbound_layer, inbound_layers, node_indices, tensor_indices, input_tensors, output_tensors, arguments=None): # Layer instance (NOT a sequence) if isinstance(outbound_layer, (list, tuple, dict)): raise ValueError('`outbound_layer` should be a layer instance, ' 'not a list, tuple, or, dict.') # this is the layer that takes a nested structure of input tensors # and turns them into a nested structure of output tensors. # the current node will be added to # the inbound_nodes of outbound_layer. self.outbound_layer = outbound_layer # The following 3 properties describe where # the input tensors come from: which layers, # and for each layer, which node and which # tensor output of each node. # Nested structure of layer instances. self.inbound_layers = inbound_layers # Nested structure of integers, 1:1 mapping with inbound_layers. self.node_indices = node_indices # Nested of integers, 1:1 mapping with inbound_layers. 
self.tensor_indices = tensor_indices # Following 2 properties: # tensor inputs and outputs of outbound_layer. # Nested structure of tensors. 1:1 mapping with inbound_layers. self.input_tensors = input_tensors # Nested structure of tensors, created by outbound_layer.call(). self.output_tensors = output_tensors # Following 2 properties: input and output shapes. # Nested structure of shape tuples, shapes of input_tensors. self.input_shapes = nest.map_structure(backend.int_shape, input_tensors) # Nested structure of shape tuples, shapes of output_tensors. self.output_shapes = nest.map_structure(backend.int_shape, output_tensors) # Optional keyword arguments to layer's `call`. self.arguments = arguments # Create Keras History for any Keras Tensors in `arguments`. tensor_arguments = [ t for t in nest.flatten(self.arguments) if isinstance(t, ops.Tensor) ] for tensor_argument in tensor_arguments: if base_layer_utils.needs_keras_history( tensor_argument, ignore_call_context=True): base_layer_utils.create_keras_history(tensor_argument) # Add nodes to all layers involved. for layer in nest.flatten(inbound_layers): if layer is not None: # For compatibility with external Keras, we use the deprecated # accessor here. layer.outbound_nodes.append(self) # For compatibility with external Keras, we use the deprecated # accessor here. outbound_layer.inbound_nodes.append(self) def iterate_inbound(self, include_arguments=False): """Returns a list of tuples representing the inbound data. Arguments: include_arguments: Whether to also iterate over any Keras Tensors passed as args, kwargs. Returns: List of tuples like: (inbound_layer, node_index, tensor_index, tensor). """ inputs_inbound = list( zip( nest.flatten(self.inbound_layers), nest.flatten(self.node_indices), nest.flatten(self.tensor_indices), nest.flatten(self.input_tensors))) if include_arguments: keras_tensor_arguments = [ kt for kt in nest.flatten(self.arguments) if hasattr(kt, '_keras_history') ] def _get_inbound(keras_tensor): kh = keras_tensor._keras_history return kh.layer, kh.node_index, kh.tensor_index, keras_tensor arguments_inbound = nest.map_structure(_get_inbound, keras_tensor_arguments) return inputs_inbound + arguments_inbound else: return inputs_inbound def _get_all_node_dependencies(self): """Returns all of the nodes this node immediately depends on.""" node_deps = [] for layer, node_index, _, _ in self.iterate_inbound(): node_deps.append(layer._inbound_nodes[node_index]) for arg in nest.flatten(self.arguments): if isinstance(arg, ops.Tensor) and hasattr(arg, '_keras_history'): kh = arg._keras_history node_deps.append(kh.layer._inbound_nodes[kh.node_index]) return node_deps def get_config(self): inbound_names = nest.map_structure( lambda layer: layer.name if layer else None, self.inbound_layers) return { 'outbound_layer': self.outbound_layer.name, 'inbound_layers': inbound_names, 'node_indices': self.node_indices, 'tensor_indices': self.tensor_indices }
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/node.py
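As an illustration of the bookkeeping the `Node` docstring describes, the sketch below (my own, not part of the file) shares one layer across two inputs and reads back the nodes that were recorded. `_inbound_nodes` and `_outbound_nodes` are internal attributes and are touched here only to show what gets stored.

import tensorflow as tf

inp_a = tf.keras.Input(shape=(4,), name='a')
inp_b = tf.keras.Input(shape=(4,), name='b')
shared = tf.keras.layers.Dense(2, name='shared')

out_a = shared(inp_a)  # First connection: node 0 on `shared`.
out_b = shared(inp_b)  # Second connection: node 1 on `shared` (layer sharing).
model = tf.keras.Model([inp_a, inp_b], [out_a, out_b])

# One Node per call of the layer, each recording its input/output tensors.
for index, node in enumerate(shared._inbound_nodes):
  print('node', index, node.input_tensors, '->', node.output_tensors)

# The input layers record the same connections as outbound nodes.
print(len(model.get_layer('a')._outbound_nodes))  # 1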
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import multiprocessing.pool import time from absl.testing import parameterized import numpy as np from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import readers from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging class ModelInputsTest(test.TestCase): def test_single_thing(self): a = np.ones(10) model_inputs = training_utils.ModelInputs(a) self.assertEqual(['input_1'], model_inputs.get_input_names()) vals = model_inputs.get_symbolic_inputs() self.assertTrue(tensor_util.is_tensor(vals)) vals = model_inputs.get_symbolic_inputs(return_single_as_list=True) self.assertEqual(1, len(vals)) self.assertTrue(tensor_util.is_tensor(vals[0])) self.assertEqual(backend.floatx(), vals[0].dtype) def test_single_thing_eager(self): with context.eager_mode(): a = np.ones(10, dtype=np.int32) model_inputs = training_utils.ModelInputs(a) self.assertEqual(['input_1'], model_inputs.get_input_names()) val = model_inputs.get_symbolic_inputs() self.assertTrue(tf_utils.is_symbolic_tensor(val)) vals = model_inputs.get_symbolic_inputs(return_single_as_list=True) self.assertEqual(1, len(vals)) self.assertTrue(tf_utils.is_symbolic_tensor(vals[0])) self.assertEqual(dtypes.int32, vals[0].dtype) def test_list(self): a = [np.ones(10), np.ones(20)] model_inputs = training_utils.ModelInputs(a) self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names()) vals = model_inputs.get_symbolic_inputs() self.assertTrue(tensor_util.is_tensor(vals[0])) self.assertTrue(tensor_util.is_tensor(vals[1])) def test_list_eager(self): with context.eager_mode(): a = [np.ones(10), np.ones(20)] model_inputs = training_utils.ModelInputs(a) self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names()) vals = model_inputs.get_symbolic_inputs() self.assertTrue(tf_utils.is_symbolic_tensor(vals[0])) self.assertTrue(tf_utils.is_symbolic_tensor(vals[1])) def test_dict(self): a = {'b': np.ones(10), 'a': np.ones(20)} model_inputs = training_utils.ModelInputs(a) self.assertEqual(['a', 'b'], model_inputs.get_input_names()) vals = model_inputs.get_symbolic_inputs() self.assertTrue(tensor_util.is_tensor(vals['a'])) self.assertTrue(tensor_util.is_tensor(vals['b'])) def test_dict_eager(self): with context.eager_mode(): a = 
{'b': np.ones(10), 'a': np.ones(20)} model_inputs = training_utils.ModelInputs(a) self.assertEqual(['a', 'b'], model_inputs.get_input_names()) vals = model_inputs.get_symbolic_inputs() self.assertTrue(tf_utils.is_symbolic_tensor(vals['a'])) self.assertTrue(tf_utils.is_symbolic_tensor(vals['b'])) class DatasetUtilsTest(test.TestCase, parameterized.TestCase): @parameterized.named_parameters( # pylint: disable=g-long-lambda ('Batch', lambda: dataset_ops.Dataset.range(5).batch(2), ValueError), ('Cache', lambda: dataset_ops.Dataset.range(5).cache()), ('Concatenate', lambda: dataset_ops.Dataset.range(5).concatenate( dataset_ops.Dataset.range(5))), ('FlatMap', lambda: dataset_ops.Dataset.range(5).flat_map( lambda _: dataset_ops.Dataset.from_tensors(0)), ValueError), ('Filter', lambda: dataset_ops.Dataset.range(5).filter(lambda _: True)), ('FixedLengthRecordDatasetV2', lambda: readers.FixedLengthRecordDatasetV2([], 42)), ('FromTensors', lambda: dataset_ops.Dataset.from_tensors(0)), ('FromTensorSlices', lambda: dataset_ops.Dataset.from_tensor_slices([0, 0, 0])), ('Interleave', lambda: dataset_ops.Dataset.range(5).interleave( lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1), ValueError), ('ParallelInterleave', lambda: dataset_ops.Dataset.range(5).interleave( lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1, num_parallel_calls=1), ValueError), ('Map', lambda: dataset_ops.Dataset.range(5).map(lambda x: x)), ('Options', lambda: dataset_ops.Dataset.range(5).with_options(dataset_ops.Options()) ), ('PaddedBatch', lambda: dataset_ops.Dataset.range(5).padded_batch(2, []), ValueError), ('ParallelMap', lambda: dataset_ops.Dataset.range(5).map( lambda x: x, num_parallel_calls=1)), ('Prefetch', lambda: dataset_ops.Dataset.range(5).prefetch(1)), ('Range', lambda: dataset_ops.Dataset.range(0)), ('Repeat', lambda: dataset_ops.Dataset.range(0).repeat(0)), ('Shuffle', lambda: dataset_ops.Dataset.range(5).shuffle(1)), ('Skip', lambda: dataset_ops.Dataset.range(5).skip(2)), ('Take', lambda: dataset_ops.Dataset.range(5).take(2)), ('TextLineDataset', lambda: readers.TextLineDatasetV2([])), ('TFRecordDataset', lambda: readers.TFRecordDatasetV2([])), ('Window', lambda: dataset_ops.Dataset.range(5).window(2), ValueError), ('Zip', lambda: dataset_ops.Dataset.zip(dataset_ops.Dataset.range(5))), # pylint: enable=g-long-lambda ) def test_assert_not_batched(self, dataset_fn, expected_error=None): if expected_error is None: training_utils.assert_not_batched(dataset_fn()) else: with self.assertRaises(expected_error): training_utils.assert_not_batched(dataset_fn()) @parameterized.named_parameters( # pylint: disable=g-long-lambda ('Batch', lambda: dataset_ops.Dataset.range(5).batch(2)), ('Cache', lambda: dataset_ops.Dataset.range(5).cache()), ('Concatenate', lambda: dataset_ops.Dataset.range(5).concatenate( dataset_ops.Dataset.range(5))), ('FlatMap', lambda: dataset_ops.Dataset.range(5).flat_map( lambda _: dataset_ops.Dataset.from_tensors(0)), ValueError), ('Filter', lambda: dataset_ops.Dataset.range(5).filter(lambda _: True)), ('FixedLengthRecordDatasetV2', lambda: readers.FixedLengthRecordDatasetV2([], 42)), ('FromTensors', lambda: dataset_ops.Dataset.from_tensors(0)), ('FromTensorSlices', lambda: dataset_ops.Dataset.from_tensor_slices([0, 0, 0])), ('Interleave', lambda: dataset_ops.Dataset.range(5).interleave( lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1), ValueError), ('Map', lambda: dataset_ops.Dataset.range(5).map(lambda x: x)), ('Options', lambda: 
dataset_ops.Dataset.range(5).with_options(dataset_ops.Options()) ), ('PaddedBatch', lambda: dataset_ops.Dataset.range(5).padded_batch(2, [])), ('ParallelInterleave', lambda: dataset_ops.Dataset.range(5).interleave( lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1, num_parallel_calls=1), ValueError), ('ParallelMap', lambda: dataset_ops.Dataset.range(5).map( lambda x: x, num_parallel_calls=1)), ('Prefetch', lambda: dataset_ops.Dataset.range(5).prefetch(1)), ('Range', lambda: dataset_ops.Dataset.range(0)), ('Repeat', lambda: dataset_ops.Dataset.range(0).repeat(0)), ('Shuffle', lambda: dataset_ops.Dataset.range(5).shuffle(1), ValueError), ('Skip', lambda: dataset_ops.Dataset.range(5).skip(2)), ('Take', lambda: dataset_ops.Dataset.range(5).take(2)), ('TextLineDataset', lambda: readers.TextLineDatasetV2([])), ('TFRecordDataset', lambda: readers.TFRecordDatasetV2([])), ('Window', lambda: dataset_ops.Dataset.range(5).window(2)), ('Zip', lambda: dataset_ops.Dataset.zip(dataset_ops.Dataset.range(5))), # pylint: enable=g-long-lambda ) def test_assert_not_shuffled(self, dataset_fn, expected_error=None): if expected_error is None: training_utils.assert_not_shuffled(dataset_fn()) else: with self.assertRaises(expected_error): training_utils.assert_not_shuffled(dataset_fn()) def test_verify_dataset_shuffled(self): dataset = dataset_ops.Dataset.range(5) training_utils.assert_not_shuffled(dataset) with test.mock.patch.object(logging, 'warning') as mock_log: training_utils.verify_dataset_shuffled(dataset) self.assertRegexpMatches( str(mock_log.call_args), 'input dataset `x` is not shuffled.') shuffled_dataset = dataset.shuffle(10) training_utils.verify_dataset_shuffled(shuffled_dataset) class StandardizeWeightsTest(keras_parameterized.TestCase): def test_sample_weights(self): y = np.array([0, 1, 0, 0, 2]) sample_weights = np.array([0.5, 1., 1., 0., 2.]) weights = training_utils.standardize_weights(y, sample_weights) self.assertAllClose(weights, sample_weights) def test_class_weights(self): y = np.array([0, 1, 0, 0, 2]) class_weights = {0: 0.5, 1: 1., 2: 1.5} weights = training_utils.standardize_weights(y, class_weight=class_weights) self.assertAllClose(weights, np.array([0.5, 1., 0.5, 0.5, 1.5])) def test_sample_weights_and_class_weights(self): y = np.array([0, 1, 0, 0, 2]) sample_weights = np.array([0.5, 1., 1., 0., 2.]) class_weights = {0: 0.5, 1: 1., 2: 1.5} weights = training_utils.standardize_weights(y, sample_weights, class_weights) expected = sample_weights * np.array([0.5, 1., 0.5, 0.5, 1.5]) self.assertAllClose(weights, expected) def test_dataset_with_class_weight(self): model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3) model.compile('rmsprop', 'mse') inputs = np.zeros((10, 3), np.float32) targets = np.zeros((10, 4), np.float32) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) class_weight_np = np.array([0.25, 0.25, 0.25, 0.25]) class_weight = dict(enumerate(class_weight_np)) model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=1, class_weight=class_weight) class MonitoredPool(multiprocessing.pool.ThreadPool): def __init__(self, *args, **kwargs): self._apply_counter = 0 self._func_wrapper = None super(MonitoredPool, self).__init__(*args, **kwargs) def apply_async(self, func, *args, **kwargs): self._apply_counter += 1 if self._func_wrapper: func = self._func_wrapper(func) # pylint: disable=not-callable return super(MonitoredPool, self).apply_async(func, *args, **kwargs) def add_sleep(f): 
@functools.wraps(f) def wrapped(*args, **kwargs): time.sleep(1.) return f(*args, **kwargs) return wrapped def cause_error(f): @functools.wraps(f) def wrapped(batch_element, batch_start, batch_end, is_finished): # pylint: disable=unused-argument # Induce a TypeError during assignment. return f(None, None, None, is_finished) return wrapped _TEST_DATA = np.array(( (3, 1, 3, 1, 2, 0, 3, 3, 1, 2), (0, 1, 2, 1, 3, 0, 0, 1, 3, 0), (3, 2, 1, 1, 1, 1, 1, 3, 2, 3), (2, 2, 0, 1, 0, 3, 3, 2, 1, 1), (3, 0, 3, 3, 3, 2, 1, 0, 0, 1), (1, 0, 3, 3, 3, 2, 1, 2, 3, 1),)) class AggregationTest(keras_parameterized.TestCase): def setUp(self): super(AggregationTest, self).setUp() self._old_pool = training_utils._COPY_POOL self._old_threshold = training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD self._old_timeout = training_utils.SliceAggregator._MAX_COPY_SECONDS training_utils._COPY_POOL = MonitoredPool(training_utils._COPY_THREADS) def tearDown(self): super(AggregationTest, self).tearDown() training_utils._COPY_POOL = self._old_pool training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = self._old_threshold training_utils.SliceAggregator._MAX_COPY_SECONDS = self._old_timeout def _run_with_steps(self): aggregator = training_utils.OutputsAggregator(use_steps=True) for i, batch in enumerate(np.array_split(_TEST_DATA, 4)): if i == 0: aggregator.create(batch) aggregator.aggregate(batch) assert len(aggregator.results) == 1 assert isinstance(aggregator.results[0], training_utils.ConcatAggregator) aggregator.finalize() return aggregator.results def _run_without_steps(self): aggregator = training_utils.OutputsAggregator( use_steps=False, num_samples=6) batch_start = 0 for i, batch in enumerate(np.array_split(_TEST_DATA, 4)): if i == 0: aggregator.create(batch) batch_end = batch_start + batch.shape[0] aggregator.aggregate(batch, batch_start, batch_end) batch_start = batch_end assert len(aggregator.results) == 1 assert isinstance(aggregator.results[0], training_utils.SliceAggregator) aggregator.finalize() return aggregator.results def test_with_steps(self): self.assertAllEqual(self._run_with_steps(), _TEST_DATA) def test_without_steps(self): self.assertAllEqual(self._run_without_steps(), _TEST_DATA) def test_nested_aggregation(self): aggregator = training_utils.OutputsAggregator( use_steps=False, num_samples=6) batches = np.array_split(_TEST_DATA, 4) batch_start = 0 for i, batch in enumerate(zip(batches, batches)): if i == 0: aggregator.create(batch) batch_end = batch_start + batch[0].shape[0] aggregator.aggregate(batch, batch_start, batch_end) batch_start = batch_end assert len(aggregator.results) == 2 aggregator.finalize() self.assertAllEqual(aggregator.results, (_TEST_DATA, _TEST_DATA)) def test_concat_single_batch(self): aggregator = training_utils.OutputsAggregator(use_steps=True) data = _TEST_DATA.copy() aggregator.create(data) assert len(aggregator.results) == 1 assert isinstance(aggregator.results[0], training_utils.ConcatAggregator) aggregator.aggregate(data) aggregator.finalize() assert aggregator.results is data # No copy. def test_slice_single_batch(self): aggregator = training_utils.OutputsAggregator( use_steps=False, num_samples=6) data = _TEST_DATA.copy() aggregator.create(data) assert len(aggregator.results) == 1 assert isinstance(aggregator.results[0], training_utils.SliceAggregator) aggregator.aggregate(data, 0, 6) aggregator.finalize() assert aggregator.results is data # No copy. 
  def test_async_copy(self):
    training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
    self.assertAllEqual(self._run_without_steps(), _TEST_DATA)

    # Two of the four batches will have 20 elements and two will have 10.
    self.assertEqual(training_utils._COPY_POOL._apply_counter, 2)

  def test_async_copy_timeout(self):
    training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
    training_utils.SliceAggregator._MAX_COPY_SECONDS = 0.1
    training_utils._COPY_POOL._func_wrapper = add_sleep
    with self.assertRaisesRegexp(ValueError, 'Timed out waiting for copy'):
      self._run_without_steps()

  def test_async_copy_reraise(self):
    training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
    training_utils.SliceAggregator._MAX_COPY_SECONDS = 1.
    training_utils._COPY_POOL._func_wrapper = cause_error
    with self.assertRaisesRegexp(TypeError, 'NoneType'):
      self._run_without_steps()


if __name__ == '__main__':
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/training_utils_test.py
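A small usage sketch of the weight handling that `StandardizeWeightsTest` above verifies, using the same internal `training_utils` module the test imports (internal API, so treat this only as a sketch): class weights are looked up per label and multiplied into any explicit sample weights.

import numpy as np
from tensorflow.python.keras.engine import training_utils

y = np.array([0, 1, 0, 0, 2])
sample_weights = np.array([0.5, 1., 1., 0., 2.])
class_weights = {0: 0.5, 1: 1., 2: 1.5}

# Class weights per sample are [0.5, 1., 0.5, 0.5, 1.5]; they are multiplied
# elementwise into the explicit sample weights.
combined = training_utils.standardize_weights(y, sample_weights, class_weights)
print(combined)  # [0.25, 1., 0.5, 0., 3.]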
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """End-to-end tests for a variety of small models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import itertools from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.ops import math_ops from tensorflow.python.platform import test def _conv2d_filter(**kwargs): """Convolution with non-default strides and dilation rate is not supported.""" return kwargs['strides'] <= 1 or kwargs['dilation_rate'] <= 1 # Scheme: (layer_class, data_shape, fuzz_dims, constructor_args, filter_fn) # layer_class: # A keras Layer class to be tested. # data_shape: # The shape of the input data. (not including batch dim) # fuzz_dims: # Dimensions which can be unspecified during model construction. For # instance, if data_shape is (2, 5) and fuzz_dims is (False, True), a pass # with model input shape of (2, None) will also be performed. # constructor_args: # An OrderedDict (to ensure consistent test names) with a key and a list # of values to test. Test cases will be generated for the Cartesian product # of all constructor args, so adding more fields can cause the drastically # increase the testing load. # filter_fn: # If not None, this function will be called on each set of generated # constructor args, and prevents generation of contradictory combinations. # A True return value indicates a valid test. 
_LAYERS_TO_TEST = [ (keras.layers.Dense, (1,), (False,), collections.OrderedDict([ ('units', [1])]), None), (keras.layers.Activation, (2, 2), (True, True), collections.OrderedDict([ ('activation', ['relu'])]), None), (keras.layers.Dropout, (16,), (False,), collections.OrderedDict([ ('rate', [0.25])]), None), (keras.layers.BatchNormalization, (8, 8, 3), (True, True, False), collections.OrderedDict([ ('axis', [3]), ('center', [True, False]), ('scale', [True, False]) ]), None), (keras.layers.Conv1D, (8, 8), (False, False), collections.OrderedDict([ ('filters', [1]), ('kernel_size', [1, 3]), ('strides', [1, 2]), ('padding', ['valid', 'same']), ('use_bias', [True]), ('kernel_regularizer', ['l2']), ('data_format', ['channels_last']) ]), None), (keras.layers.Conv2D, (8, 8, 3), (True, True, False), collections.OrderedDict([ ('filters', [1]), ('kernel_size', [1, 3]), ('strides', [1, 2]), ('padding', ['valid', 'same']), ('use_bias', [True, False]), ('kernel_regularizer', ['l2']), ('dilation_rate', [1, 2]), ('data_format', ['channels_last']) ]), _conv2d_filter), (keras.layers.LSTM, (4, 4), (False, False), collections.OrderedDict([ ('units', [1]), ('kernel_regularizer', ['l2']), ('dropout', [0, 0.5]), ('stateful', [True, False]), ('unroll', [True, False]), ('return_sequences', [True, False]) ]), None), ] def _gather_test_cases(): cases = [] for layer_type, inp_shape, fuzz_dims, arg_dict, filter_fn in _LAYERS_TO_TEST: arg_combinations = [[(k, i) for i in v] for k, v in arg_dict.items()] # pylint: disable=g-complex-comprehension for arguments in itertools.product(*arg_combinations): layer_kwargs = {k: v for k, v in arguments} if filter_fn is not None and not filter_fn(**layer_kwargs): continue name = '_{}_{}'.format(layer_type.__name__, '_'.join('{}_{}'.format(*i) for i in arguments)) cases.append((name, layer_type, inp_shape, fuzz_dims, layer_kwargs)) return cases OUTPUT_TEST_CASES = _gather_test_cases() class CoreLayerIntegrationTest(keras_parameterized.TestCase): """Test that layers and models produce the correct tensor types.""" # In v1 graph there are only symbolic tensors. @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @parameterized.named_parameters(*OUTPUT_TEST_CASES) def test_layer_output_type(self, layer_to_test, input_shape, _, layer_kwargs): layer = layer_to_test(**layer_kwargs) input_data = np.ones(shape=(2,) + input_shape, dtype=np.float32) layer_result = layer(input_data) inp = keras.layers.Input(shape=input_shape, batch_size=2) model = keras.models.Model(inp, layer_to_test(**layer_kwargs)(inp)) model_result = model(input_data) for x in [layer_result, model_result]: if not isinstance(x, ops.Tensor): raise ValueError('Tensor or EagerTensor expected, got type {}' .format(type(x))) if isinstance(x, ops.EagerTensor) != context.executing_eagerly(): expected_type = (ops.EagerTensor if context.executing_eagerly() else ops.Tensor) raise ValueError('Expected type {}, got type {}' .format(expected_type, type(x))) def _run_fit_eval_predict(self, layer_to_test, input_shape, data_shape, layer_kwargs): batch_size = 2 run_eagerly = testing_utils.should_run_eagerly() experimental_run_tf_function = testing_utils.should_run_tf_function() def map_fn(_): x = keras.backend.random_uniform(shape=data_shape) y = keras.backend.random_uniform(shape=(1,)) return x, y dataset = dataset_ops.DatasetV2.range(4).map(map_fn).batch(batch_size) inp = keras.layers.Input(shape=input_shape, batch_size=batch_size) layer = layer_to_test(**layer_kwargs)(inp) # Condense the output down to a single scalar. 
layer = keras.layers.Flatten()(layer) layer = keras.layers.Lambda( lambda x: math_ops.reduce_mean(x, keepdims=True))(layer) layer = keras.layers.Dense(1, activation=None)(layer) model = keras.models.Model(inp, layer) model.compile(loss='mse', optimizer='sgd', run_eagerly=run_eagerly, experimental_run_tf_function=experimental_run_tf_function) model.fit(dataset, verbose=2, epochs=2) model.compile(loss='mse', optimizer='sgd', run_eagerly=run_eagerly, experimental_run_tf_function=experimental_run_tf_function) model.fit(dataset.repeat(2), verbose=2, epochs=2, steps_per_epoch=2) eval_dataset = dataset_ops.DatasetV2.range(4).map(map_fn).batch(batch_size) model.evaluate(eval_dataset, verbose=2) def pred_map_fn(_): return keras.backend.random_uniform(shape=data_shape) pred_dataset = dataset_ops.DatasetV2.range(4) pred_dataset = pred_dataset.map(pred_map_fn).batch(batch_size) model.predict(pred_dataset, verbose=2) @keras_parameterized.run_all_keras_modes(always_skip_v1=False) @parameterized.named_parameters(*OUTPUT_TEST_CASES) def test_model_loops(self, layer_to_test, input_shape, fuzz_dims, layer_kwargs): self._run_fit_eval_predict(layer_to_test, input_shape, input_shape, layer_kwargs) if any(fuzz_dims): fuzzed_shape = [] for dim, should_fuzz in zip(input_shape, fuzz_dims): fuzzed_shape.append(None if should_fuzz else dim) self._run_fit_eval_predict(layer_to_test, fuzzed_shape, input_shape, layer_kwargs) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/training_integration_test.py
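The scheme comment and `_gather_test_cases` above build one named test case per valid combination of constructor arguments. Below is a stripped-down, standalone sketch of that generation pattern; the argument values and the filter are illustrative.

import collections
import itertools

arg_dict = collections.OrderedDict([
    ('kernel_size', [1, 3]),
    ('strides', [1, 2]),
    ('dilation_rate', [1, 2]),
])


def conv_filter(**kwargs):
  # Mirrors `_conv2d_filter`: non-default strides and dilation rate together
  # are rejected, so no contradictory case is generated.
  return kwargs['strides'] <= 1 or kwargs['dilation_rate'] <= 1


cases = []
arg_combinations = [[(k, v) for v in values] for k, values in arg_dict.items()]
for arguments in itertools.product(*arg_combinations):
  layer_kwargs = dict(arguments)
  if not conv_filter(**layer_kwargs):
    continue
  name = '_' + '_'.join('{}_{}'.format(k, v) for k, v in arguments)
  cases.append((name, layer_kwargs))

for name, layer_kwargs in cases:
  print(name, layer_kwargs)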
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """DataAdapter tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os import unittest from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.utils import data_utils from tensorflow.python.ops import array_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class DataAdapterTestBase(test.TestCase, parameterized.TestCase): def setUp(self): super(DataAdapterTestBase, self).setUp() self.batch_size = 5 self.numpy_input = np.zeros((50, 10)) self.numpy_target = np.ones(50) self.tensor_input = constant_op.constant(2.0, shape=(50, 10)) self.tensor_target = array_ops.ones((50,)) self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices( (self.numpy_input, self.numpy_target)).shuffle(50).batch( self.batch_size) def generator(): while True: yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size)) self.generator_input = generator() self.sequence_input = TestSequence(batch_size=self.batch_size, feature_shape=10) self.model = keras.models.Sequential( [keras.layers.Dense(8, input_shape=(10,), activation='softmax')]) class TestSequence(data_utils.Sequence): def __init__(self, batch_size, feature_shape): self.batch_size = batch_size self.feature_shape = feature_shape def __getitem__(self, item): return (np.zeros((self.batch_size, self.feature_shape)), np.ones((self.batch_size,))) def __len__(self): return 10 class TensorLikeDataAdapterTest(DataAdapterTestBase): def setUp(self): super(TensorLikeDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.TensorLikeDataAdapter def test_can_handle_numpy(self): self.assertTrue(self.adapter_cls.can_handle(self.numpy_input)) self.assertTrue( self.adapter_cls.can_handle(self.numpy_input, self.numpy_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) def test_iterator_expect_batch_size_numpy(self): with self.assertRaisesRegexp( ValueError, r'`batch_size` or `steps` is required'): self.adapter_cls(self.numpy_input, self.numpy_target) def test_size_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_batch_size_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5) 
self.assertEqual(adapter.batch_size(), 5) def test_partial_batch_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=4) self.assertEqual(adapter.get_size(), 13) # 50/4 self.assertTrue(adapter.has_partial_batch()) self.assertEqual(adapter.partial_batch_size(), 2) @test_util.run_in_graph_and_eager_modes def test_training_numpy(self): if not context.executing_eagerly(): return # Only test in eager. self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd') self.model.fit(self.numpy_input, self.numpy_target, batch_size=5) def test_can_handle(self): self.assertTrue(self.adapter_cls.can_handle(self.tensor_input)) self.assertTrue( self.adapter_cls.can_handle(self.tensor_input, self.tensor_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) @test_util.run_in_graph_and_eager_modes def test_training(self): if not context.executing_eagerly(): return # Only test EagerTensors. self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd') self.model.fit(self.tensor_input, self.tensor_target, batch_size=5) def test_size(self): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 32 x = np.arange(num_samples) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle=True, epochs=2) def _get_epoch(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return np.concatenate(ds_data) ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. 
self.assertAllClose(x, np.sort(second_epoch_data)) @parameterized.named_parameters( ('batch_size_5', 5, None, 5), ('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence ('steps_1', None, 1, 50), ('steps_4', None, 4, 13), ) def test_batch_size(self, batch_size_in, steps, batch_size_out): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.batch_size(), batch_size_out) @parameterized.named_parameters( ('batch_size_5', 5, None, 10, 0), ('batch_size_4', 4, None, 13, 2), ('steps_1', None, 1, 1, 0), ('steps_5', None, 5, 5, 0), ('steps_4', None, 4, 4, 11), ) def test_partial_batch( self, batch_size_in, steps, size, partial_batch_size): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.get_size(), size) # 50/steps self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size)) self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None) class DatasetAdapterTest(DataAdapterTestBase): def setUp(self): super(DatasetAdapterTest, self).setUp() self.adapter_cls = data_adapter.DatasetAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertTrue(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) def test_training(self): dataset = self.adapter_cls(self.dataset_input).get_dataset() self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd') self.model.fit(dataset) def test_size(self): adapter = self.adapter_cls(self.dataset_input) self.assertIsNone(adapter.get_size()) def test_batch_size(self): adapter = self.adapter_cls(self.dataset_input) self.assertIsNone(adapter.batch_size()) def test_partial_batch(self): adapter = self.adapter_cls(self.dataset_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) class GeneratorDataAdapterTest(DataAdapterTestBase): def setUp(self): super(GeneratorDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.GeneratorDataAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertTrue(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd') self.model.fit(self.generator_input, steps_per_epoch=10) @unittest.skipIf( os.name == 'nt', 'use_multiprocessing=True does not work on windows properly.') @test_util.run_v2_only def test_with_multiprocessing_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd') self.model.fit(self.generator_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) # Fit twice to ensure there isn't any duplication that prevent the worker # from starting. 
self.model.fit(self.generator_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) def test_size(self): adapter = self.adapter_cls(self.generator_input) self.assertIsNone(adapter.get_size()) def test_batch_size(self): adapter = self.adapter_cls(self.generator_input) self.assertEqual(adapter.batch_size(), 5) def test_partial_batch(self): adapter = self.adapter_cls(self.generator_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) class KerasSequenceAdapterTest(DataAdapterTestBase): def setUp(self): super(KerasSequenceAdapterTest, self).setUp() self.adapter_cls = data_adapter.KerasSequenceAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertTrue(self.adapter_cls.can_handle(self.sequence_input)) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd') self.model.fit(self.sequence_input) @unittest.skipIf( os.name == 'nt', 'use_multiprocessing=True does not work on windows properly.') @test_util.run_v2_only def test_with_multiprocessing_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd') self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) # Fit twice to ensure there isn't any duplication that prevent the worker # from starting. self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) def test_size(self): adapter = self.adapter_cls(self.sequence_input) self.assertEqual(adapter.get_size(), 10) def test_batch_size(self): adapter = self.adapter_cls(self.sequence_input) self.assertEqual(adapter.batch_size(), 5) def test_partial_batch(self): adapter = self.adapter_cls(self.sequence_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) if __name__ == '__main__': ops.enable_eager_execution() test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/data_adapter_test.py
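For reference, the `generator_input` and `sequence_input` fixtures exercised in the adapter tests above behave like batches of 5 drawn from 50 samples (`batch_size() == 5`, and `get_size() == 10` for the Sequence). A hypothetical sketch of fixtures with those properties, not necessarily what `DataAdapterTestBase` actually defines:

```python
import math
import numpy as np
from tensorflow.python.keras.utils import data_utils


def make_generator_input(num_samples=50, batch_size=5):
  """Infinite generator yielding (inputs, targets) batches of size 5."""
  while True:
    for start in range(0, num_samples, batch_size):
      x = np.random.random((batch_size, 10)).astype(np.float32)
      y = np.random.randint(0, 2, size=(batch_size, 1))
      yield x, y


class TestSequence(data_utils.Sequence):
  """Sequence of 10 batches, 5 samples each, matching the assertions above."""

  def __init__(self, num_samples=50, batch_size=5):
    self._x = np.random.random((num_samples, 10)).astype(np.float32)
    self._y = np.random.randint(0, 2, size=(num_samples, 1))
    self._batch_size = batch_size

  def __len__(self):
    return math.ceil(len(self._x) / self._batch_size)  # -> 10 batches

  def __getitem__(self, idx):
    s = slice(idx * self._batch_size, (idx + 1) * self._batch_size)
    return self._x[s], self._y[s]
```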
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the base ProcessingLayer and a subclass that uses Combiners.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.ops import cardinality from tensorflow.python.data.ops import dataset_ops from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import base_preprocessing_layer from tensorflow.python.ops import state_ops class CombinerPreprocessingLayer( base_preprocessing_layer.CombinerPreprocessingLayer): """V1-compatible CombinerPreprocessingLayer. This class overrides several methods of the CombinerPreprocessingLayer to make it compatible with V1 execution. End users should not need to worry about the implementation details here; Keras will export the appropriate class under the 'CombinerPreprocessingLayer' symbol. (Users should not directly instantiate engine.base_preprocessing_layer/_v1.CombinerPreprocessingLayer). When creating a subclass of PreprocessingLayer, you can create a V1-compatible subclass as follows: class MyProcLayer(MyProcLayer, base_preprocessing_layer_v1.CombinerPreprocessingLayer): pass Note that the same classname is required for serialization purposes. This is only necessary for internal classes, since any class that inherits from tf.keras.[...].CombinerPreprocessingLayer will get the right symbol. """ def _restore_updates(self): """Recreates a dict of updates from the layer's weights.""" data_dict = {} for name, var in self.state_variables.items(): data_dict[name] = K.get_session().run(var) return data_dict def _dataset_is_infinite(self, dataset): """True if the passed dataset is infinite.""" dataset_size = K.get_session().run(cardinality.cardinality(dataset)) return dataset_size == cardinality.INFINITE def _get_dataset_iterator(self, dataset): """Gets an iterator from a tf.data.Dataset.""" iterator = dataset_ops.make_one_shot_iterator(dataset) session = K.get_session() next_element = iterator.get_next() return lambda: session.run(next_element) def _set_state_variables(self, updates): """Directly update the internal state of this Layer. V1 compatible.""" # TODO(momernick): Do we need to do any more input sanitization? if not self.built: raise RuntimeError('_set_state_variables() must be called after build().') assignments = [] for var_name, value in updates.items(): assignments.append( state_ops.assign(self.state_variables[var_name], value)) K.get_session().run(assignments)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/base_preprocessing_layer_v1.py
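A slightly fuller sketch of the dual-inheritance pattern the docstring above describes for making a V1-compatible preprocessing layer. `MyProcLayer` is hypothetical; the key points are that the V1 base class is listed last and that the class name is kept identical for serialization:

```python
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.engine import base_preprocessing_layer_v1


class MyProcLayer(base_preprocessing_layer.CombinerPreprocessingLayer):
  """Hypothetical V2 preprocessing layer built on a Combiner."""
  pass


# V1-compatible variant: same class name, with the V1 base mixed in so the
# session-based overrides (_restore_updates, _get_dataset_iterator, ...) are
# picked up ahead of the V2 CombinerPreprocessingLayer in the MRO.
class MyProcLayer(MyProcLayer,  # pylint: disable=function-redefined
                  base_preprocessing_layer_v1.CombinerPreprocessingLayer):
  pass
```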
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training-related part of the Keras engine. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import numpy as np from tensorflow.python import tf2 from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import monitoring from tensorflow.python.framework import composite_tensor_utils from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import tensor_util from tensorflow.python.framework import type_spec from tensorflow.python.keras import backend as K from tensorflow.python.keras import losses from tensorflow.python.keras import metrics as metrics_module from tensorflow.python.keras import optimizers from tensorflow.python.keras.distribute import distributed_training_utils from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.engine import network from tensorflow.python.keras.engine import training_arrays from tensorflow.python.keras.engine import training_distributed from tensorflow.python.keras.engine import training_eager from tensorflow.python.keras.engine import training_generator from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.engine import training_v2 from tensorflow.python.keras.engine import training_v2_utils from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.keras.saving import saving_utils from tensorflow.python.keras.utils import data_utils from tensorflow.python.keras.utils import losses_utils from tensorflow.python.keras.utils.mode_keys import ModeKeys from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.losses import util as tf_losses_utils from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training.tracking import base as trackable from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils from tensorflow.python.util import nest from tensorflow.python.util import serialization from tensorflow.python.util import tf_inspect from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.tf_export import keras_export try: from scipy.sparse import issparse # pylint: disable=g-import-not-at-top except ImportError: issparse 
= None _keras_api_gauge = monitoring.BoolGauge('/tensorflow/api/keras', 'keras api usage', 'method') @keras_export('keras.models.Model', 'keras.Model') class Model(network.Network): """`Model` groups layers into an object with training and inference features. There are two ways to instantiate a `Model`: 1 - With the "functional API", where you start from `Input`, you chain layer calls to specify the model's forward pass, and finally you create your model from inputs and outputs: ```python import tensorflow as tf inputs = tf.keras.Input(shape=(3,)) x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs) outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x) model = tf.keras.Model(inputs=inputs, outputs=outputs) ``` 2 - By subclassing the `Model` class: in that case, you should define your layers in `__init__` and you should implement the model's forward pass in `call`. ```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super(MyModel, self).__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) def call(self, inputs): x = self.dense1(inputs) return self.dense2(x) model = MyModel() ``` If you subclass `Model`, you can optionally have a `training` argument (boolean) in `call`, which you can use to specify a different behavior in training and inference: ```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super(MyModel, self).__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) self.dropout = tf.keras.layers.Dropout(0.5) def call(self, inputs, training=False): x = self.dense1(inputs) if training: x = self.dropout(x, training=training) return self.dense2(x) model = MyModel() ``` """ def __init__(self, *args, **kwargs): super(Model, self).__init__(*args, **kwargs) _keras_api_gauge.get_cell('model').set(True) # initializing _distribution_strategy here since it is possible to call # predict on a model without compiling it. self._distribution_strategy = None self._compile_time_distribution_strategy = None # This flag is used to track if the user is using the deprecated path of # passing distribution strategy to compile rather than creating the model # under distribution strategy scope. self._compile_distribution = False self._run_eagerly = None self._experimental_run_tf_function = False def get_weights(self): """Retrieves the weights of the model. Returns: A flat list of Numpy arrays. """ strategy = (self._distribution_strategy or self._compile_time_distribution_strategy) if strategy: with strategy.scope(): return super(Model, self).get_weights() return super(Model, self).get_weights() def load_weights(self, filepath, by_name=False): """Loads all layer weights, either from a TensorFlow or an HDF5 file.""" if distributed_training_utils.is_tpu_strategy(self._distribution_strategy): if (self._distribution_strategy.extended.steps_per_run > 1 and (not network._is_hdf5_filepath(filepath))): # pylint: disable=protected-access raise ValueError('Load weights is not yet supported with TPUStrategy ' 'with steps_per_run greater than 1.') return super(Model, self).load_weights(filepath, by_name) @trackable.no_automatic_dependency_tracking def compile(self, optimizer='rmsprop', loss=None, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None, distribute=None, **kwargs): """Configures the model for training. 
Arguments: optimizer: String (name of optimizer) or optimizer instance. See `tf.keras.optimizers`. loss: String (name of objective function), objective function or `tf.losses.Loss` instance. See `tf.losses`. If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that will be minimized by the model will then be the sum of all individual losses. metrics: List of metrics to be evaluated by the model during training and testing. Typically you will use `metrics=['accuracy']`. To specify different metrics for different outputs of a multi-output model, you could also pass a dictionary, such as `metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`. You can also pass a list (len = len(outputs)) of lists of metrics such as `metrics=[['accuracy'], ['accuracy', 'mse']]` or `metrics=['accuracy', ['accuracy', 'mse']]`. loss_weights: Optional list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value that will be minimized by the model will then be the *weighted sum* of all individual losses, weighted by the `loss_weights` coefficients. If a list, it is expected to have a 1:1 mapping to the model's outputs. If a tensor, it is expected to map output names (strings) to scalar coefficients. sample_weight_mode: If you need to do timestep-wise sample weighting (2D weights), set this to `"temporal"`. `None` defaults to sample-wise weights (1D). If the model has multiple outputs, you can use a different `sample_weight_mode` on each output by passing a dictionary or a list of modes. weighted_metrics: List of metrics to be evaluated and weighted by sample_weight or class_weight during training and testing. target_tensors: By default, Keras will create placeholders for the model's target, which will be fed with the target data during training. If instead you would like to use your own target tensors (in turn, Keras will not expect external Numpy data for these targets at training time), you can specify them via the `target_tensors` argument. It can be a single tensor (for a single-output model), a list of tensors, or a dict mapping output names to target tensors. distribute: NOT SUPPORTED IN TF 2.0, please create and compile the model under distribution strategy scope instead of passing it to compile. **kwargs: Any additional arguments. Raises: ValueError: In case of invalid arguments for `optimizer`, `loss`, `metrics` or `sample_weight_mode`. 
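A hedged sketch of how the multi-output forms of `loss`, `metrics`, and `loss_weights` described above fit together; the output names and shapes are illustrative, and a `loss_weights` dict maps output names to scalar coefficients:

```python
import tensorflow as tf

inputs = tf.keras.Input(shape=(8,))
x = tf.keras.layers.Dense(16, activation='relu')(inputs)
output_a = tf.keras.layers.Dense(1, name='output_a')(x)
output_b = tf.keras.layers.Dense(4, activation='softmax', name='output_b')(x)
model = tf.keras.Model(inputs=inputs, outputs=[output_a, output_b])

# Per-output losses and metrics can be given as dicts keyed by output name;
# loss_weights scales each output's contribution to the total loss.
model.compile(
    optimizer='rmsprop',
    loss={'output_a': 'mse', 'output_b': 'sparse_categorical_crossentropy'},
    metrics={'output_a': ['mae'], 'output_b': ['accuracy']},
    loss_weights={'output_a': 1.0, 'output_b': 0.5})
```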
""" self._run_eagerly = kwargs.pop('run_eagerly', None) self._experimental_run_tf_function = kwargs.pop( 'experimental_run_tf_function', True) self._set_optimizer(optimizer) is_any_optimizer_v1 = any(isinstance(opt, optimizers.Optimizer) for opt in nest.flatten(self.optimizer)) if ((sample_weight_mode is not None) or (target_tensors is not None) or is_any_optimizer_v1 or not ops.executing_eagerly_outside_functions()): # Fallback out of things that aren't supported with v2 loops self._experimental_run_tf_function = False self._compile_time_distribution_strategy = ( distribution_strategy_context.get_strategy()) if distribute is not None: if tf2.enabled() or self._experimental_run_tf_function: raise ValueError( 'Distribute argument in compile is not available in TF 2.0 please ' 'create the model under the distribution strategy scope.') logging.warning('Distribute argument in compile is deprecated please ' 'create the model under the distribution strategy scope.') self._distribution_strategy = distribute self._compile_distribution = True else: if distribution_strategy_context.has_strategy(): # When the user builds the model in the DS scope and cross replica # context we want distribution strategy to be set but when building the # replica copies of the models internally we should not be compiling # with distribution strategy and use the default compilation path. if distribution_strategy_context.in_cross_replica_context(): self._distribution_strategy = ( distribution_strategy_context.get_strategy()) if not self._experimental_run_tf_function: self._validate_compile_param_for_distribution_strategy(self.run_eagerly, sample_weight_mode, target_tensors, weighted_metrics) # We've disabled automatic dependency tracking for this method, but do want # to add a checkpoint dependency on the optimizer if it's trackable. if isinstance(self.optimizer, trackable.Trackable): self._track_trackable( self.optimizer, name='optimizer', overwrite=True) self.loss = loss or {} self.loss_weights = loss_weights self.sample_weight_mode = sample_weight_mode self._compile_metrics = metrics or [] self._compile_weighted_metrics = weighted_metrics if self.run_eagerly and target_tensors is not None: raise ValueError( 'target_tensors argument is not supported when ' 'running a model eagerly.') # _training_endpoints contains a list of _TrainingEndpoint object, which has # all the model output/target/loss and related metadata. self._training_endpoints = [] # Used to freeze the behavior of the Model once `compile` has been called. self._compiled_trainable_state = self._get_trainable_state() # Set tf.distribute.Strategy specific parameters. self._distributed_model_cache = {} self._distributed_function_cache = {} # Clear any `_eager_losses` that was added. self._clear_losses() if (not context.executing_eagerly() and self._distribution_strategy is not None): # Ensures a Session is created and configured correctly for Distribution # Strategy. K.configure_and_create_distributed_session(self._distribution_strategy) # Initialize model metric attributes. self._init_metric_attributes() if not self.built or not self.inputs or not self.outputs: # Model is not compilable because it does not know its number of inputs # and outputs, nor their shapes and names. We will compile after the first # time the model gets called on training data. return self._is_compiled = True _keras_api_gauge.get_cell('compile').set(True) # Prepare list of loss functions, same size of model outputs. 
self.loss_functions = training_utils.prepare_loss_functions( self.loss, self.output_names) target_tensors = self._process_target_tensor_for_compile(target_tensors) for o, n, l, t in zip(self.outputs, self.output_names, self.loss_functions, target_tensors): endpoint = _TrainingEndpoint(o, n, l) endpoint.create_training_target(t, run_eagerly=self.run_eagerly) self._training_endpoints.append(endpoint) # Prepare list loss weights, same size of model outputs. training_utils.prepare_loss_weights(self._training_endpoints, loss_weights) # Initialization for Eager mode execution. if self.run_eagerly: self._compile_eagerly(metrics, weighted_metrics, sample_weight_mode) return with K.get_graph().as_default(): # Save all metric attributes per output of the model. self._cache_output_metric_attributes(metrics, weighted_metrics) # Set metric attributes on model. self._set_metric_attributes() # Invoke metric functions (unweighted) for all the outputs. self._handle_metrics( self.outputs, targets=self._targets, skip_target_masks=self._prepare_skip_target_masks(), masks=self._prepare_output_masks()) # Prepare sample weight modes. List with the same length as model outputs. training_utils.prepare_sample_weight_modes( self._training_endpoints, sample_weight_mode) # Creates the model loss and weighted metrics sub-graphs. self._compile_weights_loss_and_weighted_metrics() # Functions for train, test and predict will # be compiled lazily when required. # This saves time when the user is not using all functions. self._function_kwargs = kwargs self.train_function = None self.test_function = None self.predict_function = None # Collected trainable weights, sorted in topological order. self._collected_trainable_weights = self._unique_trainable_weights # Validate all variables were correctly created in distribution scope. if self._distribution_strategy and not self._compile_distribution: for v in self.variables: strategy = self._distribution_strategy if not strategy.extended.variable_created_in_scope(v): raise ValueError( 'Variable (%s) was not created in the distribution strategy ' 'scope of (%s). It is most likely due to not all layers or ' 'the model or optimizer being created outside the distribution ' 'strategy scope. Try to make sure your code looks similar ' 'to the following.\n' 'with strategy.scope():\n' ' model=_create_model()\n' ' model.compile(...)'% (v, strategy)) @trackable.no_automatic_dependency_tracking def _init_distributed_function_cache_if_not_compiled(self): if not hasattr(self, '_distributed_function_cache'): self._distributed_function_cache = {} @property def metrics(self): """Returns the model's metrics added using `compile`, `add_metric` APIs.""" metrics = [] if self._is_compiled: metrics += self._compile_metric_functions metrics.extend(self._metrics) metrics.extend(_get_metrics_from_layers(self._layers)) return metrics @property def metrics_names(self): """Returns the model's display labels for all outputs.""" metrics_names = ['loss'] if self._is_compiled: # Add output loss metric names to the metric names list. if len(self._training_endpoints) > 1: metrics_names.extend([ e.loss_name() for e in self._training_endpoints if not e.should_skip_target() ]) # Add compile metrics/weighted metrics' names to the metric names list. metrics_names.extend([m.name for m in self._compile_metric_functions]) # Add metric names from layers. 
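The validation error raised in the `compile` body above prescribes building and compiling the model inside the distribution strategy scope rather than passing `distribute=` to `compile`. A minimal sketch of that recommended pattern (the strategy choice is illustrative):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

with strategy.scope():
  # All variables (layer weights and optimizer slots) are created inside the
  # scope, so the variable_created_in_scope check above passes.
  model = tf.keras.Sequential([
      tf.keras.layers.Dense(16, activation='relu', input_shape=(8,)),
      tf.keras.layers.Dense(1),
  ])
  model.compile(optimizer='sgd', loss='mse')
```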
for layer in self.layers: metrics_names += [m.name for m in layer._metrics] # pylint: disable=protected-access metrics_names += [m.name for m in self._metrics] return metrics_names @property def run_eagerly(self): """Settable attribute indicating whether the model should run eagerly. Running eagerly means that your model will be run step by step, like Python code. Your model might run slower, but it should become easier for you to debug it by stepping into individual layer calls. By default, we will attempt to compile your model to a static graph to deliver the best execution performance. Returns: Boolean, whether the model should run eagerly. """ if self._run_eagerly is True and not context.executing_eagerly(): raise ValueError('You can only set `run_eagerly=True` if eager execution ' 'is enabled.') if not self.dynamic: if self._run_eagerly is None: # Respect `tf.config.experimental_run_functions_eagerly` unless # `run_eagerly` was explicitly passed to `compile`. return def_function.RUN_FUNCTIONS_EAGERLY else: return self._run_eagerly else: if not context.executing_eagerly(): raise ValueError('Your model contains layers that can only be ' 'successfully run in eager execution (layers ' 'constructed with `dynamic=True`). ' 'You must enable eager execution with ' '`tf.enable_eager_execution()`.') if self._run_eagerly is False: # TODO(fchollet): consider using py_func to enable this. raise ValueError('Your model contains layers that can only be ' 'successfully run in eager execution (layers ' 'constructed with `dynamic=True`). ' 'You cannot set `run_eagerly=False`.') return context.executing_eagerly() @run_eagerly.setter def run_eagerly(self, value): self._run_eagerly = value def _select_training_loop(self, inputs): """Select training loop for fit/eval/predict based on the inputs.""" # TODO(kaftan) or TODO(scottzhu): This check should eventually be nicely # integrated into the data adapters in the v2 loop. We can't do this yet # because we currently have to fall back for unhandled data types. if isinstance(inputs, (iterator_ops.Iterator, iterator_ops.IteratorV2)): raise ValueError('For performance reasons Keras `fit`, `evaluate` and' '`predict` accept tf.data `Datasets` as input but not ' 'iterators that have been manually generated from ' 'Datasets by users. Please directly pass in the ' 'original `Dataset` object instead of passing in ' '`iter(dataset)`.') # Experiment training loop with default DS path. if context.executing_eagerly() and self._experimental_run_tf_function: try: valid_adapter = data_adapter.select_data_adapter(inputs, None) except ValueError as data_failure_exception: valid_adapter = None logging.warning('Falling back from v2 loop because of error: ' '%s' % data_failure_exception) if valid_adapter: if self._in_multi_worker_mode(): return training_distributed.DistributionMultiWorkerTrainingLoop( training_v2.Loop()) else: return training_v2.Loop() # Case 1: distribution strategy. if self._distribution_strategy: if self._in_multi_worker_mode(): return training_distributed.DistributionMultiWorkerTrainingLoop( training_distributed.DistributionSingleWorkerTrainingLoop()) else: return training_distributed.DistributionSingleWorkerTrainingLoop() # Case 2: generator-like. Input is Python generator, or Sequence object, # or a non-distributed Dataset or iterator in eager execution. 
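The settable `run_eagerly` property above is mainly a debugging aid; a minimal sketch of the two ways to enable it (eager execution, i.e. TF 2.x behaviour, assumed):

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])

# Either pass run_eagerly when compiling ...
model.compile(optimizer='sgd', loss='mse', run_eagerly=True)

# ... or flip the settable property afterwards. Both make fit/evaluate/predict
# run the model step by step instead of compiling it to a static graph.
model.run_eagerly = True
```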
if data_utils.is_generator_or_sequence(inputs): return training_generator.GeneratorOrSequenceTrainingLoop() if training_utils.is_eager_dataset_or_iterator(inputs): return training_generator.EagerDatasetOrIteratorTrainingLoop() # Case 3: Symbolic tensors or Numpy array-like. # This includes Datasets and iterators in graph mode (since they # generate symbolic tensors). if self.run_eagerly: return training_generator.GeneratorLikeTrainingLoop() else: return training_arrays.ArrayLikeTrainingLoop() def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0., validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs): """Trains the model for a fixed number of epochs (iterations on a dataset). Arguments: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. Should return a tuple of either `(inputs, targets)` or `(inputs, targets, sample_weights)`. - A generator or `keras.utils.Sequence` returning `(inputs, targets)` or `(inputs, targets, sample weights)`. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset, generator, or `keras.utils.Sequence` instance, `y` should not be specified (since targets will be obtained from `x`). batch_size: Integer or `None`. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` if your data is in the form of symbolic tensors, datasets, generators, or `keras.utils.Sequence` instances (since they generate batches). epochs: Integer. Number of epochs to train the model. An epoch is an iteration over the entire `x` and `y` data provided. Note that in conjunction with `initial_epoch`, `epochs` is to be understood as "final epoch". The model is not trained for a number of iterations given by `epochs`, but merely until the epoch of index `epochs` is reached. verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. Note that the progress bar is not particularly useful when logged to a file, so verbose=2 is recommended when not running interactively (eg, in a production environment). callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during training. See `tf.keras.callbacks`. validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss and any model metrics on this data at the end of each epoch. The validation data is selected from the last samples in the `x` and `y` data provided, before shuffling. This argument is not supported when `x` is a dataset, generator or `keras.utils.Sequence` instance. validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. `validation_data` will override `validation_split`. 
`validation_data` could be: - tuple `(x_val, y_val)` of Numpy arrays or tensors - tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays - dataset For the first two cases, `batch_size` must be provided. For the last case, `validation_steps` must be provided. shuffle: Boolean (whether to shuffle the training data before each epoch) or str (for 'batch'). 'batch' is a special option for dealing with the limitations of HDF5 data; it shuffles in batch-sized chunks. Has no effect when `steps_per_epoch` is not `None`. class_weight: Optional dictionary mapping class indices (integers) to a weight (float) value, used for weighting the loss function (during training only). This can be useful to tell the model to "pay more attention" to samples from an under-represented class. sample_weight: Optional Numpy array of weights for the training samples, used for weighting the loss function (during training only). You can either pass a flat (1D) Numpy array with the same length as the input samples (1:1 mapping between weights and samples), or in the case of temporal data, you can pass a 2D array with shape `(samples, sequence_length)`, to apply a different weight to every timestep of every sample. In this case you should make sure to specify `sample_weight_mode="temporal"` in `compile()`. This argument is not supported when `x` is a dataset, generator, or `keras.utils.Sequence` instance, instead provide the sample_weights as the third element of `x`. initial_epoch: Integer. Epoch at which to start training (useful for resuming a previous training run). steps_per_epoch: Integer or `None`. Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. When training with input tensors such as TensorFlow data tensors, the default `None` is equal to the number of samples in your dataset divided by the batch size, or 1 if that cannot be determined. If x is a `tf.data` dataset, and 'steps_per_epoch' is None, the epoch will run until the input dataset is exhausted. This argument is not supported with array inputs. validation_steps: Only relevant if `validation_data` is provided and is a `tf.data` dataset. Total number of steps (batches of samples) to draw before stopping when performing validation at the end of every epoch. If validation_data is a `tf.data` dataset and 'validation_steps' is None, validation will run until the `validation_data` dataset is exhausted. validation_freq: Only relevant if validation data is provided. Integer or `collections_abc.Container` instance (e.g. list, tuple, etc.). If an integer, specifies how many training epochs to run before a new validation run is performed, e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs validation at the end of the 1st, 2nd, and 10th epochs. max_queue_size: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum size for the generator queue. If unspecified, `max_queue_size` will default to 10. workers: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. Used for generator or `keras.utils.Sequence` input only. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. 
Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. **kwargs: Used for backwards compatibility. Returns: A `History` object. Its `History.history` attribute is a record of training loss values and metrics values at successive epochs, as well as validation loss values and validation metrics values (if applicable). Raises: RuntimeError: If the model was never compiled. ValueError: In case of mismatch between the provided input data and what the model expects. """ _keras_api_gauge.get_cell('fit').set(True) # Legacy support if 'nb_epoch' in kwargs: logging.warning( 'The `nb_epoch` argument in `fit` has been renamed `epochs`.') epochs = kwargs.pop('nb_epoch') if kwargs: raise TypeError('Unrecognized keyword arguments: ' + str(kwargs)) self._assert_compile_was_called() self._check_call_args('fit') func = self._select_training_loop(x) return func.fit( self, x=x, y=y, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_split=validation_split, validation_data=validation_data, shuffle=shuffle, class_weight=class_weight, sample_weight=sample_weight, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_freq=validation_freq, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing) def evaluate(self, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False): """Returns the loss value & metrics values for the model in test mode. Computation is done in batches. Arguments: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. - A generator or `keras.utils.Sequence` instance. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset, generator or `keras.utils.Sequence` instance, `y` should not be specified (since targets will be obtained from the iterator/dataset). batch_size: Integer or `None`. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` is your data is in the form of symbolic tensors, dataset, generators, or `keras.utils.Sequence` instances (since they generate batches). verbose: 0 or 1. Verbosity mode. 0 = silent, 1 = progress bar. sample_weight: Optional Numpy array of weights for the test samples, used for weighting the loss function. You can either pass a flat (1D) Numpy array with the same length as the input samples (1:1 mapping between weights and samples), or in the case of temporal data, you can pass a 2D array with shape `(samples, sequence_length)`, to apply a different weight to every timestep of every sample. In this case you should make sure to specify `sample_weight_mode="temporal"` in `compile()`. This argument is not supported when `x` is a dataset, instead pass sample weights as the third element of `x`. steps: Integer or `None`. Total number of steps (batches of samples) before declaring the evaluation round finished. 
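A hedged example exercising several of the `fit` arguments documented above; shapes, class weights, and epoch counts are illustrative:

```python
import numpy as np
import tensorflow as tf

x = np.random.random((100, 8)).astype(np.float32)
y = np.random.randint(0, 2, size=(100, 1))

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(8,)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='sgd', loss='binary_crossentropy',
              metrics=['accuracy'])

# Hold out the last 20% for validation, up-weight the rarer class, and run
# validation only every second epoch.
history = model.fit(
    x, y,
    batch_size=16,
    epochs=4,
    validation_split=0.2,
    class_weight={0: 1.0, 1: 3.0},
    validation_freq=2,
    verbose=2)
print(history.history)
```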
Ignored with the default value of `None`. If x is a `tf.data` dataset and `steps` is None, 'evaluate' will run until the dataset is exhausted. This argument is not supported with array inputs. callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during evaluation. See [callbacks](/api_docs/python/tf/keras/callbacks). max_queue_size: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum size for the generator queue. If unspecified, `max_queue_size` will default to 10. workers: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. Used for generator or `keras.utils.Sequence` input only. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: in case of invalid arguments. """ _keras_api_gauge.get_cell('evaluate').set(True) self._assert_compile_was_called() self._check_call_args('evaluate') func = self._select_training_loop(x) return func.evaluate( self, x=x, y=y, batch_size=batch_size, verbose=verbose, sample_weight=sample_weight, steps=steps, callbacks=callbacks, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing) def predict(self, x, batch_size=None, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False): """Generates output predictions for the input samples. Computation is done in batches. Arguments: x: Input samples. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A `tf.data` dataset. - A generator or `keras.utils.Sequence` instance. batch_size: Integer or `None`. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` is your data is in the form of symbolic tensors, dataset, generators, or `keras.utils.Sequence` instances (since they generate batches). verbose: Verbosity mode, 0 or 1. steps: Total number of steps (batches of samples) before declaring the prediction round finished. Ignored with the default value of `None`. If x is a `tf.data` dataset and `steps` is None, `predict` will run until the input dataset is exhausted. callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during prediction. See [callbacks](/api_docs/python/tf/keras/callbacks). max_queue_size: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum size for the generator queue. If unspecified, `max_queue_size` will default to 10. workers: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. 
Used for generator or `keras.utils.Sequence` input only. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. Returns: Numpy array(s) of predictions. Raises: ValueError: In case of mismatch between the provided input data and the model's expectations, or in case a stateful model receives a number of samples that is not a multiple of the batch size. """ _keras_api_gauge.get_cell('predict').set(True) self._check_call_args('predict') func = self._select_training_loop(x) return func.predict( self, x=x, batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing) def reset_metrics(self): """Resets the state of metrics.""" metrics = self._get_training_eval_metrics() for m in metrics: m.reset_states() # Reset metrics on all the distributed (cloned) models. if self._distribution_strategy: distributed_training_utils._reset_metrics(self) # pylint: disable=protected-access def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None, reset_metrics=True): """Runs a single gradient update on a single batch of data. Arguments: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset, `y` should not be specified (since targets will be obtained from the iterator). sample_weight: Optional array of the same length as x, containing weights to apply to the model's loss for each sample. In the case of temporal data, you can pass a 2D array with shape (samples, sequence_length), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode="temporal" in compile(). This argument is not supported when `x` is a dataset. class_weight: Optional dictionary mapping class indices (integers) to a weight (float) to apply to the model's loss for the samples from this class during training. This can be useful to tell the model to "pay more attention" to samples from an under-represented class. reset_metrics: If `True`, the metrics returned will be only for this batch. If `False`, the metrics will be statefully accumulated across batches. Returns: Scalar training loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: In case of invalid user-provided arguments. 
""" self._assert_compile_was_called() self._check_call_args('train_on_batch') if self._experimental_run_tf_function: outputs = training_v2_utils.train_on_batch( self, x, y=y, sample_weight=sample_weight, class_weight=class_weight, reset_metrics=reset_metrics) outputs = (outputs['total_loss'] + outputs['output_losses'] + outputs['metrics']) outputs = [ training_v2_utils._non_none_constant_value(v) for v in outputs] # pylint: disable=protected-access if len(outputs) == 1: outputs = outputs[0] return outputs # If at this point we are in the replica context, then it is okay to execute # the Eager code path. The expected way to get here is to call `fit` that # calls `train_on_batch` on each replica. if (self._distribution_strategy and distribution_strategy_context.in_cross_replica_context()): raise NotImplementedError('`train_on_batch` is not supported for models ' 'distributed with tf.distribute.Strategy.') # Validate and standardize user data. x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight, extract_tensors_from_dataset=True) # If `self._distribution_strategy` is True, then we are in a replica context # at this point because of the check above. `train_on_batch` is being run # for each replica by `self._distribution_strategy` and the same code path # as Eager is expected to be taken. if self.run_eagerly or self._distribution_strategy: output_dict = training_eager.train_on_batch( self, x, y, sample_weights=sample_weights, output_loss_metrics=self._output_loss_metrics) outputs = (output_dict['total_loss'] + output_dict['output_losses'] + output_dict['metrics']) outputs = [ training_v2_utils._non_none_constant_value(v) for v in outputs] # pylint: disable=protected-access else: x = training_utils.ModelInputs(x).as_list() ins = x + (y or []) + (sample_weights or []) if not isinstance(K.symbolic_learning_phase(), int): ins += [True] # Add learning phase value. self._update_sample_weight_modes(sample_weights=sample_weights) self._make_train_function() outputs = self.train_function(ins) # pylint: disable=not-callable if reset_metrics: self.reset_metrics() if len(outputs) == 1: return outputs[0] return outputs def test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True): """Test the model on a single batch of samples. Arguments: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset `y` should not be specified (since targets will be obtained from the iterator). sample_weight: Optional array of the same length as x, containing weights to apply to the model's loss for each sample. In the case of temporal data, you can pass a 2D array with shape (samples, sequence_length), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode="temporal" in compile(). This argument is not supported when `x` is a dataset. reset_metrics: If `True`, the metrics returned will be only for this batch. If `False`, the metrics will be statefully accumulated across batches. 
Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: In case of invalid user-provided arguments. """ self._assert_compile_was_called() self._check_call_args('test_on_batch') if self._experimental_run_tf_function: outputs = training_v2_utils.test_on_batch( self, x, y=y, sample_weight=sample_weight, reset_metrics=reset_metrics) outputs = (outputs['total_loss'] + outputs['output_losses'] + outputs['metrics']) outputs = [ training_v2_utils._non_none_constant_value(v) for v in outputs] # pylint: disable=protected-access if len(outputs) == 1: outputs = outputs[0] return outputs if (self._distribution_strategy and distribution_strategy_context.in_cross_replica_context()): raise NotImplementedError('`test_on_batch` is not supported for models ' 'distributed with tf.distribute.Strategy.') # Validate and standardize user data. x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, extract_tensors_from_dataset=True) # If `self._distribution_strategy` is True, then we are in a replica context # at this point. if self.run_eagerly or self._distribution_strategy: output_dict = training_eager.test_on_batch( self, x, y, sample_weights=sample_weights, output_loss_metrics=self._output_loss_metrics) outputs = (output_dict['total_loss'] + output_dict['output_losses'] + output_dict['metrics']) outputs = [ training_v2_utils._non_none_constant_value(v) for v in outputs] # pylint: disable=protected-access else: x = training_utils.ModelInputs(x).as_list() inputs = x + (y or []) + (sample_weights or []) self._update_sample_weight_modes(sample_weights=sample_weights) self._make_test_function() outputs = self.test_function(inputs) # pylint: disable=not-callable if reset_metrics: self.reset_metrics() if len(outputs) == 1: return outputs[0] return outputs def predict_on_batch(self, x): """Returns predictions for a single batch of samples. Arguments: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A `tf.data` dataset. Returns: Numpy array(s) of predictions. Raises: ValueError: In case of mismatch between given number of inputs and expectations of the model. """ self._check_call_args('predict_on_batch') if self._experimental_run_tf_function: return training_v2_utils.predict_on_batch(self, x) if (self._distribution_strategy and distribution_strategy_context.in_cross_replica_context()): raise NotImplementedError( '`predict_on_batch` is not supported for models distributed with' ' tf.distribute.Strategy.') # Validate and standardize user data. inputs, _, _ = self._standardize_user_data( x, extract_tensors_from_dataset=True) # If `self._distribution_strategy` is True, then we are in a replica context # at this point. 
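A hedged sketch of a manual batch-level loop built on the `*_on_batch` methods documented above, using `reset_metrics=False` so metric state accumulates across the batches of an epoch:

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='sgd', loss='mse', metrics=['mae'])

x = np.random.random((64, 4)).astype(np.float32)
y = np.random.random((64, 1)).astype(np.float32)

model.reset_metrics()
for start in range(0, len(x), 16):
  batch_x, batch_y = x[start:start + 16], y[start:start + 16]
  # reset_metrics=False keeps loss/metric values accumulating across batches.
  results = model.train_on_batch(batch_x, batch_y, reset_metrics=False)

print(dict(zip(model.metrics_names, results)))
preds = model.predict_on_batch(x[:16])
```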
if self.run_eagerly or self._distribution_strategy: inputs = training_utils.cast_if_floating_dtype(inputs) if isinstance(inputs, collections_abc.Sequence): # Unwrap lists with only one input, as we do when training on batch if len(inputs) == 1: inputs = inputs[0] return self(inputs) # pylint: disable=not-callable self._make_predict_function() outputs = self.predict_function(inputs) if len(outputs) == 1: return outputs[0] return outputs def fit_generator(self, generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, validation_freq=1, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0): """Fits the model on data yielded batch-by-batch by a Python generator. The generator is run in parallel to the model, for efficiency. For instance, this allows you to do real-time data augmentation on images on CPU in parallel to training your model on GPU. The use of `keras.utils.Sequence` guarantees the ordering and guarantees the single use of every input per epoch when using `use_multiprocessing=True`. Arguments: generator: A generator or an instance of `Sequence` (`keras.utils.Sequence`) object in order to avoid duplicate data when using multiprocessing. The output of the generator must be either - a tuple `(inputs, targets)` - a tuple `(inputs, targets, sample_weights)`. This tuple (a single output of the generator) makes a single batch. Therefore, all arrays in this tuple must have the same length (equal to the size of this batch). Different batches may have different sizes. For example, the last batch of the epoch is commonly smaller than the others, if the size of the dataset is not divisible by the batch size. The generator is expected to loop over its data indefinitely. An epoch finishes when `steps_per_epoch` batches have been seen by the model. steps_per_epoch: Total number of steps (batches of samples) to yield from `generator` before declaring one epoch finished and starting the next epoch. It should typically be equal to the number of samples of your dataset divided by the batch size. Optional for `Sequence`: if unspecified, will use the `len(generator)` as a number of steps. epochs: Integer, total number of iterations on the data. verbose: Verbosity mode, 0, 1, or 2. callbacks: List of callbacks to be called during training. validation_data: This can be either - a generator for the validation data - a tuple (inputs, targets) - a tuple (inputs, targets, sample_weights). validation_steps: Only relevant if `validation_data` is a generator. Total number of steps (batches of samples) to yield from `generator` before stopping. Optional for `Sequence`: if unspecified, will use the `len(validation_data)` as a number of steps. validation_freq: Only relevant if validation data is provided. Integer or `collections_abc.Container` instance (e.g. list, tuple, etc.). If an integer, specifies how many training epochs to run before a new validation run is performed, e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs validation at the end of the 1st, 2nd, and 10th epochs. class_weight: Dictionary mapping class indices to a weight for the class. max_queue_size: Integer. Maximum size for the generator queue. If unspecified, `max_queue_size` will default to 10. workers: Integer. Maximum number of processes to spin up when using process-based threading. 
If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. shuffle: Boolean. Whether to shuffle the order of the batches at the beginning of each epoch. Only used with instances of `Sequence` (`keras.utils.Sequence`). Has no effect when `steps_per_epoch` is not `None`. initial_epoch: Epoch at which to start training (useful for resuming a previous training run) Returns: A `History` object. Example: ```python def generate_arrays_from_file(path): while 1: f = open(path) for line in f: # create numpy arrays of input data # and labels, from each line in the file x1, x2, y = process_line(line) yield ({'input_1': x1, 'input_2': x2}, {'output': y}) f.close() model.fit_generator(generate_arrays_from_file('/my_file.txt'), steps_per_epoch=10000, epochs=10) ``` Raises: ValueError: In case the generator yields data in an invalid format. """ if self._distribution_strategy: raise NotImplementedError('`fit_generator` is not supported for ' 'models compiled with tf.distribute.Strategy.') _keras_api_gauge.get_cell('fit_generator').set(True) self._check_call_args('fit_generator') return training_generator.fit_generator( self, generator, steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_data=validation_data, validation_steps=validation_steps, validation_freq=validation_freq, class_weight=class_weight, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, shuffle=shuffle, initial_epoch=initial_epoch, steps_name='steps_per_epoch') def evaluate_generator(self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0): """Evaluates the model on a data generator. The generator should return the same kind of data as accepted by `test_on_batch`. Arguments: generator: Generator yielding tuples (inputs, targets) or (inputs, targets, sample_weights) or an instance of `keras.utils.Sequence` object in order to avoid duplicate data when using multiprocessing. steps: Total number of steps (batches of samples) to yield from `generator` before stopping. Optional for `Sequence`: if unspecified, will use the `len(generator)` as a number of steps. callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during evaluation. See [callbacks](/api_docs/python/tf/keras/callbacks). max_queue_size: maximum size for the generator queue workers: Integer. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. verbose: Verbosity mode, 0 or 1. Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. 
Raises: ValueError: in case of invalid arguments. Raises: ValueError: In case the generator yields data in an invalid format. """ if self._distribution_strategy: raise NotImplementedError('`evaluate_generator` is not supported for ' 'models compiled with tf.distribute.Strategy.') _keras_api_gauge.get_cell('evaluate_generator').set(True) self._check_call_args('evaluate_generator') return training_generator.evaluate_generator( self, generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose, callbacks=callbacks) def predict_generator(self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0): """Generates predictions for the input samples from a data generator. The generator should return the same kind of data as accepted by `predict_on_batch`. Arguments: generator: Generator yielding batches of input samples or an instance of `keras.utils.Sequence` object in order to avoid duplicate data when using multiprocessing. steps: Total number of steps (batches of samples) to yield from `generator` before stopping. Optional for `Sequence`: if unspecified, will use the `len(generator)` as a number of steps. callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during prediction. See [callbacks](/api_docs/python/tf/keras/callbacks). max_queue_size: Maximum size for the generator queue. workers: Integer. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. verbose: verbosity mode, 0 or 1. Returns: Numpy array(s) of predictions. Raises: ValueError: In case the generator yields data in an invalid format. """ if self._distribution_strategy: raise NotImplementedError('`predict_generator` is not supported for ' 'models compiled with tf.distribute.Strategy.') _keras_api_gauge.get_cell('predict_generator').set(True) return training_generator.predict_generator( self, generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose, callbacks=callbacks) def _check_call_args(self, method_name): """Check that `call` has only one positional arg.""" # Always allow first arg, regardless of arg name. fullargspec = tf_inspect.getfullargspec(self.call) if fullargspec.defaults: positional_args = fullargspec.args[:-len(fullargspec.defaults)] else: positional_args = fullargspec.args if 'training' in positional_args: positional_args.remove('training') # self and first arg can be positional. if len(positional_args) > 2: extra_args = positional_args[2:] raise ValueError( 'Models passed to `' + method_name + '` can only have `training` ' 'and the first argument in `call` as positional arguments, ' 'found: ' + str(extra_args) + '.') def _set_optimizer(self, optimizer): """Sets self.optimizer. Sets self.optimizer to `optimizer`, potentially wrapping it with a LossScaleOptimizer. Args: optimizer: The optimizer(s) to assign to self.optimizer. 
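The generator-based endpoints above accept a `keras.utils.Sequence` to avoid duplicate data when workers are involved; a minimal sketch with a hypothetical Sequence:

```python
import math
import numpy as np
import tensorflow as tf


class EvalSequence(tf.keras.utils.Sequence):
  """Deterministic batches; each index maps to a fixed slice of the data."""

  def __init__(self, x, y, batch_size=8):
    self.x, self.y, self.batch_size = x, y, batch_size

  def __len__(self):
    return math.ceil(len(self.x) / self.batch_size)

  def __getitem__(self, idx):
    s = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
    return self.x[s], self.y[s]


model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')

seq = EvalSequence(np.random.random((32, 4)).astype(np.float32),
                   np.random.random((32, 1)).astype(np.float32))
# steps defaults to len(seq); workers > 1 uses thread-based prefetching here.
loss = model.evaluate_generator(seq, workers=2, use_multiprocessing=False)
preds = model.predict_generator(seq)
```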
""" if isinstance(optimizer, (list, tuple)): self.optimizer = [optimizers.get(opt) for opt in optimizer] else: self.optimizer = optimizers.get(optimizer) if (self._dtype_policy.loss_scale is not None and not isinstance(self.optimizer, loss_scale_optimizer.LossScaleOptimizer)): if isinstance(self.optimizer, list): raise ValueError('When a dtype policy with a loss scale is used, you ' 'can only pass a single optimizer. Using policy %s ' 'and got optimizers: %s' % self._dtype_policy, self.optimizer) if not isinstance(self.optimizer, optimizer_v2.OptimizerV2): raise ValueError('"optimizer" must be an instance of ' 'tf.keras.optimizers.Optimizer when a dype policy ' 'with a loss scale used, but got: %s. Using policy: ' '%s' % (self.optimizer, self._dtype_policy)) self.optimizer = loss_scale_optimizer.LossScaleOptimizer( self.optimizer, self._dtype_policy.loss_scale) if (isinstance(self.optimizer, loss_scale_optimizer.LossScaleOptimizer) and self._dtype_policy.loss_scale and self.optimizer.loss_scale != self._dtype_policy.loss_scale): logging.warning('LossScale of LossScaleOptimizer passed to compile (%s) ' 'is not the same as the dtype policy\'s loss scale (%s). ' 'Because the dtype policy has a loss scale, you should ' 'pass an optimizer that is not wrapped with a ' 'LossScaleOptimizer,' % (self.optimizer.loss_scale, self._dtype_policy.loss_scale)) def _prepare_validation_data(self, validation_data, batch_size, validation_steps): """Unpack and check the validation data.""" val_x, val_y, val_sample_weights = training_utils.unpack_validation_data( validation_data) return self._standardize_user_data( val_x, val_y, sample_weight=val_sample_weights, batch_size=batch_size, steps=validation_steps, steps_name='validation_steps') def _validate_compile_param_for_distribution_strategy( self, run_eagerly, sample_weight_mode, target_tensors, weighted_metrics): # Validate that arguments passed by the user to `compile` are supported by # tf.distribute.Strategy. if self._distribution_strategy: if sample_weight_mode: raise NotImplementedError('sample_weight_mode is not supported with ' 'tf.distribute.Strategy.') if weighted_metrics: raise NotImplementedError('weighted_metrics is not supported with ' 'tf.distribute.Strategy.') if target_tensors: raise ValueError('target_tensors is not supported with ' 'tf.distribute.Strategy.') if run_eagerly: raise ValueError( 'We currently do not support enabling `run_eagerly` with ' 'distribution strategy.') if (distributed_training_utils.is_distributing_by_cloning(self) and (not self.built or not self.inputs or not self.outputs)): raise ValueError( 'We currently do not support distribution strategy with a ' '`Sequential` model that is created without `input_shape`/' '`input_dim` set in its first layer or a subclassed model.') def _process_target_tensor_for_compile(self, target_tensors): if self.run_eagerly: # target tensor is not supported with run_eagerly. Create a list with None # as placeholder for each output. return [None for _ in self.output_names] if target_tensors is not None and not (isinstance(target_tensors, list) and target_tensors == []): # pylint: disable=g-explicit-bool-comparison if isinstance(target_tensors, list): if len(target_tensors) != len(self.outputs): raise ValueError( 'When passing a list as `target_tensors`, ' 'it should have one entry per model output. 
' 'The model has %s outputs, but you passed target_tensors=%s' % (len(self.outputs), target_tensors)) elif isinstance(target_tensors, dict): unexpected_target_tensor_names = set(target_tensors.keys()).difference( self.output_names) if unexpected_target_tensor_names: raise ValueError( 'Unknown entry in `target_tensors` dictionary: "{name}". ' 'Only expected the following keys: {keys}'.format( name=unexpected_target_tensor_names, keys=str(self.output_names))) tmp_target_tensors = [] for name in self.output_names: tmp_target_tensors.append(target_tensors.get(name, None)) target_tensors = tmp_target_tensors elif tensor_util.is_tensor(target_tensors): target_tensors = [target_tensors] else: raise TypeError('Expected `target_tensors` to be a list or tuple or ' 'dict or a single tensor, but got:', target_tensors) else: # In case target tensor is empty or None, create a list with Nones # that has same length as self.output_names. With that, the None check of # target tensor can be skipped downstream. target_tensors = [None for _ in self.output_names] return target_tensors def _compile_eagerly(self, metrics, weighted_metrics, sample_weight_mode): # Prepare sample weight modes. List with the same length as model outputs. training_utils.prepare_sample_weight_modes( self._training_endpoints, sample_weight_mode) # Prepare sample weights. self._prepare_sample_weights() # Save all metric attributes per output of the model. self._cache_output_metric_attributes(metrics, weighted_metrics) self.total_loss = None # Set metric attributes on model. self._set_metric_attributes() self._collected_trainable_weights = self._unique_trainable_weights def _update_sample_weight_modes(self, sample_weights=None): """Updates sample weight modes based on training/eval inputs. Sample weight placeholders will be created for all or no outputs based on whether sample_weight is provided for any output. If model contains `_sample_weight_modes` we check if the input `sample_weights` corresponds to the sample weight modes. 1. Set sample weight mode to be 'temporal' for output i, if `compile` sample_weight_mode was set to `temporal` and sample weight inputs are given for one or more outputs. 2. Set sample weight mode to be 'samplewise' for output i, if `compile` sample_weight_mode was not set and sample weight inputs are given for one or more outputs. 3. Reset sample weight mode to None for output i if sample weight mode was set but there is no sample weight input. Args: sample_weights: List of sample weights of the same length as model outputs or None. """ if not self._is_compiled: return if sample_weights and any([s is not None for s in sample_weights]): for endpoint in self._training_endpoints: endpoint.sample_weight_mode = ( endpoint.sample_weight_mode or 'samplewise') else: for endpoint in self._training_endpoints: endpoint.sample_weight_mode = None def _recompile_weights_loss_and_weighted_metrics(self): if not self._is_compiled: return False recompile = any([e.sample_weights_mismatch() for e in self._training_endpoints]) if recompile: self._compile_weights_loss_and_weighted_metrics() return recompile @trackable.no_automatic_dependency_tracking def _compile_weights_loss_and_weighted_metrics(self, sample_weights=None): """Compiles the model loss and weighted metric sub-graphs. This may be used to set graph tensors as sample weights (instead of creating placeholders). 
This functionality is necessary for `tf.keras.estimator.model_to_estimator`, which calls Keras models in a v1 graph, and creates iterator tensors for inputs, targets, and sample weights. Args: sample_weights: List of tensors to use as the sample weights. Must be the same length as the number of outputs. If left as `None`, placeholders are used instead. """ with K.get_graph().as_default(): if sample_weights is not None: self._update_sample_weight_modes(sample_weights) self._prepare_sample_weights(sample_weights) masks = self._prepare_output_masks() # Compute weighted metrics. self._handle_metrics( self.outputs, targets=self._targets, skip_target_masks=self._prepare_skip_target_masks(), sample_weights=self.sample_weights, masks=masks, return_weighted_metrics=True) # Compute total loss. # Used to keep track of the total loss value (stateless). # eg., total_loss = loss_weight_1 * output_1_loss_fn(...) + # loss_weight_2 * output_2_loss_fn(...) + # layer losses. self.total_loss = self._prepare_total_loss(masks) def _prepare_skip_target_masks(self): """Boolean mask for whether the target in the output list should be skipped. If the loss function corresponding to a model output is None, then this output will be skipped during total loss calculation and feed targets preparation. Returns: A boolean list for whether the corresponding target in the output list should be skipped during loss calculation. """ return [l is None for l in self.loss_functions] def _prepare_output_masks(self): """Returns masks corresponding to model outputs.""" return [getattr(x, '_keras_mask', None) for x in self.outputs] def _prepare_total_loss(self, masks): """Computes total loss from loss functions. Arguments: masks: List of mask values corresponding to each model output. Returns: A list of loss weights of python floats. Raises: TypeError: If model run_eagerly is True. """ if self.run_eagerly: raise TypeError('total loss can not be computed when compiled with ' 'run_eagerly = True.') total_loss = None with K.name_scope('loss'): for endpoint, mask in zip(self._training_endpoints, masks): if endpoint.should_skip_target(): continue y_true = endpoint.training_target.target y_pred = endpoint.output loss_fn = endpoint.loss_fn loss_weight = endpoint.loss_weight loss_name = endpoint.loss_name() sample_weight = endpoint.sample_weight with K.name_scope(loss_name): if mask is not None: mask = math_ops.cast(mask, y_pred.dtype) # Update weights with mask. if sample_weight is None: sample_weight = mask else: # Update dimensions of weights to match with mask if possible. mask, _, sample_weight = ( tf_losses_utils.squeeze_or_expand_dimensions( mask, sample_weight=sample_weight)) sample_weight *= mask if hasattr(loss_fn, 'reduction'): per_sample_losses = loss_fn.call(y_true, y_pred) weighted_losses = losses_utils.compute_weighted_loss( per_sample_losses, sample_weight=sample_weight, reduction=losses_utils.ReductionV2.NONE) loss_reduction = loss_fn.reduction # `AUTO` loss reduction defaults to `SUM_OVER_BATCH_SIZE` for all # compile use cases. if loss_reduction == losses_utils.ReductionV2.AUTO: loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE # Compute the stateless loss value. output_loss = losses_utils.reduce_weighted_loss( weighted_losses, reduction=loss_reduction) else: # Compute the stateless loss value for a custom loss class. 
# Here we assume that the class takes care of loss reduction # because if this class returns a vector value we cannot # differentiate between use case where a custom optimizer # expects a vector loss value vs unreduced per-sample loss value. output_loss = loss_fn(y_true, y_pred, sample_weight=sample_weight) loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE if len(self.outputs) > 1: # Keep track of stateful result tensor for the loss. endpoint.output_loss_metric(output_loss) # Scale output loss for distribution. For custom losses we assume # reduction was mean. if loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE: output_loss = losses_utils.scale_loss_for_distribution(output_loss) if total_loss is None: total_loss = loss_weight * output_loss else: total_loss += loss_weight * output_loss if total_loss is None: if not self.losses: raise ValueError('The model cannot be compiled ' 'because it has no loss to optimize.') else: total_loss = 0. # Add regularization penalties and other layer-specific losses. custom_losses = self.get_losses_for(None) + self.get_losses_for( self.inputs) if custom_losses: total_loss += losses_utils.scale_loss_for_distribution( math_ops.add_n(custom_losses)) return total_loss def _get_callback_model(self): """Returns the Callback Model for this Model.""" if hasattr(self, '_replicated_model') and self._replicated_model: # When using training_distributed, we set the callback model # to an instance of the `DistributedModel` that we create in # the `compile` call. The `DistributedModel` is initialized # with the first replicated model. We need to set the callback # model to a DistributedModel to allow us to override saving # and loading weights when we checkpoint the model during training. return self._replicated_model if hasattr(self, 'callback_model') and self.callback_model: return self.callback_model return self def _make_callback_model(self, grouped_model): first_replicated_model = self._distribution_strategy.unwrap( grouped_model)[0] # We initialize the callback model with the first replicated model. self._replicated_model = DistributedCallbackModel(first_replicated_model) self._replicated_model.set_original_model(self) def _validate_or_infer_batch_size(self, batch_size, steps, x): """Validates that the `batch_size` provided is consistent with InputLayer. It's possible that the user specified a static batch size in their InputLayer. If so, this method checks the provided `batch_size` and `x` arguments are consistent with this static batch size. Also, if `batch_size` is `None`, this method will attempt to infer the batch size from the static batch size of the InputLayer. Lastly, ValueError will be raised if `x` is a tf.data.Dataset and `batch_size` is specified as we expect users to provide batched datasets. Arguments: batch_size: The batch_size provided as an argument to fit/evaluate/predict. steps: The steps provided as an argument to fit/evaluate/predict. x: The data passed as `x` to fit/evaluate/predict. Returns: The validated batch_size, auto-inferred from the first layer if not provided. """ if (isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2, data_utils.Sequence)) or tf_inspect.isgenerator(x)): if batch_size is not None: raise ValueError( 'The `batch_size` argument must not be specified for the given ' 'input type. Received input: {}, batch_size: {}'.format( x, batch_size)) return layers = super(Model, self).layers # Avoids the override in Sequential. if layers: first_layer = layers[0] # The per-replica static batch size. 
static_batch_size = training_utils.get_static_batch_size(first_layer) if static_batch_size is not None: # Determine number of times the user-supplied batch size will be split. if (self._distribution_strategy and distributed_training_utils.global_batch_size_supported( self._distribution_strategy)): num_splits_for_ds = self._distribution_strategy.num_replicas_in_sync else: num_splits_for_ds = 1 # Check `batch_size` argument is consistent with InputLayer. if batch_size is not None: if batch_size % num_splits_for_ds != 0: raise ValueError('The `batch_size` argument value {} cannot be ' 'divisible by number of replicas {}'.format( batch_size, num_splits_for_ds)) per_replica_batch_size = batch_size // num_splits_for_ds if per_replica_batch_size != static_batch_size: raise ValueError('The `batch_size` argument value {} is ' 'incompatible with the specified batch size of ' 'your Input Layer: {}'.format( per_replica_batch_size, static_batch_size)) # Check Dataset/Iterator batch size is consistent with InputLayer. if isinstance(x, (dataset_ops.DatasetV2, iterator_ops.Iterator, iterator_ops.IteratorV2)): ds_batch_size = tensor_shape.as_dimension( nest.flatten(dataset_ops.get_legacy_output_shapes(x))[0][0]).value if ds_batch_size is not None: if ds_batch_size % num_splits_for_ds != 0: raise ValueError( 'The batch output shape of your `Dataset` {} ' 'cannot be divisible by number of replicas {}'.format( ds_batch_size, num_splits_for_ds)) ds_per_replica_batch_size = ds_batch_size // num_splits_for_ds if ds_per_replica_batch_size != static_batch_size: raise ValueError('The batch output shape of your `Dataset` is ' '{}, which is incompatible with the specified ' 'batch size of your Input Layer: {}'.format( ds_per_replica_batch_size, static_batch_size)) # Set inferred batch size from the InputLayer. if steps is None: batch_size = static_batch_size * num_splits_for_ds if batch_size is None and steps is None: # Backwards compatibility batch_size = 32 return batch_size def _prepare_sample_weights(self, sample_weights=None): """Sets sample weight attribute on the model.""" # List with the same length as model outputs. if sample_weights is not None: if len(sample_weights) != len(self._training_endpoints): raise ValueError('Provided sample weights must have same length as the ' 'number of outputs. Expected: {}, got: {}.'.format( len(self._training_endpoints), len(sample_weights))) else: sample_weights = [None] * len(self._training_endpoints) for endpoint, weight in zip(self._training_endpoints, sample_weights): endpoint.populate_sample_weight(weight, endpoint.sample_weight_mode) def _cache_output_metric_attributes(self, metrics, weighted_metrics): """Caches metric name and function attributes for every model output.""" output_shapes = [] for output in self.outputs: if output is None or output.shape.rank is None: output_shapes.append(None) else: output_shapes.append(output.shape.as_list()) self._per_output_metrics = training_utils.collect_per_output_metric_info( metrics, self.output_names, output_shapes, self.loss_functions) self._per_output_weighted_metrics = ( training_utils.collect_per_output_metric_info( weighted_metrics, self.output_names, output_shapes, self.loss_functions, is_weighted=True)) def _add_unique_metric_name(self, metric_name, output_index): """Makes the metric name unique and adds it to the model's metric name list. If there are multiple outputs for which the metrics are calculated, the metric names have to be made unique by appending an integer. 
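For example, given two outputs named `out_a` and `out_b` (illustrative names) and the metric `acc`, the per-output metric names become `out_a_acc` and `out_b_acc`; if a name is still taken, an integer index is appended, e.g. `out_a_acc_1`.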
Arguments: metric_name: Metric name that corresponds to the metric specified by the user. For example: 'acc'. output_index: The index of the model output for which the metric name is being added. Returns: string, name of the model's unique metric name """ if len(self.output_names) > 1: metric_name = '%s_%s' % (self.output_names[output_index], metric_name) j = 1 base_metric_name = metric_name while metric_name in self.metrics_names: metric_name = '%s_%d' % (base_metric_name, j) j += 1 return metric_name def _init_metric_attributes(self): """Initialized model metric attributes.""" # List of stateful metric functions. Used for resetting metric state during # training/eval. self._compile_metric_functions = [] def _set_per_output_metric_attributes(self, metrics_dict, output_index): """Sets the metric attributes on the model for the given output. Arguments: metrics_dict: A dict with metric names as keys and metric fns as values. output_index: The index of the model output for which the metric attributes are added. Returns: Metrics dict updated with unique metric names as keys. """ updated_metrics_dict = collections.OrderedDict() for metric_name, metric_fn in metrics_dict.items(): metric_name = self._add_unique_metric_name(metric_name, output_index) # Update the name on the metric class to be the unique generated name. metric_fn._name = metric_name # pylint: disable=protected-access updated_metrics_dict[metric_name] = metric_fn # Keep track of metric name and function. self._compile_metric_functions.append(metric_fn) return updated_metrics_dict def _set_metric_attributes(self): """Sets the metric attributes on the model for all the model outputs.""" updated_per_output_metrics = [] updated_per_output_weighted_metrics = [] for i, endpoint in enumerate(self._training_endpoints): if endpoint.should_skip_target(): updated_per_output_metrics.append(self._per_output_metrics[i]) updated_per_output_weighted_metrics.append( self._per_output_weighted_metrics[i]) continue updated_per_output_metrics.append( self._set_per_output_metric_attributes(self._per_output_metrics[i], i)) updated_per_output_weighted_metrics.append( self._set_per_output_metric_attributes( self._per_output_weighted_metrics[i], i)) # Create a metric wrapper for each output loss. This computes mean of an # output loss across mini-batches (irrespective of how we reduce within a # batch). if len(self._training_endpoints) > 1: for endpoint in self._training_endpoints: if not endpoint.should_skip_target(): endpoint.output_loss_metric = metrics_module.Mean( name=endpoint.loss_name()) self._per_output_metrics = updated_per_output_metrics self._per_output_weighted_metrics = updated_per_output_weighted_metrics def _handle_per_output_metrics(self, metrics_dict, y_true, y_pred, mask, weights=None): """Calls metric functions for a single output. Arguments: metrics_dict: A dict with metric names as keys and metric fns as values. y_true: Target output. y_pred: Predicted output. mask: Computed mask value for the current output. weights: Weights to be applied on the current output. Returns: A list of metric result tensors. 
""" metric_results = [] for metric_name, metric_fn in metrics_dict.items(): with K.name_scope(metric_name): metric_result = training_utils.call_metric_function( metric_fn, y_true, y_pred, weights=weights, mask=mask) metric_results.append(metric_result) return metric_results def _handle_metrics(self, outputs, targets=None, skip_target_masks=None, sample_weights=None, masks=None, return_weighted_metrics=False, return_weighted_and_unweighted_metrics=False): """Handles calling metric functions. Arguments: outputs: List of outputs (predictions). targets: List of targets. skip_target_masks: Optional. List of boolean for whether the corresponding target should be ignored or not. sample_weights: Optional list of sample weight arrays. masks: List of computed output mask values. return_weighted_metrics: Flag that indicates whether weighted metrics should be computed instead of unweighted metrics. This flag is ignored when `return_weighted_and_unweighted_metrics` is enabled. return_weighted_and_unweighted_metrics: Flag that is used to indicate whether both weighted and unweighted metrics should be computed. When this is not enabled, we use `return_weighted_metrics` param to indicate whether weighted or unweighted metrics should be returned. Returns: A list of metric result tensors. """ # TODO(scottzhu): Update this to use the new training_endpoints. Currently # the eager and graph logic is bit different. skip_target_masks = skip_target_masks or [False] * len(outputs) metric_results = [] with K.name_scope('metrics'): # Invoke all metrics added using `compile`. for i in range(len(outputs)): if skip_target_masks[i]: continue output = outputs[i] if outputs else None target = targets[i] if targets else None output_mask = masks[i] if masks else None if (return_weighted_and_unweighted_metrics or not return_weighted_metrics): metric_results.extend( self._handle_per_output_metrics(self._per_output_metrics[i], target, output, output_mask)) if return_weighted_and_unweighted_metrics or return_weighted_metrics: metric_results.extend( self._handle_per_output_metrics( self._per_output_weighted_metrics[i], target, output, output_mask, weights=sample_weights[i] if sample_weights else None)) return metric_results def _check_trainable_weights_consistency(self): """Check trainable weights count consistency. This will raise a warning if `trainable_weights` and `_collected_trainable_weights` are inconsistent (i.e. have different number of parameters). Inconsistency will typically arise when one modifies `model.trainable` without calling `model.compile` again. """ if not hasattr(self, '_collected_trainable_weights'): return if (len(self._unique_trainable_weights) != len(self._collected_trainable_weights)): logging.log_first_n( logging.WARN, 'Discrepancy between trainable weights and collected' ' trainable weights, did you set `model.trainable`' ' without calling `model.compile` after ?', 1) def _make_train_function(self): has_recompiled = self._recompile_weights_loss_and_weighted_metrics() self._check_trainable_weights_consistency() if isinstance(self.optimizer, list): raise ValueError('The `optimizer` in `compile` should be a single ' 'optimizer.') # If we have re-compiled the loss/weighted metric sub-graphs then create # train function even if one exists already. This is because # `_feed_sample_weights` list has been updated on re-copmpile. if getattr(self, 'train_function', None) is None or has_recompiled: # Restore the compiled trainable state. 
current_trainable_state = self._get_trainable_state() self._set_trainable_state(self._compiled_trainable_state) inputs = (self._feed_inputs + self._feed_targets + self._feed_sample_weights) if not isinstance(K.symbolic_learning_phase(), int): inputs += [K.symbolic_learning_phase()] with K.get_graph().as_default(): with K.name_scope('training'): # Training updates updates = self.optimizer.get_updates( params=self._collected_trainable_weights, loss=self.total_loss) # Unconditional updates updates += self.get_updates_for(None) # Conditional updates relevant to this model updates += self.get_updates_for(self.inputs) metrics = self._get_training_eval_metrics() metrics_tensors = [ m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access ] with K.name_scope('training'): # Gets loss and metrics. Updates weights at each call. fn = K.function( inputs, [self.total_loss] + metrics_tensors, updates=updates, name='train_function', **self._function_kwargs) setattr(self, 'train_function', fn) # Restore the current trainable state self._set_trainable_state(current_trainable_state) def _make_test_function(self): has_recompiled = self._recompile_weights_loss_and_weighted_metrics() # If we have re-compiled the loss/weighted metric sub-graphs then create # test function even if one exists already. This is because # `_feed_sample_weights` list has been updated on re-copmpile. if getattr(self, 'test_function', None) is None or has_recompiled: inputs = (self._feed_inputs + self._feed_targets + self._feed_sample_weights) with K.get_graph().as_default(): metrics = self._get_training_eval_metrics() metrics_tensors = [ m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access ] with K.name_scope('evaluation'): updates = self.state_updates # Return loss and metrics, no gradient updates. # Does update the network states. fn = K.function( inputs, [self.total_loss] + metrics_tensors, updates=updates, name='test_function', **self._function_kwargs) setattr(self, 'test_function', fn) def _make_predict_function(self): if not hasattr(self, 'predict_function'): self.predict_function = None if self.predict_function is None: inputs = self._feed_inputs # Gets network outputs. Does not update weights. # Does update the network states. kwargs = getattr(self, '_function_kwargs', {}) with K.name_scope(ModeKeys.PREDICT): self.predict_function = K.function( inputs, self.outputs, updates=self.state_updates, name='predict_function', **kwargs) def _make_execution_function(self, mode): if mode == ModeKeys.TRAIN: self._make_train_function() return self.train_function if mode == ModeKeys.TEST: self._make_test_function() return self.test_function if mode == ModeKeys.PREDICT: self._make_predict_function() return self.predict_function def _distribution_standardize_user_data(self, x, y=None, sample_weight=None, class_weight=None, batch_size=None, validation_split=0, shuffle=False, epochs=1, allow_partial_batch=False): """Runs validation checks on input and target data passed by the user. This is called when using tf.distribute.Strategy to train, evaluate or serve the model. Args: x: Input data. A numpy array or `tf.data` dataset. y: Target data. A numpy array or None if x is a `tf.data` dataset. sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in `x`. class_weight: An optional class-weight array by the user to weight the importance of samples in `x` based on the class they belong to, as conveyed by `y`. 
batch_size: Integer batch size. If provided, it is used to run additional validation checks on stateful models. validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. shuffle: Boolean whether to shuffle the training data before each epoch. epochs: Integer epochs. If > 1, repeat the numpy training data epochs times when converting to training dataset. allow_partial_batch: Boolean whether to enforce that all batches have the same size. Returns: Dataset instance. Raises: ValueError: In case of invalid user-provided data. RuntimeError: If the model was never compiled. """ if class_weight: raise NotImplementedError('`class_weight` is currently not supported ' 'when using tf.distribute.Strategy.') if (sample_weight is not None and sample_weight.all() and distributed_training_utils.is_tpu_strategy( self._distribution_strategy)): raise NotImplementedError('`sample_weight` is currently not supported ' 'when using TPUStrategy.') if (self.stateful and distributed_training_utils.is_tpu_strategy( self._distribution_strategy) and self._distribution_strategy. num_replicas_in_sync != 1): raise ValueError('Single core must be used for computation on ' 'stateful models. Consider adding `device_assignment` ' 'parameter to TPUStrategy using\n' 'topology = tf.contrib.distribute.' 'initialize_tpu_system()\n' 'device_assignment = tf.contrib.tpu.DeviceAssignment(' 'topology, core_assignment=tf.contrib.tpu.' 'SINGLE_CORE_ASSIGNMENT)\n' 'tpu_strategy = tf.contrib.distribute.TPUStrategy(' 'device_assignment=device_assignment)') # Validates `steps` and `shuffle` arguments right at the beginning # since we use it to construct the dataset object. # TODO(anjalisridhar): Remove this check once we refactor the # _standardize_user_data code path. This check is already present elsewhere # in the codebase. if isinstance(x, dataset_ops.DatasetV2): if shuffle: training_utils.verify_dataset_shuffled(x) strategy = self._distribution_strategy with strategy.scope(): # We should be sure to call get_session() inside the strategy.scope() # so the strategy can affect the session options. if ops.executing_eagerly_outside_functions(): session = None else: session = K.get_session() first_x_value = nest.flatten(x)[0] if isinstance(first_x_value, np.ndarray): x = training_utils.list_to_tuple(x) if y is not None: y = training_utils.list_to_tuple(y) if sample_weight is not None: sample_weight = training_utils.list_to_tuple(sample_weight) in_tuple = (x, y, sample_weight) else: in_tuple = (x, y) else: in_tuple = x ds = strategy.extended.experimental_make_numpy_dataset(in_tuple, session=session) if shuffle: # We want a buffer size that is larger than the batch size provided by # the user and provides sufficient randomness. Note that larger # numbers introduce more memory usage based on the size of each # sample. ds = ds.shuffle(max(1024, batch_size * 8)) if epochs > 1: ds = ds.repeat(epochs) # We need to use the drop_remainder argument to get a known static # input shape which is required for TPUs. drop_remainder = (not allow_partial_batch and strategy.extended.experimental_require_static_shapes) # TODO(b/131720208): We still drop remainder here if number of examples # is divisible by batch size, as sometimes dynamic padder will time out # with keras.metrics.CategoricalAccuracy() metric. 
if distributed_training_utils.is_tpu_strategy( strategy) and not drop_remainder: dataset_size = first_x_value.shape[0] if dataset_size % batch_size == 0: drop_remainder = True x = ds.batch(batch_size, drop_remainder=drop_remainder) else: assert isinstance(x, dataset_ops.DatasetV2) training_utils.validate_dataset_input(x, y, sample_weight, validation_split) return x def _standardize_user_data(self, x, y=None, sample_weight=None, class_weight=None, batch_size=None, check_steps=False, steps_name='steps', steps=None, validation_split=0, shuffle=False, extract_tensors_from_dataset=False): """Runs validation checks on input and target data passed by the user. Also standardizes the data to lists of arrays, in order. Also builds and compiles the model on the fly if it is a subclassed model that has never been called before (and thus has no inputs/outputs). This is a purely internal method, subject to refactoring at any time. Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset, `y` should not be specified (since targets will be obtained from the iterator). sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in `x`. class_weight: An optional class-weight array by the user to weight the importance of samples in `x` based on the class they belong to, as conveyed by `y`. If both `sample_weight` and `class_weight` are provided, the weights are multiplied. batch_size: Integer batch size. If provided, it is used to run additional validation checks on stateful models. check_steps: boolean, True if we want to check for validity of `steps` and False, otherwise. For example, when we are standardizing one batch of data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps` value is not required and we should not check for its validity in these cases. steps_name: The public API's parameter name for `steps`. steps: Integer or `None`. Total number of steps (batches of samples) to execute. validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. shuffle: Boolean whether to shuffle the training data before each epoch. extract_tensors_from_dataset: Boolean. When `x` is a dataset instance, this indicates whether to extract actual tensors from the dataset or instead output the dataset instance itself. Set to True when calling from `train_on_batch`/etc. Returns: A tuple of 3: inputs (arrays or dicts, depending on whether `x` was a dict or not), target arrays, sample-weight arrays. If the model's input and targets are symbolic, these lists are empty (since the model takes no user-provided data, instead the data comes from the symbolic inputs/targets). Raises: ValueError: In case of invalid user-provided data. RuntimeError: If the model was never compiled. """ if isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)): # Graph mode dataset. We'll pass the dataset as-is (unless # `extract_tensors_from_dataset` is True, in which case we extract # the tensors from the dataset and we output them. 
training_utils.validate_dataset_input(x, y, sample_weight, validation_split) if shuffle: training_utils.verify_dataset_shuffled(x) is_dataset = True if extract_tensors_from_dataset: # We do this for `train_on_batch`/etc. x, y, sample_weight = training_utils.extract_tensors_from_dataset(x) elif isinstance(x, iterator_ops.Iterator): # Graph mode iterator. We extract the symbolic tensors. training_utils.validate_dataset_input(x, y, sample_weight, validation_split) iterator = x x, y, sample_weight = training_utils.unpack_iterator_input(iterator) is_dataset = True else: is_dataset = False # Validates `steps` argument based on x's type. if check_steps: training_utils.check_steps_argument(x, steps, steps_name) # First, we build the model on the fly if necessary. if not self.inputs: all_inputs, y_input, dict_inputs = self._build_model_with_inputs(x, y) is_build_called = True else: all_inputs = [] # Whether this is a subclassed model that expects dictionary inputs # rather than list inputs (e.g. FeatureColumn-based models). dict_inputs = isinstance(self.inputs, dict) is_build_called = False y_input = y # Second, we compile the model on the fly if necessary, mostly for subclass # models. is_compile_called = False if not self._is_compiled and self.optimizer: self._compile_from_inputs(all_inputs, y_input, x, y) is_compile_called = True # In graph mode, if we had just set inputs and targets as symbolic tensors # by invoking build and compile on the model respectively, we do not have to # feed anything to the model. Model already has input and target data as # part of the graph. # Note: in this case, `any` and `all` are equivalent since we disallow # mixed symbolic/value inputs. if (not self.run_eagerly and is_build_called and is_compile_called and not is_dataset and any(_is_symbolic_tensor(v) for v in all_inputs)): return [], [], None # What follows is input validation and standardization to list format, # in the case where all inputs are value arrays. if self.run_eagerly: # In eager mode, do not do shape validation # since the network has no input nodes (placeholders) to be fed. feed_input_names = self.input_names feed_input_shapes = None elif not self._is_graph_network: # Case: symbolic-mode subclassed network. Do not do shape validation. feed_input_names = self._feed_input_names feed_input_shapes = None else: # Case: symbolic-mode graph network. # In this case, we run extensive shape validation checks. feed_input_names = self._feed_input_names feed_input_shapes = self._feed_input_shapes # Standardize the inputs. if not isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)): # TODO(fchollet): run static checks with dataset output shape(s). x = training_utils.standardize_input_data( x, feed_input_names, feed_input_shapes, check_batch_axis=False, # Don't enforce the batch size. exception_prefix='input') # Get typespecs for the input data and sanitize it if necessary. # TODO(momernick): This should be capable of doing full input validation # at all times - validate that this is so and refactor the standardization # code. if isinstance(x, dataset_ops.DatasetV2): x_shapes = dataset_ops.get_structure(x) if isinstance(x_shapes, tuple): # If the output of a Dataset is a tuple, we assume it's either of the # form (x_data, y_data) or (x_data, y_data, sample_weights). In either # case, we only care about x_data here. 
x_shapes = x_shapes[0] else: flat_inputs = nest.flatten(x, expand_composites=False) flat_expected_inputs = nest.flatten(self.inputs, expand_composites=False) converted_x = [] for (a, b) in zip(flat_inputs, flat_expected_inputs): converted_x.append(_convert_scipy_sparse_tensor(a, b)) x = nest.pack_sequence_as(x, converted_x, expand_composites=False) x_shapes = nest.map_structure(type_spec.type_spec_from_value, x) flat_inputs = nest.flatten(x_shapes, expand_composites=False) flat_expected_inputs = nest.flatten(self.inputs, expand_composites=False) for (a, b) in zip(flat_inputs, flat_expected_inputs): nest.assert_same_structure(a, b, expand_composites=True) if y is not None: # Prepare self._sample_weight_modes. List with the same length as # model outputs. training_utils.prepare_sample_weight_modes(self._training_endpoints, self.sample_weight_mode) feed_output_names = self._feed_output_names feed_sample_weight_modes = self._sample_weight_modes if not self._is_graph_network: feed_output_shapes = None else: feed_output_shapes = self._feed_output_shapes # Standardize the outputs. y = training_utils.standardize_input_data( y, feed_output_names, # Don't enforce target shapes to match output shapes. # Precise checks will be run in `check_loss_and_target_compatibility`. shapes=None, check_batch_axis=False, # Don't enforce the batch size. exception_prefix='target') # Generate sample-wise weight values given the `sample_weight` and # `class_weight` arguments. sample_weights = training_utils.standardize_sample_weights( sample_weight, feed_output_names) class_weights = training_utils.standardize_class_weights( class_weight, feed_output_names) sample_weights = [ training_utils.standardize_weights(ref, sw, cw, mode) for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights, feed_sample_weight_modes) ] # Check that all arrays have the same length. if not self._distribution_strategy: training_utils.check_array_lengths(x, y, sample_weights) if self._is_graph_network and not self.run_eagerly: # Additional checks to avoid users mistakenly using improper loss fns. training_utils.check_loss_and_target_compatibility( y, self._feed_loss_fns, feed_output_shapes) # If sample weight mode has not been set and weights are None for all the # model outputs, return None (we do not create placeholders for # sample weights) so we do not want to feed any value. is_sample_weight_mode_set = any( s is not None for s in feed_sample_weight_modes) if (not is_sample_weight_mode_set and all(s is None for s in sample_weights)): sample_weights = None # If the list contains only None, return None else: y = [] sample_weights = None if self.stateful and batch_size and not is_dataset: # Check that for stateful networks, number of samples is a multiple # of the static batch size. if x[0].shape[0] % batch_size != 0: raise ValueError('In a stateful network, ' 'you should only pass inputs with ' 'a number of samples that can be ' 'divided by the batch size. Found: ' + str(x[0].shape[0]) + ' samples') # If dictionary inputs were provided, we return a dictionary as well. if dict_inputs and not isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)): x = dict(zip(feed_input_names, x)) return x, y, sample_weights def _build_model_with_inputs(self, inputs, targets): """Build the model (set model inputs/outputs), mainly for subclass model.""" processed_inputs = [] is_dict_inputs = False orig_inputs = inputs # We need to use `inputs` to set the model inputs. 
# If input data is a dataset iterator in graph mode or if it is an eager # iterator and only one batch of samples is required, we fetch the data # tensors from the iterator and then standardize them. if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)): inputs, targets, _ = training_utils.extract_tensors_from_dataset(inputs) # We type-check that `inputs` and `targets` are either single arrays # or lists of arrays, and extract a flat list of inputs from the passed # structure. training_utils.validate_input_types(inputs, orig_inputs) if isinstance(inputs, (list, tuple)): processed_inputs += list(inputs) elif isinstance(inputs, dict): is_dict_inputs = True keys = sorted(inputs.keys()) processed_inputs = [inputs[k] for k in keys] else: processed_inputs.append(inputs) # Now that we have a flat set of inputs, we make sure that none of them # are CompositeTensors or CompositeTensorValues of any type (or scipy # sparse arrays, which we treat as SparseTensor values). We cannot safely # infer input data from an arbitrary composite tensor, so we don't try - # users should explicitly add composite tensor inputs to their subclassed # models. for input_tensor in processed_inputs: if composite_tensor_utils.is_composite_or_composite_value(input_tensor): # TODO(b/132691975): Document subclass-model CT input handling. raise ValueError( 'All SparseTensor and RaggedTensor inputs must be explicitly ' 'declared using a keras.Input() with sparse=True or ragged=True. ' 'We found an undeclared input %s. For Sequential models, please ' 'add a keras.Input() as your first Layer. For subclassed models, ' 'please call self._set_inputs() on your input set, which you can ' 'create using keras.Input() for each input to your model.' % (input_tensor,)) # Build the model using the retrieved inputs (value or symbolic). # If values are generated from a dataset, then in symbolic-mode # placeholders will be created to match the value shapes. if isinstance(orig_inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2, iterator_ops.Iterator)): def create_tensor_spec(t): return tensor_spec.TensorSpec(t.shape, t.dtype) cast_inputs = nest.map_structure(create_tensor_spec, inputs) elif training_utils.has_tensors(inputs): cast_inputs = training_utils.cast_if_floating_dtype(inputs) else: cast_inputs = inputs self._set_inputs(cast_inputs) return processed_inputs, targets, is_dict_inputs def _compile_from_inputs(self, all_inputs, target, orig_inputs, orig_target): if target is not None: # We need to use `y` to set the model targets. if training_utils.has_tensors(target): target = training_utils.cast_if_floating_dtype_and_mismatch( target, self.outputs) training_utils.validate_input_types(target, orig_target, allow_dict=False, field_name='target') if isinstance(target, (list, tuple)): all_inputs += list(target) else: all_inputs.append(target) # Type check that all inputs are *either* value *or* symbolic. # TODO(fchollet): this check could be removed in Eager mode? if any(tensor_util.is_tensor(v) for v in all_inputs): if not all(tensor_util.is_tensor(v) for v in all_inputs): raise ValueError('Do not pass inputs that mix Numpy arrays and ' 'TensorFlow tensors. ' 'You passed: x=' + str(orig_inputs) + '; y=' + str(orig_target)) is_dataset = isinstance(orig_inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2, iterator_ops.Iterator)) if is_dataset or context.executing_eagerly(): target_tensors = None else: # Handle target tensors if any passed. 
if target is not None: if not isinstance(target, (list, tuple)): target = [target] target_tensors = [v for v in target if _is_symbolic_tensor(v)] else: target_tensors = None self.compile( optimizer=self.optimizer, loss=self.loss, metrics=self._compile_metrics, weighted_metrics=self._compile_weighted_metrics, loss_weights=self.loss_weights, target_tensors=target_tensors, sample_weight_mode=self.sample_weight_mode, run_eagerly=self.run_eagerly, experimental_run_tf_function=self._experimental_run_tf_function) # TODO(omalleyt): Consider changing to a more descriptive function name. def _set_inputs(self, inputs, outputs=None, training=None): """Set model's input and output specs based on the input data received. This is to be used for Model subclasses, which do not know at instantiation time what their inputs look like. Args: inputs: Single array, or list of arrays. The arrays could be placeholders, Numpy arrays, data tensors, or TensorSpecs. - if placeholders: the model is built on top of these placeholders, and we expect Numpy data to be fed for them when calling `fit`/etc. - if Numpy data or TensorShapes: we create placeholders matching the TensorShapes or shapes of the Numpy arrays. We expect Numpy data to be fed for these placeholders when calling `fit`/etc. - if data tensors: the model is built on top of these tensors. We do not expect any Numpy data to be provided when calling `fit`/etc. outputs: None, a data tensor, or a list of tensors. If None, the outputs will be determined by invoking `self.call()`, otherwise the provided value will be used. training: Boolean or None. Only relevant in symbolic mode. Specifies whether to build the model's graph in inference mode (False), training mode (True), or using the Keras learning phase (None). Raises: ValueError: If dict inputs are passed to a Sequential Model where the first layer isn't FeatureLayer. """ inputs = self._set_input_attrs(inputs) if outputs is None: kwargs = {} if self._expects_training_arg: # In V2 mode, feeding `training=None` is not allowed because any value # explicitly passed by the user is respected, even `None`.` if training is None and not ops.executing_eagerly_outside_functions(): training = K.learning_phase() if training is not None: kwargs['training'] = training try: outputs = self(inputs, **kwargs) except NotImplementedError: # This Model or a submodel is dynamic and hasn't overridden # `compute_output_shape`. outputs = None self._set_output_attrs(outputs) @trackable.no_automatic_dependency_tracking def _set_input_attrs(self, inputs): """Sets attributes related to the inputs of the Model.""" if self.inputs: raise ValueError('Model inputs are already set.') if self.__class__.__name__ == 'Sequential' and not self.built: if tensor_util.is_tensor(inputs): input_shape = (None,) + tuple(inputs.shape.as_list()[1:]) elif isinstance(inputs, tensor_shape.TensorShape): input_shape = (None,) + tuple(inputs.as_list()[1:]) elif isinstance(inputs, dict): # We assert that the first layer is a FeatureLayer. if not training_utils.is_feature_layer(self.layers[0]): raise ValueError('Passing a dictionary input to a Sequential Model ' 'which doesn\'t have FeatureLayer as the first layer' ' is an error.') input_shape = (None,) else: input_shape = (None,) + tuple(inputs.shape[1:]) self._build_input_shape = input_shape # On-the-fly setting of symbolic model inputs (either by using the tensor # provided, or by creating a placeholder if Numpy data was provided). 
model_inputs = training_utils.ModelInputs(inputs) inputs = model_inputs.get_symbolic_inputs() self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True) self.input_names = model_inputs.get_input_names() self._feed_inputs = [] self._feed_input_names = [] self._feed_input_shapes = [] for k, v in model_inputs.as_dict(): if K.is_placeholder(v): self._feed_input_names.append(k) self._feed_inputs.append(v) self._feed_input_shapes.append(K.int_shape(v)) return inputs @trackable.no_automatic_dependency_tracking def _set_output_attrs(self, outputs): """Sets attributes related to the outputs of the Model.""" outputs = nest.flatten(outputs) self.outputs = outputs self.output_names = training_utils.generic_output_names(outputs) # TODO(scottzhu): Should we cleanup the self._training_endpoints here? self.built = True @property def _targets(self): """The output target tensors for the model.""" return [ e.training_target.target for e in self._training_endpoints if e.has_training_target() ] @property def _feed_targets(self): return [ e.training_target.target for e in self._training_endpoints if e.has_feedable_training_target() ] @property def _feed_output_names(self): return [ e.output_name for e in self._training_endpoints if e.has_feedable_training_target() ] @property def _feed_output_shapes(self): return [ e.feed_output_shape for e in self._training_endpoints if e.has_feedable_training_target() ] @property def _feed_loss_fns(self): return [ e.loss_fn for e in self._training_endpoints if e.has_feedable_training_target() ] @property def _loss_weights_list(self): return [e.loss_weight for e in self._training_endpoints] @property def _output_loss_metrics(self): if hasattr(self, '_training_endpoints'): return [ e.output_loss_metric for e in self._training_endpoints if e.output_loss_metric is not None ] return None @property def sample_weights(self): return [e.sample_weight for e in self._training_endpoints] @property def _sample_weight_modes(self): return [e.sample_weight_mode for e in self._training_endpoints] @property def _feed_sample_weights(self): return [e.sample_weight for e in self._training_endpoints if e.sample_weight is not None] def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode): """Maybe load initial epoch from ckpt considering possible worker recovery. Refer to tensorflow/python/keras/distribute/multi_worker_training_state.py for more information. Arguments: initial_epoch: The original initial_epoch user passes in in `fit()`. mode: The mode for running `model.fit()`. Returns: If the training is recovering from previous failure under multi-worker training setting, return the epoch the training is supposed to continue at. Otherwise, return the `initial_epoch` the user passes in. """ if hasattr(self, '_training_state'): return self._training_state.maybe_load_initial_epoch_from_ckpt( initial_epoch, mode) return initial_epoch def _get_training_eval_metrics(self): """Returns all the metrics that are to be reported. This includes the output loss metrics, compile metrics/weighted metrics, add_metric metrics. 
""" metrics = [] if getattr(self, '_output_loss_metrics', None) is not None: metrics.extend(self._output_loss_metrics) if hasattr(self, 'metrics'): metrics.extend(self.metrics) return metrics @property def _object_identifier(self): return '_tf_keras_model' @property def _tracking_metadata(self): metadata = json.loads(super(Model, self)._tracking_metadata) metadata.update(saving_utils.model_metadata( self, include_optimizer=True, require_config=False)) return json.dumps(metadata, default=serialization.get_json_type) def _assert_compile_was_called(self): # Checks whether `compile` has been called. If it has been called, # then the optimizer is set. This is different from whether the # model is compiled # (i.e. whether the model is built and its inputs/outputs are set). if not self.optimizer: raise RuntimeError('You must compile your model before ' 'training/testing. ' 'Use `model.compile(optimizer, loss)`.') def _in_multi_worker_mode(self): """Method to infer if this `Model` is working in multi-worker settings. Experimental. Signature and implementation are subject to change. Returns: Whether this model indicates it's working in multi-worker settings. """ # If the model was compiled under the scope of a `tf.distribute.Strategy', # `self._distribution_strategy` would have been set and model should infer # that as the used strategy (even if it's out of strategy scope already). strategy = self._distribution_strategy # Otherwise, use the strategy whose scope this is in. if not strategy and distribution_strategy_context.has_strategy(): strategy = distribution_strategy_context.get_strategy() return strategy and strategy._in_multi_worker_mode() # pylint: disable=protected-access class DistributedCallbackModel(Model): """Model that is used for callbacks with tf.distribute.Strategy.""" def __init__(self, model): super(DistributedCallbackModel, self).__init__() self.optimizer = model.optimizer def set_original_model(self, orig_model): self._original_model = orig_model def save_weights(self, filepath, overwrite=True, save_format=None): self._replicated_model.save_weights(filepath, overwrite=overwrite, save_format=save_format) def save(self, filepath, overwrite=True, include_optimizer=True): # save weights from the distributed model to the original model distributed_model_weights = self.get_weights() self._original_model.set_weights(distributed_model_weights) # TODO(anjalisridhar): Do we need to save the original model here? # Saving the first replicated model works as well. self._original_model.save(filepath, overwrite=True, include_optimizer=False) def load_weights(self, filepath, by_name=False): self._original_model.load_weights(filepath, by_name=False) # Copy the weights from the original model to each of the replicated models. orig_model_weights = self._original_model.get_weights() distributed_training_utils.set_weights( self._original_model._distribution_strategy, self, # pylint: disable=protected-access orig_model_weights) def __getattr__(self, item): # Whitelisted atttributes of the model that can be accessed by the user # during a callback. if item not in ('_setattr_tracking', '_layers'): logging.warning('You are accessing attribute ' + item + ' of the ' 'DistributedCallbackModel that may not have been set ' 'correctly.') return super(DistributedCallbackModel, self).__getattr__(item) class _TrainingEndpoint(object): """A container for the training output/target and related entities. 
In the case of a model with multiple outputs, there is a one-to-one mapping between model output (y_pred), model target (y_true), loss, metrics etc. By unifying these entities into one class, the different entities can access information about each other, rather than having to reach into separate lists of attributes on the model. """ def __init__(self, output, output_name, loss_fn, loss_weight=None, training_target=None, output_loss_metric=None, sample_weight=None, sample_weight_mode=None): """Initialize the _TrainingEndpoint. Note that the output and output_name should be stable as long as the model structure doesn't change. The training_target is supposed to be mutable, since that information is provided via `compile()`. Args: output: the output tensor of the model. output_name: the unique name of the output tensor. loss_fn: the loss function for the output tensor. loss_weight: float, the weights for the loss. training_target: the _TrainingTarget for the model. output_loss_metric: the metric object for the loss function. sample_weight: the weights for how a sample is weighted during metric and loss calculation. Could be None. sample_weight_mode: string, 'temporal', 'samplewise' or None. The mode for how the sample_weight is populated. """ self._output = output self._output_name = output_name self._loss_fn = loss_fn self._loss_weight = loss_weight self._training_target = training_target self._output_loss_metric = output_loss_metric self._sample_weight = sample_weight self._sample_weight_mode = sample_weight_mode @property def output(self): return self._output @property def output_name(self): return self._output_name @property def shape(self): return K.int_shape(self.output) @property def loss_fn(self): return self._loss_fn @property def loss_weight(self): return self._loss_weight @loss_weight.setter def loss_weight(self, value): self._loss_weight = value @property def training_target(self): return self._training_target @training_target.setter def training_target(self, value): self._training_target = value def create_training_target(self, target, run_eagerly=False): """Create a training_target instance and update self.training_target. Note that the input target should just be a tensor or None, and the corresponding training target will be created based on the output and loss_fn. Args: target: the target tensor for the current output. Could be None. run_eagerly: boolean, whether the model is in run_eagerly mode. Raises: ValueError if the training_target field for the current instance has already been populated. """ if self.has_training_target(): raise ValueError('The training_target field for the _TrainingEndpoint ' 'instance has already been populated') if run_eagerly: # When run_eagerly, the target tensor is ignored, and the None placeholder # is created instead.
self.training_target = _TrainingTarget( None, feedable=True, skip_target_weights=False) return if self.should_skip_target(): self.training_target = _TrainingTarget(None) else: if target is not None and not K.is_placeholder(target): feedable = False skip_target_weights = True else: feedable = True skip_target_weights = False if target is None: target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get( self.loss_fn, K.dtype(self.output)) target = K.placeholder( ndim=len(self.shape), name=self.output_name + '_target', sparse=K.is_sparse(self.output), dtype=target_dtype) self.training_target = _TrainingTarget( target, feedable=feedable, skip_target_weights=skip_target_weights) @property def output_loss_metric(self): return self._output_loss_metric @output_loss_metric.setter def output_loss_metric(self, value): self._output_loss_metric = value @property def sample_weight(self): return self._sample_weight @sample_weight.setter def sample_weight(self, value): self._sample_weight = value @property def sample_weight_mode(self): return self._sample_weight_mode @sample_weight_mode.setter def sample_weight_mode(self, value): self._sample_weight_mode = value def should_skip_target(self): return self._loss_fn is None def should_skip_target_weights(self): return (self.should_skip_target() or self.training_target is None or self.training_target.skip_target_weights) def has_training_target(self): return self.training_target is not None def has_feedable_training_target(self): return (not self.should_skip_target() and self.training_target is not None and self.training_target.feedable) def loss_name(self): if self._loss_fn is not None: return self._output_name + '_loss' return None @property def feed_output_shape(self): """The output shape for the feedable target.""" if not self.has_feedable_training_target(): return None if ((isinstance(self.loss_fn, losses.LossFunctionWrapper) and self.loss_fn.fn == losses.sparse_categorical_crossentropy)) or ( isinstance(self.loss_fn, losses.SparseCategoricalCrossentropy)): if K.image_data_format() == 'channels_first': return (self.shape[0], 1) + self.shape[2:] else: return self.shape[:-1] + (1,) elif (not isinstance(self.loss_fn, losses.Loss) or (isinstance(self.loss_fn, losses.LossFunctionWrapper) and (getattr(losses, self.loss_fn.fn.__name__, None) is None))): # If the given loss is not an instance of the `Loss` class (custom # class) or if the loss function that is wrapped is not in the # `losses` module, then it is a user-defined loss and we make no # assumptions about it. return None else: return self.shape def sample_weights_mismatch(self): """Check if the sample weight and the mode match or not.""" # If there is a mismatch between sample weight mode and the placeholders # created, then recompile the sub-graphs that depend on sample weights. return ( (self.sample_weight_mode is not None and self.sample_weight is None) or (self.sample_weight_mode is None and self.sample_weight is not None)) def populate_sample_weight(self, sample_weight, sample_weight_mode): """Populate the sample weight and based on the sample weight mode.""" if (sample_weight is None and (self.should_skip_target_weights() or sample_weight_mode is None or context.executing_eagerly())): self._sample_weight = None return assert sample_weight_mode in ['temporal', 'samplewise'] if sample_weight_mode == 'temporal': default_value = [[1.]] shape = [None, None] else: # sample_weight_mode == 'samplewise' default_value = [1.] 
shape = [None] if sample_weight is not None: if not sample_weight.shape.is_compatible_with(shape): raise ValueError('Received sample weight with shape {}. Expected shape ' '{}.'.format(sample_weight.shape, shape)) self._sample_weight = sample_weight else: self._sample_weight = array_ops.placeholder_with_default( constant_op.constant(default_value, dtype=K.floatx()), shape=shape, name=self.output_name + '_sample_weights') class _TrainingTarget(object): """Container for a target tensor (y_true) and its metadata (shape, loss...). Arguments: target: A target tensor for the model. It may be `None` if the output is excluded from loss computation. It is still kept as None since each output of the model should have a corresponding target. If the target is None, the rest of the attributes will be None as well. feedable: Boolean, whether the target is feedable (requires data to be passed in `fit` or `train_on_batch`), or not (model compiled with `target_tensors` argument). skip_target_weights: Boolean, whether the target should be skipped during weights calculation. """ def __init__(self, target, feedable=False, skip_target_weights=True): self._target = target self._feedable = feedable self._skip_target_weights = skip_target_weights @property def target(self): return self._target @property def feedable(self): return self._feedable @property def skip_target_weights(self): return self._skip_target_weights def _is_symbolic_tensor(x): return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor) def _convert_scipy_sparse_tensor(value, expected_input): """Handle scipy sparse tensor conversions. This method takes a value 'value' and returns the proper conversion. If value is a scipy sparse tensor and the expected input is a dense tensor, we densify 'value'. If value is a scipy sparse tensor and the expected input is a TF SparseTensor, we convert 'value' to a SparseTensor. If 'value' is not a scipy sparse tensor, or scipy is not imported, we pass it through unchanged. Arguments: value: An object that may be a scipy sparse tensor expected_input: The expected input placeholder. Returns: The possibly-converted 'value'. """ if issparse is not None and issparse(value): if ops.is_dense_tensor_like(expected_input): return value.toarray() else: sparse_coo = value.tocoo() row, col = sparse_coo.row, sparse_coo.col data, shape = sparse_coo.data, sparse_coo.shape indices = np.concatenate((np.expand_dims(row, 1), np.expand_dims(col, 1)), 1) return sparse_tensor.SparseTensor(indices, data, shape) else: return value def _get_metrics_from_layers(layers): """Returns list of metrics from the given layers. This will not include the `compile` metrics of a model layer. Arguments: layers: List of layers. Returns: List of metrics. """ metrics = [] layers = trackable_layer_utils.filter_empty_layer_containers(layers) for layer in layers: if isinstance(layer, Model): # We cannot call 'metrics' on the model because we do not want to # include the metrics that were added in compile API of a nested model. metrics.extend(layer._metrics) # pylint: disable=protected-access metrics.extend(_get_metrics_from_layers(layer.layers)) else: metrics.extend(layer.metrics) return metrics
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/engine/training.py
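# A minimal, self-contained sketch (assuming numpy and scipy are installed) of the
# conversion performed by _convert_scipy_sparse_tensor above: a scipy sparse matrix
# is either densified with .toarray() when a dense input is expected, or broken into
# the (indices, values, dense_shape) triple that a tf.SparseTensor is built from.
import numpy as np
from scipy.sparse import coo_matrix


def sparse_to_triple(value):
  """Returns (indices, values, dense_shape) for a scipy sparse matrix."""
  sparse_coo = value.tocoo()
  indices = np.stack([sparse_coo.row, sparse_coo.col], axis=1)
  return indices, sparse_coo.data, sparse_coo.shape


mat = coo_matrix(([1.0, 2.0], ([0, 2], [1, 0])), shape=(3, 3))
indices, values, dense_shape = sparse_to_triple(mat)
print(indices.tolist())   # [[0, 1], [2, 0]]
print(values.tolist())    # [1.0, 2.0]
print(dense_shape)        # (3, 3)
print(mat.toarray())      # the dense fallback used when the model expects a dense input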
# pylint: disable=g-bad-file-header # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Removes parts of a graph that are only needed for training. There are several common transformations that can be applied to GraphDefs created to train a model, that help reduce the amount of computation needed when the network is used only for inference. These include: - Removing training-only operations like checkpoint saving. - Stripping out parts of the graph that are never reached. - Removing debug operations like CheckNumerics. - Folding batch normalization ops into the pre-calculated weights. - Fusing common operations into unified versions. This script takes a frozen GraphDef file (where the weight variables have been converted into constants by the freeze_graph script) and outputs a new GraphDef with the optimizations applied. An example of command-line usage is: bazel build tensorflow/python/tools:optimize_for_inference && \ bazel-bin/tensorflow/python/tools/optimize_for_inference \ --input_graph=some_graph_def.pb \ --output_graph=/tmp/optimized_graph.pb \ --input_names=Mul \ --output_names=softmax """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import re import numpy as np from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import graph_util from tensorflow.python.framework import tensor_util from tensorflow.python.platform import flags as flags_lib from tensorflow.python.platform import tf_logging from tensorflow.python.tools import strip_unused_lib flags = flags_lib FLAGS = flags.FLAGS # Support folding two types of batch norm ops: # BatchNormWithGlobalNormalization and FusedBatchNorm. The two types only # differ in input order and attribute names, so we've collected their # differences up front. INPUT_ORDER = { # Order of inputs for BatchNormWithGlobalNormalization. "BatchNormWithGlobalNormalization": [ "conv_op", "mean_op", "var_op", "beta_op", "gamma_op" ], # Order of inputs for FusedBatchNorm. "FusedBatchNorm": ["conv_op", "gamma_op", "beta_op", "mean_op", "var_op"] } # Name of the attribute epsilon value is stored in. EPSILON_ATTR = { "BatchNormWithGlobalNormalization": "variance_epsilon", "FusedBatchNorm": "epsilon" } def optimize_for_inference(input_graph_def, input_node_names, output_node_names, placeholder_type_enum, toco_compatible=False): """Applies a series of inference optimizations on the input graph. Args: input_graph_def: A GraphDef containing a training model. input_node_names: A list of names of the nodes that are fed inputs during inference. output_node_names: A list of names of the nodes that produce the final results. 
placeholder_type_enum: The AttrValue enum for the placeholder data type, or a list that specifies one value per input node name. toco_compatible: Boolean, if True, only runs optimizations that result in TOCO compatible graph operations (default=False). Returns: An optimized version of the input graph. """ ensure_graph_is_valid(input_graph_def) optimized_graph_def = input_graph_def optimized_graph_def = strip_unused_lib.strip_unused( optimized_graph_def, input_node_names, output_node_names, placeholder_type_enum) optimized_graph_def = graph_util.remove_training_nodes( optimized_graph_def, output_node_names) optimized_graph_def = fold_batch_norms(optimized_graph_def) if not toco_compatible: optimized_graph_def = fuse_resize_and_conv(optimized_graph_def, output_node_names) ensure_graph_is_valid(optimized_graph_def) return optimized_graph_def def ensure_graph_is_valid(graph_def): """Makes sure that the graph is internally consistent. Checks basic properties of the graph def and raises an exception if there are input references to missing nodes, duplicated names, or other logic errors. Args: graph_def: Definition of a graph to be checked. Raises: ValueError: If the graph is incorrectly constructed. """ node_map = {} for node in graph_def.node: if node.name not in node_map: node_map[node.name] = node else: raise ValueError("Duplicate node names detected for ", node.name) for node in graph_def.node: for input_name in node.input: input_node_name = node_name_from_input(input_name) if input_node_name not in node_map: raise ValueError("Input for ", node.name, " not found: ", input_name) def node_name_from_input(node_name): """Strips off ports and other decorations to get the underlying node name.""" if node_name.startswith("^"): node_name = node_name[1:] m = re.search(r"(.*):\d+$", node_name) if m: node_name = m.group(1) return node_name def node_from_map(node_map, name): """Pulls a node def from a dictionary for a given name. Args: node_map: Dictionary containing an entry indexed by name for every node. name: Identifies the node we want to find. Returns: NodeDef of the node with the given name. Raises: ValueError: If the node isn't present in the dictionary. """ stripped_name = node_name_from_input(name) if stripped_name not in node_map: raise ValueError("No node named '%s' found in map." % name) return node_map[stripped_name] def values_from_const(node_def): """Extracts the values from a const NodeDef as a numpy ndarray. Args: node_def: Const NodeDef that has the values we want to access. Returns: Numpy ndarray containing the values. Raises: ValueError: If the node isn't a Const. """ if node_def.op != "Const": raise ValueError( "Node named '%s' should be a Const op for values_from_const." % node_def.name) input_tensor = node_def.attr["value"].tensor tensor_value = tensor_util.MakeNdarray(input_tensor) return tensor_value # Whether to scale by gamma after normalization. def scale_after_normalization(node): if node.op == "BatchNormWithGlobalNormalization": return node.attr["scale_after_normalization"].b return True def fold_batch_norms(input_graph_def): """Removes batch normalization ops by folding them into convolutions. Batch normalization during training has multiple dynamic parameters that are updated, but once the graph is finalized these become constants. That means there's an opportunity to reduce the computations down to a scale and addition, rather than the more expensive multiple ops, and even bake the scaling into the convolution weights. 
This function identifies the typical pattern of batch normalization subgraphs, and performs the transformation to fold the computations down into a simpler form. It currently only spots batch normalization that's performed by the BatchNormWithGlobalNormalization and FusedBatchNorm ops, and will need to be extended in the future to handle the newer style. Args: input_graph_def: A GraphDef containing a model. Returns: Modified graph with BN ops removed, and modified weights. Raises: ValueError: If the graph is badly formed with duplicate node names. """ input_node_map = {} for node in input_graph_def.node: if node.name not in input_node_map: input_node_map[node.name] = node else: raise ValueError("Duplicate node names detected for ", node.name) nodes_to_skip = {} new_ops = [] for node in input_graph_def.node: if node.op not in ("BatchNormWithGlobalNormalization", "FusedBatchNorm"): continue conv_op = node_from_map(input_node_map, node.input[INPUT_ORDER[node.op].index("conv_op")]) if conv_op.op != "Conv2D" and conv_op.op != "DepthwiseConv2dNative": tf_logging.warning("Didn't find expected Conv2D or DepthwiseConv2dNative" " input to '%s'" % node.name) continue weights_op = node_from_map(input_node_map, conv_op.input[1]) if weights_op.op != "Const": tf_logging.warning("Didn't find expected conv Constant input to '%s'," " found %s instead. Maybe because freeze_graph wasn't" " run first?" % (conv_op.name, weights_op)) continue weights = values_from_const(weights_op) if conv_op.op == "Conv2D": channel_count = weights.shape[3] elif conv_op.op == "DepthwiseConv2dNative": channel_count = weights.shape[2] * weights.shape[3] mean_op = node_from_map(input_node_map, node.input[INPUT_ORDER[node.op].index("mean_op")]) if mean_op.op != "Const": tf_logging.warning("Didn't find expected mean Constant input to '%s'," " found %s instead. Maybe because freeze_graph wasn't" " run first?" % (node.name, mean_op)) continue mean_value = values_from_const(mean_op) if mean_value.shape != (channel_count,): tf_logging.warning("Incorrect shape for mean, found %s, expected %s," " for node %s" % (str(mean_value.shape), str( (channel_count,)), node.name)) continue var_op = node_from_map(input_node_map, node.input[INPUT_ORDER[node.op].index("var_op")]) if var_op.op != "Const": tf_logging.warning("Didn't find expected var Constant input to '%s'," " found %s instead. Maybe because freeze_graph wasn't" " run first?" % (node.name, var_op)) continue var_value = values_from_const(var_op) if var_value.shape != (channel_count,): tf_logging.warning("Incorrect shape for var, found %s, expected %s," " for node %s" % (str(var_value.shape), str( (channel_count,)), node.name)) continue beta_op = node_from_map(input_node_map, node.input[INPUT_ORDER[node.op].index("beta_op")]) if beta_op.op != "Const": tf_logging.warning("Didn't find expected beta Constant input to '%s'," " found %s instead. Maybe because freeze_graph wasn't" " run first?" % (node.name, beta_op)) continue beta_value = values_from_const(beta_op) if beta_value.shape != (channel_count,): tf_logging.warning("Incorrect shape for beta, found %s, expected %s," " for node %s" % (str(beta_value.shape), str( (channel_count,)), node.name)) continue gamma_op = node_from_map(input_node_map, node.input[INPUT_ORDER[node.op].index("gamma_op")]) if gamma_op.op != "Const": tf_logging.warning("Didn't find expected gamma Constant input to '%s'," " found %s instead. Maybe because freeze_graph wasn't" " run first?" 
% (node.name, gamma_op)) continue gamma_value = values_from_const(gamma_op) if gamma_value.shape != (channel_count,): tf_logging.warning("Incorrect shape for gamma, found %s, expected %s," " for node %s" % (str(gamma_value.shape), str( (channel_count,)), node.name)) continue variance_epsilon_value = node.attr[EPSILON_ATTR[node.op]].f nodes_to_skip[node.name] = True nodes_to_skip[weights_op.name] = True nodes_to_skip[mean_op.name] = True nodes_to_skip[var_op.name] = True nodes_to_skip[beta_op.name] = True nodes_to_skip[gamma_op.name] = True nodes_to_skip[conv_op.name] = True if scale_after_normalization(node): scale_value = ( (1.0 / np.vectorize(math.sqrt)(var_value + variance_epsilon_value)) * gamma_value) else: scale_value = ( 1.0 / np.vectorize(math.sqrt)(var_value + variance_epsilon_value)) offset_value = (-mean_value * scale_value) + beta_value scaled_weights = np.copy(weights) it = np.nditer( scaled_weights, flags=["multi_index"], op_flags=["readwrite"]) if conv_op.op == "Conv2D": while not it.finished: current_scale = scale_value[it.multi_index[3]] it[0] *= current_scale it.iternext() elif conv_op.op == "DepthwiseConv2dNative": channel_multiplier = weights.shape[3] while not it.finished: current_scale = scale_value[it.multi_index[2] * channel_multiplier + it.multi_index[3]] it[0] *= current_scale it.iternext() scaled_weights_op = node_def_pb2.NodeDef() scaled_weights_op.op = "Const" scaled_weights_op.name = weights_op.name scaled_weights_op.attr["dtype"].CopyFrom(weights_op.attr["dtype"]) scaled_weights_op.attr["value"].CopyFrom( attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto( scaled_weights, weights.dtype.type, weights.shape))) new_conv_op = node_def_pb2.NodeDef() new_conv_op.CopyFrom(conv_op) offset_op = node_def_pb2.NodeDef() offset_op.op = "Const" offset_op.name = conv_op.name + "_bn_offset" offset_op.attr["dtype"].CopyFrom(mean_op.attr["dtype"]) offset_op.attr["value"].CopyFrom( attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto( offset_value, mean_value.dtype.type, offset_value.shape))) bias_add_op = node_def_pb2.NodeDef() bias_add_op.op = "BiasAdd" bias_add_op.name = node.name bias_add_op.attr["T"].CopyFrom(conv_op.attr["T"]) bias_add_op.attr["data_format"].CopyFrom(conv_op.attr["data_format"]) bias_add_op.input.extend([new_conv_op.name, offset_op.name]) new_ops.extend([scaled_weights_op, new_conv_op, offset_op, bias_add_op]) result_graph_def = graph_pb2.GraphDef() for node in input_graph_def.node: if node.name in nodes_to_skip: continue new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node) result_graph_def.node.extend([new_node]) result_graph_def.node.extend(new_ops) return result_graph_def def fuse_resize_and_conv(input_graph_def, output_node_names): """Merges preceding resize and mirror pad ops into a specialized convolution. There's a common pattern of enlarging the input to a convolution using a resize operation, and also using MirrorPad to extend the boundaries to that zero edge pixels don't bleed inwards when convolving. This routine looks for that pattern of operations, and fuses them together into a Conv2DWithResizeOp. Args: input_graph_def: A GraphDef containing a model. output_node_names: A list of names of the nodes that produce the final results. Returns: Modified graph with resize and pad ops merged. Raises: ValueError: If the graph is badly formed with duplicate node names. 
""" input_node_map = {} for node in input_graph_def.node: if node.name not in input_node_map: input_node_map[node.name] = node else: raise ValueError("Duplicate node names detected for ", node.name) node_reference_count = collections.defaultdict(int) for node in input_graph_def.node: for input_name in node.input: stripped_name = node_name_from_input(input_name) node_reference_count[stripped_name] += 1 for output_name in output_node_names: node_reference_count[output_name] += 1 new_ops = [] for node in input_graph_def.node: if node.op != "Conv2D": continue conv_op = node input_op = node_from_map(input_node_map, conv_op.input[0]) if input_op.op == "MirrorPad": mirror_pad_op = input_op resize_op = node_from_map(input_node_map, mirror_pad_op.input[0]) if resize_op.op != "ResizeBilinear": resize_op = None else: mirror_pad_op = None if input_op.op == "ResizeBilinear": resize_op = input_op else: resize_op = None # There are no ops to be fused into the conv, so skip replacing this one. if not mirror_pad_op and not resize_op: continue # We're replacing this node, so make sure the old one is removed. node_reference_count[conv_op.name] = 0 if mirror_pad_op: node_reference_count[mirror_pad_op.name] -= 1 if resize_op: node_reference_count[resize_op.name] -= 1 fused_conv_op = node_def_pb2.NodeDef() if resize_op: fused_conv_op.op = "FusedResizeAndPadConv2D" else: fused_conv_op.op = "FusedPadConv2D" fused_conv_op.name = conv_op.name if mirror_pad_op: mirror_paddings_name = mirror_pad_op.input[1] mirror_paddings_mode = mirror_pad_op.attr["mode"] else: # If there was no MirrorPad op, then create settings that make the padding # stage of the fused operation a no-op. paddings_op = node_def_pb2.NodeDef() paddings_op.op = "Const" paddings_op.name = conv_op.name + "_dummy_paddings" paddings_op.attr["dtype"].CopyFrom( attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)) paddings_op.attr["value"].CopyFrom( attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto( [0, 0, 0, 0, 0, 0, 0, 0], dtypes.int32, [4, 2]))) new_ops.extend([paddings_op]) mirror_paddings_name = paddings_op.name mirror_paddings_mode = attr_value_pb2.AttrValue(s=b"REFLECT") if resize_op: fused_conv_op.input.extend([ resize_op.input[0], resize_op.input[1], mirror_paddings_name, conv_op.input[1] ]) fused_conv_op.attr["resize_align_corners"].CopyFrom( resize_op.attr["align_corners"]) else: fused_conv_op.input.extend( [mirror_pad_op.input[0], mirror_paddings_name, conv_op.input[1]]) fused_conv_op.attr["T"].CopyFrom(conv_op.attr["T"]) fused_conv_op.attr["mode"].CopyFrom(mirror_paddings_mode) fused_conv_op.attr["strides"].CopyFrom(conv_op.attr["strides"]) fused_conv_op.attr["padding"].CopyFrom(conv_op.attr["padding"]) new_ops.extend([fused_conv_op]) result_graph_def = graph_pb2.GraphDef() for node in input_graph_def.node: if node_reference_count[node.name] < 1: continue new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node) result_graph_def.node.extend([new_node]) result_graph_def.node.extend(new_ops) return result_graph_def
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/optimize_for_inference_lib.py
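# A small standalone numpy check (a sketch, not part of the library above) of the
# identity that fold_batch_norms relies on: because convolution is linear, applying
#   gamma * (conv_out - mean) / sqrt(var + eps) + beta
# per output channel is equivalent to scaling the convolution (or its weights) by
#   scale = gamma / sqrt(var + eps)
# and adding the constant offset  beta - mean * scale  with a BiasAdd.
import numpy as np

rng = np.random.default_rng(0)
conv_out = rng.normal(size=(1, 4, 4, 2))   # stand-in for a Conv2D output, NHWC
gamma = np.array([1.0, 2.0])
beta = np.array([0.1, 0.6])
mean = np.array([10.0, 20.0])
var = np.array([0.25, 0.5])
eps = 0.00001

scale = gamma / np.sqrt(var + eps)
offset = beta - mean * scale

batch_normed = gamma * (conv_out - mean) / np.sqrt(var + eps) + beta
folded = conv_out * scale + offset
np.testing.assert_allclose(batch_normed, folded, rtol=1e-6)
print("fold_batch_norms identity holds")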
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests the node stripping tool.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.core.framework import graph_pb2 from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import graph_io from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import math_ops from tensorflow.python.platform import test from tensorflow.python.tools import strip_unused_lib class StripUnusedTest(test_util.TensorFlowTestCase): def testStripUnused(self): input_graph_name = "input_graph.pb" output_graph_name = "output_graph.pb" # We'll create an input graph that has a single constant containing 1.0, # and that then multiplies it by 2. with ops.Graph().as_default(): constant_node = constant_op.constant(1.0, name="constant_node") wanted_input_node = math_ops.subtract(constant_node, 3.0, name="wanted_input_node") output_node = math_ops.multiply( wanted_input_node, 2.0, name="output_node") math_ops.add(output_node, 2.0, name="later_node") sess = session.Session() output = self.evaluate(output_node) self.assertNear(-4.0, output, 0.00001) graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name) # We save out the graph to disk, and then call the const conversion # routine. input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name) input_binary = False output_binary = True output_node_names = "output_node" output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name) def strip(input_node_names): strip_unused_lib.strip_unused_from_files(input_graph_path, input_binary, output_graph_path, output_binary, input_node_names, output_node_names, dtypes.float32.as_datatype_enum) with self.assertRaises(KeyError): strip("does_not_exist") with self.assertRaises(ValueError): strip("wanted_input_node:0") input_node_names = "wanted_input_node" strip(input_node_names) # Now we make sure the variable is now a constant, and that the graph still # produces the expected result. 
with ops.Graph().as_default(): output_graph_def = graph_pb2.GraphDef() with open(output_graph_path, "rb") as f: output_graph_def.ParseFromString(f.read()) _ = importer.import_graph_def(output_graph_def, name="") self.assertEqual(3, len(output_graph_def.node)) for node in output_graph_def.node: self.assertNotEqual("Add", node.op) self.assertNotEqual("Sub", node.op) if node.name == input_node_names: self.assertTrue("shape" in node.attr) with session.Session() as sess: input_node = sess.graph.get_tensor_by_name("wanted_input_node:0") output_node = sess.graph.get_tensor_by_name("output_node:0") output = sess.run(output_node, feed_dict={input_node: [10.0]}) self.assertNear(20.0, output, 0.00001) def testStripUnusedMultipleInputs(self): input_graph_name = "input_graph.pb" output_graph_name = "output_graph.pb" # We'll create an input graph that multiplies two input nodes. with ops.Graph().as_default(): constant_node1 = constant_op.constant(1.0, name="constant_node1") constant_node2 = constant_op.constant(2.0, name="constant_node2") input_node1 = math_ops.subtract(constant_node1, 3.0, name="input_node1") input_node2 = math_ops.subtract(constant_node2, 5.0, name="input_node2") output_node = math_ops.multiply( input_node1, input_node2, name="output_node") math_ops.add(output_node, 2.0, name="later_node") sess = session.Session() output = self.evaluate(output_node) self.assertNear(6.0, output, 0.00001) graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name) # We save out the graph to disk, and then call the const conversion # routine. input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name) input_binary = False input_node_names = "input_node1,input_node2" input_node_types = [ dtypes.float32.as_datatype_enum, dtypes.float32.as_datatype_enum ] output_binary = True output_node_names = "output_node" output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name) strip_unused_lib.strip_unused_from_files(input_graph_path, input_binary, output_graph_path, output_binary, input_node_names, output_node_names, input_node_types) # Now we make sure the variable is now a constant, and that the graph still # produces the expected result. with ops.Graph().as_default(): output_graph_def = graph_pb2.GraphDef() with open(output_graph_path, "rb") as f: output_graph_def.ParseFromString(f.read()) _ = importer.import_graph_def(output_graph_def, name="") self.assertEqual(3, len(output_graph_def.node)) for node in output_graph_def.node: self.assertNotEqual("Add", node.op) self.assertNotEqual("Sub", node.op) if node.name == input_node_names: self.assertTrue("shape" in node.attr) with session.Session() as sess: input_node1 = sess.graph.get_tensor_by_name("input_node1:0") input_node2 = sess.graph.get_tensor_by_name("input_node2:0") output_node = sess.graph.get_tensor_by_name("output_node:0") output = sess.run(output_node, feed_dict={input_node1: [10.0], input_node2: [-5.0]}) self.assertNear(-50.0, output, 0.00001) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/strip_unused_test.py
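# A hedged sketch (assuming the TF 1.x graph-mode APIs used in the test above) of
# calling strip_unused_lib.strip_unused on an in-memory GraphDef instead of going
# through strip_unused_from_files: the named input is rewritten to a placeholder of
# the given dtype and nodes not needed to compute the outputs are dropped.
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.tools import strip_unused_lib

with ops.Graph().as_default() as g:
  constant_node = constant_op.constant(1.0, name="constant_node")
  wanted_input = math_ops.subtract(constant_node, 3.0, name="wanted_input_node")
  math_ops.multiply(wanted_input, 2.0, name="output_node")

stripped = strip_unused_lib.strip_unused(
    g.as_graph_def(), ["wanted_input_node"], ["output_node"],
    dtypes.float32.as_datatype_enum)
# The constant feeding the input is gone; wanted_input_node is now a Placeholder.
print(sorted((n.name, n.op) for n in stripped.node))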
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SavedModel utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from google.protobuf import message from google.protobuf import text_format from tensorflow.core.protobuf import saved_model_pb2 from tensorflow.python.lib.io import file_io from tensorflow.python.saved_model import constants from tensorflow.python.util import compat def read_saved_model(saved_model_dir): """Reads the savedmodel.pb or savedmodel.pbtxt file containing `SavedModel`. Args: saved_model_dir: Directory containing the SavedModel file. Returns: A `SavedModel` protocol buffer. Raises: IOError: If the file does not exist, or cannot be successfully parsed. """ # Build the path to the SavedModel in pbtxt format. path_to_pbtxt = os.path.join( compat.as_bytes(saved_model_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)) # Build the path to the SavedModel in pb format. path_to_pb = os.path.join( compat.as_bytes(saved_model_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB)) # Ensure that the SavedModel exists at either path. if not file_io.file_exists(path_to_pbtxt) and not file_io.file_exists( path_to_pb): raise IOError("SavedModel file does not exist at: %s" % saved_model_dir) # Parse the SavedModel protocol buffer. saved_model = saved_model_pb2.SavedModel() if file_io.file_exists(path_to_pb): try: file_content = file_io.FileIO(path_to_pb, "rb").read() saved_model.ParseFromString(file_content) return saved_model except message.DecodeError as e: raise IOError("Cannot parse file %s: %s." % (path_to_pb, str(e))) elif file_io.file_exists(path_to_pbtxt): try: file_content = file_io.FileIO(path_to_pbtxt, "rb").read() text_format.Merge(file_content.decode("utf-8"), saved_model) return saved_model except text_format.ParseError as e: raise IOError("Cannot parse file %s: %s." % (path_to_pbtxt, str(e))) else: raise IOError("SavedModel file does not exist at: %s/{%s|%s}" % (saved_model_dir, constants.SAVED_MODEL_FILENAME_PBTXT, constants.SAVED_MODEL_FILENAME_PB)) def get_saved_model_tag_sets(saved_model_dir): """Retrieves all the tag-sets available in the SavedModel. Args: saved_model_dir: Directory containing the SavedModel. Returns: String representation of all tag-sets in the SavedModel. """ saved_model = read_saved_model(saved_model_dir) all_tags = [] for meta_graph_def in saved_model.meta_graphs: all_tags.append(list(meta_graph_def.meta_info_def.tags)) return all_tags def get_meta_graph_def(saved_model_dir, tag_set): """Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given tag-set and SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect or execute. tag_set: Group of tag(s) of the MetaGraphDef to load, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in. 
Raises: RuntimeError: An error when the given tag-set does not exist in the SavedModel. Returns: A MetaGraphDef corresponding to the tag-set. """ saved_model = read_saved_model(saved_model_dir) set_of_tags = set(tag_set.split(',')) for meta_graph_def in saved_model.meta_graphs: if set(meta_graph_def.meta_info_def.tags) == set_of_tags: return meta_graph_def raise RuntimeError('MetaGraphDef associated with tag-set ' + tag_set + ' could not be found in SavedModel')
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/saved_model_utils.py
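# A hedged usage sketch of the helpers above; `export_dir` and the "serve" tag-set
# are illustrative assumptions, and the directory is assumed to already contain a
# SavedModel (saved_model.pb or saved_model.pbtxt).
from tensorflow.python.tools import saved_model_utils

export_dir = "/tmp/my_saved_model"  # assumption: an existing SavedModel directory

for tags in saved_model_utils.get_saved_model_tag_sets(export_dir):
  print("tag-set:", ", ".join(tags))

# All tags of the set must be passed, comma-separated, per the docstring above.
meta_graph_def = saved_model_utils.get_meta_graph_def(export_dir, "serve")
print(sorted(meta_graph_def.signature_def.keys()))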
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for print_selective_registration_header.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys from google.protobuf import text_format from tensorflow.core.framework import graph_pb2 from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.tools import selective_registration_header_lib # Note that this graph def is not valid to be loaded - its inputs are not # assigned correctly in all cases. GRAPH_DEF_TXT = """ node: { name: "node_1" op: "Reshape" input: [ "none", "none" ] device: "/cpu:0" attr: { key: "T" value: { type: DT_FLOAT } } } node: { name: "node_2" op: "MatMul" input: [ "none", "none" ] device: "/cpu:0" attr: { key: "T" value: { type: DT_FLOAT } } attr: { key: "transpose_a" value: { b: false } } attr: { key: "transpose_b" value: { b: false } } } node: { name: "node_3" op: "MatMul" input: [ "none", "none" ] device: "/cpu:0" attr: { key: "T" value: { type: DT_DOUBLE } } attr: { key: "transpose_a" value: { b: false } } attr: { key: "transpose_b" value: { b: false } } } """ # AccumulateNV2 is included because it should be included in the header despite # lacking a kernel (it's rewritten by AccumulateNV2RemovePass; see # core/common_runtime/accumulate_n_optimizer.cc. 
GRAPH_DEF_TXT_2 = """ node: { name: "node_4" op: "BiasAdd" input: [ "none", "none" ] device: "/cpu:0" attr: { key: "T" value: { type: DT_FLOAT } } } node: { name: "node_5" op: "AccumulateNV2" attr: { key: "T" value: { type: DT_INT32 } } attr: { key : "N" value: { i: 3 } } } """ class PrintOpFilegroupTest(test.TestCase): def setUp(self): _, self.script_name = os.path.split(sys.argv[0]) def WriteGraphFiles(self, graphs): fnames = [] for i, graph in enumerate(graphs): fname = os.path.join(self.get_temp_dir(), 'graph%s.pb' % i) with gfile.GFile(fname, 'wb') as f: f.write(graph.SerializeToString()) fnames.append(fname) return fnames def testGetOps(self): default_ops = 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp' graphs = [ text_format.Parse(d, graph_pb2.GraphDef()) for d in [GRAPH_DEF_TXT, GRAPH_DEF_TXT_2] ] ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'rawproto', self.WriteGraphFiles(graphs), default_ops) matmul_prefix = '' self.assertListEqual( [ ('AccumulateNV2', None), # ('BiasAdd', 'BiasOp<CPUDevice, float>'), # ('MatMul', matmul_prefix + 'MatMulOp<CPUDevice, double, false >'), # ('MatMul', matmul_prefix + 'MatMulOp<CPUDevice, float, false >'), # ('NoOp', 'NoOp'), # ('Reshape', 'ReshapeOp'), # ('_Recv', 'RecvOp'), # ('_Send', 'SendOp'), # ], ops_and_kernels) graphs[0].node[0].ClearField('device') graphs[0].node[2].ClearField('device') ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'rawproto', self.WriteGraphFiles(graphs), default_ops) self.assertListEqual( [ ('AccumulateNV2', None), # ('BiasAdd', 'BiasOp<CPUDevice, float>'), # ('MatMul', matmul_prefix + 'MatMulOp<CPUDevice, double, false >'), # ('MatMul', matmul_prefix + 'MatMulOp<CPUDevice, float, false >'), # ('NoOp', 'NoOp'), # ('Reshape', 'ReshapeOp'), # ('_Recv', 'RecvOp'), # ('_Send', 'SendOp'), # ], ops_and_kernels) def testAll(self): default_ops = 'all' graphs = [ text_format.Parse(d, graph_pb2.GraphDef()) for d in [GRAPH_DEF_TXT, GRAPH_DEF_TXT_2] ] ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'rawproto', self.WriteGraphFiles(graphs), default_ops) header = selective_registration_header_lib.get_header_from_ops_and_kernels( ops_and_kernels, include_all_ops_and_kernels=True) self.assertListEqual( [ '// This file was autogenerated by %s' % self.script_name, '#ifndef OPS_TO_REGISTER', # '#define OPS_TO_REGISTER', # '#define SHOULD_REGISTER_OP(op) true', # '#define SHOULD_REGISTER_OP_KERNEL(clz) true', # '#define SHOULD_REGISTER_OP_GRADIENT true', # '#endif' ], header.split('\n')) self.assertListEqual( header.split('\n'), selective_registration_header_lib.get_header( self.WriteGraphFiles(graphs), 'rawproto', default_ops).split('\n')) def testGetSelectiveHeader(self): default_ops = '' graphs = [text_format.Parse(GRAPH_DEF_TXT_2, graph_pb2.GraphDef())] expected = """// This file was autogenerated by %s #ifndef OPS_TO_REGISTER #define OPS_TO_REGISTER namespace { constexpr const char* skip(const char* x) { return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x; } constexpr bool isequal(const char* x, const char* y) { return (*skip(x) && *skip(y)) ? 
(*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1)) : (!*skip(x) && !*skip(y)); } template<int N> struct find_in { static constexpr bool f(const char* x, const char* const y[N]) { return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1); } }; template<> struct find_in<0> { static constexpr bool f(const char* x, const char* const y[]) { return false; } }; } // end namespace constexpr const char* kNecessaryOpKernelClasses[] = { "BiasOp<CPUDevice, float>", }; #define SHOULD_REGISTER_OP_KERNEL(clz) (find_in<sizeof(kNecessaryOpKernelClasses) / sizeof(*kNecessaryOpKernelClasses)>::f(clz, kNecessaryOpKernelClasses)) constexpr inline bool ShouldRegisterOp(const char op[]) { return false || isequal(op, "AccumulateNV2") || isequal(op, "BiasAdd") ; } #define SHOULD_REGISTER_OP(op) ShouldRegisterOp(op) #define SHOULD_REGISTER_OP_GRADIENT false #endif""" % self.script_name header = selective_registration_header_lib.get_header( self.WriteGraphFiles(graphs), 'rawproto', default_ops) print(header) self.assertListEqual(expected.split('\n'), header.split('\n')) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/print_selective_registration_header_test.py
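# A hedged sketch of generating a selective-registration header outside the test
# harness above; the graph path, output path, and default-ops string are
# illustrative assumptions, and the arguments mirror the positional call in the test.
from tensorflow.python.tools import selective_registration_header_lib

graph_files = ["/tmp/frozen_graph.pb"]  # assumption: serialized GraphDef protos
header = selective_registration_header_lib.get_header(
    graph_files, "rawproto", "NoOp:NoOp,_Recv:RecvOp,_Send:SendOp")
with open("/tmp/ops_to_register.h", "w") as f:
  f.write(header)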
# pylint: disable=g-bad-file-header # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.client.graph_util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import image_ops from tensorflow.python.ops import math_ops # pylint: disable=unused-import from tensorflow.python.ops import nn_ops from tensorflow.python.platform import test from tensorflow.python.tools import optimize_for_inference_lib class OptimizeForInferenceTest(test.TestCase): def create_node_def(self, op, name, inputs): new_node = node_def_pb2.NodeDef() new_node.op = op new_node.name = name for input_name in inputs: new_node.input.extend([input_name]) return new_node def create_constant_node_def(self, name, value, dtype, shape=None): node = self.create_node_def("Const", name, []) self.set_attr_dtype(node, "dtype", dtype) self.set_attr_tensor(node, "value", value, dtype, shape) return node def set_attr_dtype(self, node, key, value): node.attr[key].CopyFrom( attr_value_pb2.AttrValue(type=value.as_datatype_enum)) def set_attr_tensor(self, node, key, value, dtype, shape=None): node.attr[key].CopyFrom( attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto( value, dtype=dtype, shape=shape))) def testOptimizeForInference(self): self.maxDiff = 1000 unused_constant_name = "unused_constant" unconnected_add_name = "unconnected_add" a_constant_name = "a_constant" b_constant_name = "b_constant" a_check_name = "a_check" b_check_name = "b_check" a_identity_name = "a_identity" b_identity_name = "b_identity" add_name = "add" unused_output_add_name = "unused_output_add" graph_def = graph_pb2.GraphDef() unused_constant = self.create_constant_node_def( unused_constant_name, value=0, dtype=dtypes.float32, shape=[]) graph_def.node.extend([unused_constant]) unconnected_add_node = self.create_node_def( "Add", unconnected_add_name, [unused_constant_name, unused_constant_name]) self.set_attr_dtype(unconnected_add_node, "T", dtypes.float32) graph_def.node.extend([unconnected_add_node]) a_constant = self.create_constant_node_def( a_constant_name, value=1, dtype=dtypes.float32, shape=[]) graph_def.node.extend([a_constant]) a_check_node = self.create_node_def("CheckNumerics", a_check_name, [a_constant_name]) graph_def.node.extend([a_check_node]) a_identity_node = 
self.create_node_def( "Identity", a_identity_name, [a_constant_name, "^" + a_check_name]) graph_def.node.extend([a_identity_node]) b_constant = self.create_constant_node_def( b_constant_name, value=1, dtype=dtypes.float32, shape=[]) graph_def.node.extend([b_constant]) b_check_node = self.create_node_def("CheckNumerics", b_check_name, [b_constant_name]) graph_def.node.extend([b_check_node]) b_identity_node = self.create_node_def( "Identity", b_identity_name, [b_constant_name, "^" + b_check_name]) graph_def.node.extend([b_identity_node]) add_node = self.create_node_def("Add", add_name, [a_identity_name, b_identity_name]) self.set_attr_dtype(add_node, "T", dtypes.float32) graph_def.node.extend([add_node]) unused_output_add_node = self.create_node_def("Add", unused_output_add_name, [add_name, b_constant_name]) self.set_attr_dtype(unused_output_add_node, "T", dtypes.float32) graph_def.node.extend([unused_output_add_node]) expected_output = graph_pb2.GraphDef() a_constant = self.create_constant_node_def( a_constant_name, value=1, dtype=dtypes.float32, shape=[]) expected_output.node.extend([a_constant]) b_constant = self.create_constant_node_def( b_constant_name, value=1, dtype=dtypes.float32, shape=[]) expected_output.node.extend([b_constant]) add_node = self.create_node_def("Add", add_name, [a_constant_name, b_constant_name]) self.set_attr_dtype(add_node, "T", dtypes.float32) expected_output.node.extend([add_node]) output = optimize_for_inference_lib.optimize_for_inference( graph_def, [], [add_name], dtypes.float32.as_datatype_enum) self.assertProtoEquals(expected_output, output) @test_util.run_deprecated_v1 def testFoldBatchNorms(self): with self.cached_session() as sess: inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6] input_op = constant_op.constant( np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32) weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4] weights_op = constant_op.constant( np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32) conv_op = nn_ops.conv2d( input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op") mean_op = constant_op.constant( np.array([10, 20]), shape=[2], dtype=dtypes.float32) variance_op = constant_op.constant( np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32) beta_op = constant_op.constant( np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32) gamma_op = constant_op.constant( np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32) test_util.set_producer_version(ops.get_default_graph(), 8) gen_nn_ops._batch_norm_with_global_normalization( conv_op, mean_op, variance_op, beta_op, gamma_op, 0.00001, False, name="output") original_graph_def = sess.graph_def original_result = sess.run(["output:0"]) optimized_graph_def = optimize_for_inference_lib.fold_batch_norms( original_graph_def) with self.cached_session() as sess: _ = importer.import_graph_def( optimized_graph_def, input_map={}, name="optimized") optimized_result = sess.run(["optimized/output:0"]) self.assertAllClose(original_result, optimized_result) for node in optimized_graph_def.node: self.assertNotEqual("BatchNormWithGlobalNormalization", node.op) @test_util.run_deprecated_v1 def testFoldFusedBatchNorms(self): for data_format, use_gpu, conv2d_func in [ ("NHWC", False, nn_ops.conv2d), ("NCHW", True, nn_ops.conv2d), ("NHWC", False, nn_ops.depthwise_conv2d_native), ("NCHW", True, nn_ops.depthwise_conv2d_native) ]: with self.cached_session(use_gpu=use_gpu) as sess: inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6] input_op = constant_op.constant( np.array(inputs), shape=[1, 1, 6, 2] if 
data_format == "NHWC" else [1, 2, 1, 6], dtype=dtypes.float32) if conv2d_func == nn_ops.conv2d: weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4] weights_op = constant_op.constant( np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32) else: weights = [1, 2, 0.3, 0.4] weights_op = constant_op.constant( np.array(weights), shape=[1, 2, 2, 1], dtype=dtypes.float32) conv_op = conv2d_func( input_op, weights_op, [1, 1, 1, 1], padding="SAME", data_format=data_format, name="conv_op") mean_op = constant_op.constant( np.array([10, 20]), shape=[2], dtype=dtypes.float32) variance_op = constant_op.constant( np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32) beta_op = constant_op.constant( np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32) gamma_op = constant_op.constant( np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32) ops.get_default_graph().graph_def_versions.producer = 9 gen_nn_ops._fused_batch_norm( conv_op, gamma_op, beta_op, mean_op, variance_op, 0.00001, is_training=False, data_format=data_format, name="output") original_graph_def = sess.graph_def original_result = sess.run(["output:0"]) optimized_graph_def = optimize_for_inference_lib.fold_batch_norms( original_graph_def) _ = importer.import_graph_def( optimized_graph_def, input_map={}, name="optimized") optimized_result = sess.run(["optimized/output:0"]) self.assertAllClose( original_result, optimized_result, rtol=1e-04, atol=1e-06) for node in optimized_graph_def.node: self.assertNotEqual("FusedBatchNorm", node.op) @test_util.run_deprecated_v1 def testFuseResizePadAndConv(self): with self.cached_session() as sess: inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6] input_op = constant_op.constant( np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32) resize_op = image_ops.resize_bilinear( input_op, [12, 4], align_corners=False) pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]], mode="REFLECT") weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4] weights_op = constant_op.constant( np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32) nn_ops.conv2d( pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output") original_graph_def = sess.graph_def original_result = sess.run(["output:0"]) optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv( original_graph_def, ["output"]) with self.cached_session() as sess: _ = importer.import_graph_def( optimized_graph_def, input_map={}, name="optimized") optimized_result = sess.run(["optimized/output:0"]) self.assertAllClose(original_result, optimized_result) for node in optimized_graph_def.node: self.assertNotEqual("Conv2D", node.op) self.assertNotEqual("MirrorPad", node.op) self.assertNotEqual("ResizeBilinear", node.op) @test_util.run_deprecated_v1 def testFuseResizeAndConv(self): with self.cached_session() as sess: inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6] input_op = constant_op.constant( np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32) resize_op = image_ops.resize_bilinear( input_op, [12, 4], align_corners=False) weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4] weights_op = constant_op.constant( np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32) nn_ops.conv2d( resize_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output") original_graph_def = sess.graph_def original_result = sess.run(["output:0"]) optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv( original_graph_def, ["output"]) with self.cached_session() as sess: _ = importer.import_graph_def( optimized_graph_def, input_map={}, name="optimized") 
optimized_result = sess.run(["optimized/output:0"]) self.assertAllClose(original_result, optimized_result) for node in optimized_graph_def.node: self.assertNotEqual("Conv2D", node.op) self.assertNotEqual("MirrorPad", node.op) @test_util.run_deprecated_v1 def testFusePadAndConv(self): with self.cached_session() as sess: inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6] input_op = constant_op.constant( np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32) pad_op = array_ops.pad(input_op, [[0, 0], [1, 1], [2, 2], [0, 0]], mode="REFLECT") weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4] weights_op = constant_op.constant( np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32) nn_ops.conv2d( pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output") original_graph_def = sess.graph_def original_result = sess.run(["output:0"]) optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv( original_graph_def, ["output"]) with self.cached_session() as sess: _ = importer.import_graph_def( optimized_graph_def, input_map={}, name="optimized") optimized_result = sess.run(["optimized/output:0"]) self.assertAllClose(original_result, optimized_result) for node in optimized_graph_def.node: self.assertNotEqual("Conv2D", node.op) self.assertNotEqual("ResizeBilinear", node.op) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/optimize_for_inference_test.py
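# A hedged end-to-end sketch of applying optimize_for_inference (exercised by the
# tests above) to a frozen GraphDef on disk; the file names and node names are
# illustrative assumptions, following the command-line example in the module docstring.
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.tools import optimize_for_inference_lib

graph_def = graph_pb2.GraphDef()
with open("/tmp/frozen_graph.pb", "rb") as f:  # assumption: output of freeze_graph
  graph_def.ParseFromString(f.read())

optimized = optimize_for_inference_lib.optimize_for_inference(
    graph_def,
    input_node_names=["Mul"],
    output_node_names=["softmax"],
    placeholder_type_enum=dtypes.float32.as_datatype_enum)

with open("/tmp/optimized_graph.pb", "wb") as f:
  f.write(optimized.SerializeToString())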
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A simple script for inspect checkpoint files.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import re import sys import numpy as np from tensorflow.python import pywrap_tensorflow from tensorflow.python.platform import app from tensorflow.python.platform import flags FLAGS = None def _count_total_params(reader, count_exclude_pattern=""): """Count total number of variables.""" var_to_shape_map = reader.get_variable_to_shape_map() # Filter out tensors that we don't want to count if count_exclude_pattern: regex_pattern = re.compile(count_exclude_pattern) new_var_to_shape_map = {} exclude_num_tensors = 0 exclude_num_params = 0 for v in var_to_shape_map: if regex_pattern.search(v): exclude_num_tensors += 1 exclude_num_params += np.prod(var_to_shape_map[v]) else: new_var_to_shape_map[v] = var_to_shape_map[v] var_to_shape_map = new_var_to_shape_map print("# Excluding %d tensors (%d params) that match %s when counting." % ( exclude_num_tensors, exclude_num_params, count_exclude_pattern)) var_sizes = [np.prod(var_to_shape_map[v]) for v in var_to_shape_map] return np.sum(var_sizes, dtype=int) def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors, all_tensor_names=False, count_exclude_pattern=""): """Prints tensors in a checkpoint file. If no `tensor_name` is provided, prints the tensor names and shapes in the checkpoint file. If `tensor_name` is provided, prints the content of the tensor. Args: file_name: Name of the checkpoint file. tensor_name: Name of the tensor in the checkpoint file to print. all_tensors: Boolean indicating whether to print all tensors. all_tensor_names: Boolean indicating whether to print all tensor names. count_exclude_pattern: Regex string, pattern to exclude tensors when count. """ try: reader = pywrap_tensorflow.NewCheckpointReader(file_name) if all_tensors or all_tensor_names: var_to_shape_map = reader.get_variable_to_shape_map() for key in sorted(var_to_shape_map): print("tensor_name: ", key) if all_tensors: print(reader.get_tensor(key)) elif not tensor_name: print(reader.debug_string().decode("utf-8")) else: print("tensor_name: ", tensor_name) print(reader.get_tensor(tensor_name)) # Count total number of parameters print("# Total number of params: %d" % _count_total_params( reader, count_exclude_pattern=count_exclude_pattern)) except Exception as e: # pylint: disable=broad-except print(str(e)) if "corrupted compressed block contents" in str(e): print("It's likely that your checkpoint file has been compressed " "with SNAPPY.") if ("Data loss" in str(e) and any(e in file_name for e in [".index", ".meta", ".data"])): proposed_file = ".".join(file_name.split(".")[0:-1]) v2_file_error_template = """ It's likely that this is a V2 checkpoint and you need to provide the filename *prefix*. 
Try removing the '.' and extension. Try: inspect checkpoint --file_name = {}""" print(v2_file_error_template.format(proposed_file)) def parse_numpy_printoption(kv_str): """Sets a single numpy printoption from a string of the form 'x=y'. See documentation on numpy.set_printoptions() for details about what values x and y can take. x can be any option listed there other than 'formatter'. Args: kv_str: A string of the form 'x=y', such as 'threshold=100000' Raises: argparse.ArgumentTypeError: If the string couldn't be used to set any nump printoption. """ k_v_str = kv_str.split("=", 1) if len(k_v_str) != 2 or not k_v_str[0]: raise argparse.ArgumentTypeError("'%s' is not in the form k=v." % kv_str) k, v_str = k_v_str printoptions = np.get_printoptions() if k not in printoptions: raise argparse.ArgumentTypeError("'%s' is not a valid printoption." % k) v_type = type(printoptions[k]) if v_type is type(None): raise argparse.ArgumentTypeError( "Setting '%s' from the command line is not supported." % k) try: v = ( v_type(v_str) if v_type is not bool else flags.BooleanParser().parse(v_str)) except ValueError as e: raise argparse.ArgumentTypeError(e.message) np.set_printoptions(**{k: v}) def main(unused_argv): if not FLAGS.file_name: print("Usage: inspect_checkpoint --file_name=checkpoint_file_name " "[--tensor_name=tensor_to_print] " "[--all_tensors] " "[--all_tensor_names] " "[--printoptions]") sys.exit(1) else: print_tensors_in_checkpoint_file( FLAGS.file_name, FLAGS.tensor_name, FLAGS.all_tensors, FLAGS.all_tensor_names, count_exclude_pattern=FLAGS.count_exclude_pattern) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.register("type", "bool", lambda v: v.lower() == "true") parser.add_argument( "--file_name", type=str, default="", help="Checkpoint filename. " "Note, if using Checkpoint V2 format, file_name is the " "shared prefix between all files in the checkpoint.") parser.add_argument( "--tensor_name", type=str, default="", help="Name of the tensor to inspect") parser.add_argument( "--count_exclude_pattern", type=str, default="", help="Pattern to exclude tensors, e.g., from optimizers, when counting.") parser.add_argument( "--all_tensors", nargs="?", const=True, type="bool", default=False, help="If True, print the names and values of all the tensors.") parser.add_argument( "--all_tensor_names", nargs="?", const=True, type="bool", default=False, help="If True, print the names of all the tensors.") parser.add_argument( "--printoptions", nargs="*", type=parse_numpy_printoption, help="Argument for numpy.set_printoptions(), in the form 'k=v'.") FLAGS, unparsed = parser.parse_known_args() app.run(main=main, argv=[sys.argv[0]] + unparsed)
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/inspect_checkpoint.py
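A minimal sketch of driving the inspector above programmatically instead of through its argparse-based main(); the checkpoint prefix "/tmp/model.ckpt-1000" and the tensor name "dense/kernel" are hypothetical placeholders, not values defined in the file.

# Hedged example: calling print_tensors_in_checkpoint_file directly.
from tensorflow.python.tools import inspect_checkpoint as chkp

# List every tensor name and value stored under the (hypothetical) prefix.
chkp.print_tensors_in_checkpoint_file(
    "/tmp/model.ckpt-1000",   # V2 checkpoint prefix: no .index/.data extension
    tensor_name="",
    all_tensors=True)

# Print one tensor and exclude optimizer slots when counting parameters.
chkp.print_tensors_in_checkpoint_file(
    "/tmp/model.ckpt-1000",
    tensor_name="dense/kernel",
    all_tensors=False,
    count_exclude_pattern="Adam")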
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Command-line interface to inspect and execute a graph in a SavedModel. For detailed usages and examples, please refer to: https://www.tensorflow.org/guide/saved_model#cli_to_inspect_and_execute_savedmodel """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import collections.abc as collections import ast import os import re import sys import warnings import numpy as np from six import integer_types from tensorflow.core.example import example_pb2 from tensorflow.core.framework import types_pb2 from tensorflow.python.client import session from tensorflow.python.debug.wrappers import local_cli_wrapper from tensorflow.python.framework import meta_graph as meta_graph_lib from tensorflow.python.framework import ops as ops_lib from tensorflow.python.framework import tensor_spec from tensorflow.python.lib.io import file_io from tensorflow.python.platform import app # pylint: disable=unused-import from tensorflow.python.saved_model import load from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import save from tensorflow.python.tools import saved_model_utils # Set of ops to blacklist. _OP_BLACKLIST = set(['WriteFile', 'ReadFile', 'PrintV2']) def _show_tag_sets(saved_model_dir): """Prints the tag-sets stored in SavedModel directory. Prints all the tag-sets for MetaGraphs stored in SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect. """ tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir) print('The given SavedModel contains the following tag-sets:') for tag_set in sorted(tag_sets): print(', '.join(sorted(tag_set))) def _show_signature_def_map_keys(saved_model_dir, tag_set): """Prints the keys for each SignatureDef in the SignatureDef map. Prints the list of SignatureDef keys from the SignatureDef map specified by the given tag-set and SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect. tag_set: Group of tag(s) of the MetaGraphDef to get SignatureDef map from, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in. """ signature_def_map = get_signature_def_map(saved_model_dir, tag_set) print('The given SavedModel MetaGraphDef contains SignatureDefs with the ' 'following keys:') for signature_def_key in sorted(signature_def_map.keys()): print('SignatureDef key: \"%s\"' % signature_def_key) def _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key): """Gets TensorInfo for all inputs of the SignatureDef. Returns a dictionary that maps each input key to its TensorInfo for the given signature_def_key in the meta_graph_def Args: meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to look up SignatureDef key. 
signature_def_key: A SignatureDef key string. Returns: A dictionary that maps input tensor keys to TensorInfos. """ return meta_graph_def.signature_def[signature_def_key].inputs def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key): """Gets TensorInfos for all outputs of the SignatureDef. Returns a dictionary that maps each output key to its TensorInfo for the given signature_def_key in the meta_graph_def. Args: meta_graph_def: MetaGraphDef protocol buffer with the SignatureDefmap to look up signature_def_key. signature_def_key: A SignatureDef key string. Returns: A dictionary that maps output tensor keys to TensorInfos. """ return meta_graph_def.signature_def[signature_def_key].outputs def _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key, indent=0): """Prints input and output TensorInfos. Prints the details of input and output TensorInfos for the SignatureDef mapped by the given signature_def_key. Args: saved_model_dir: Directory containing the SavedModel to inspect. tag_set: Group of tag(s) of the MetaGraphDef, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in. signature_def_key: A SignatureDef key string. indent: How far (in increments of 2 spaces) to indent each line of output. """ meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set) inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def( meta_graph_def, signature_def_key) outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def( meta_graph_def, signature_def_key) indent_str = ' ' * indent def in_print(s): print(indent_str + s) in_print('The given SavedModel SignatureDef contains the following input(s):') for input_key, input_tensor in sorted(inputs_tensor_info.items()): in_print(' inputs[\'%s\'] tensor_info:' % input_key) _print_tensor_info(input_tensor, indent+1) in_print('The given SavedModel SignatureDef contains the following ' 'output(s):') for output_key, output_tensor in sorted(outputs_tensor_info.items()): in_print(' outputs[\'%s\'] tensor_info:' % output_key) _print_tensor_info(output_tensor, indent+1) in_print('Method name is: %s' % meta_graph_def.signature_def[signature_def_key].method_name) def _show_defined_functions(saved_model_dir): """Prints the callable concrete and polymorphic functions of the Saved Model. Args: saved_model_dir: Directory containing the SavedModel to inspect. 
""" meta_graphs = saved_model_utils.read_saved_model(saved_model_dir).meta_graphs has_object_graph_def = False for meta_graph_def in meta_graphs: has_object_graph_def |= meta_graph_def.HasField('object_graph_def') if not has_object_graph_def: return with ops_lib.Graph().as_default(): trackable_object = load.load(saved_model_dir) print('\nDefined Functions:', end='') functions = ( save._AugmentedGraphView(trackable_object) # pylint: disable=protected-access .list_functions(trackable_object)) functions = sorted(functions.items(), key=lambda x: x[0]) for name, function in functions: print('\n Function Name: \'%s\'' % name) concrete_functions = \ function._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access concrete_functions = sorted(concrete_functions, key=lambda x: x.name) for index, concrete_function in enumerate(concrete_functions, 1): args, kwargs = concrete_function.structured_input_signature print(' Option #%d' % index) print(' Callable with:') _print_args(args, indent=4) if kwargs: _print_args(kwargs, 'Named Argument', indent=4) def _print_args(arguments, argument_type='Argument', indent=0): """Formats and prints the argument of the concrete functions defined in the model. Args: arguments: Arguments to format print. argument_type: Type of arguments. indent: How far (in increments of 2 spaces) to indent each line of output. """ indent_str = ' ' * indent def _maybe_add_quotes(value): is_quotes = '\'' * isinstance(value, str) return is_quotes + str(value) + is_quotes def in_print(s, end='\n'): print(indent_str + s, end=end) for index, element in enumerate(arguments, 1): if indent == 4: in_print('%s #%d' % (argument_type, index)) if isinstance(element, tensor_spec.TensorSpec): print((indent + 1) * ' ' + '%s: %s' % (element.name, repr(element))) elif (isinstance(element, collections.Iterable) and not isinstance(element, dict)): in_print(' DType: %s' % type(element).__name__) in_print(' Value: [', end='') for value in element: print('%s' % _maybe_add_quotes(value), end=', ') print('\b\b]') elif isinstance(element, dict): in_print(' DType: %s' % type(element).__name__) in_print(' Value: {', end='') for (key, value) in element.items(): print('\'%s\': %s' % (str(key), _maybe_add_quotes(value)), end=', ') print('\b\b}') else: in_print(' DType: %s' % type(element).__name__) in_print(' Value: %s' % str(element)) def _print_tensor_info(tensor_info, indent=0): """Prints details of the given tensor_info. Args: tensor_info: TensorInfo object to be printed. indent: How far (in increments of 2 spaces) to indent each line output """ indent_str = ' ' * indent def in_print(s): print(indent_str + s) in_print(' dtype: ' + {value: key for (key, value) in types_pb2.DataType.items()}[tensor_info.dtype]) # Display shape as tuple. if tensor_info.tensor_shape.unknown_rank: shape = 'unknown_rank' else: dims = [str(dim.size) for dim in tensor_info.tensor_shape.dim] shape = ', '.join(dims) shape = '(' + shape + ')' in_print(' shape: ' + shape) in_print(' name: ' + tensor_info.name) def _show_all(saved_model_dir): """Prints tag-set, SignatureDef and Inputs/Outputs information in SavedModel. Prints all tag-set, SignatureDef and Inputs/Outputs information stored in SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect. 
""" tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir) for tag_set in sorted(tag_sets): print("\nMetaGraphDef with tag-set: '%s' " "contains the following SignatureDefs:" % ', '.join(tag_set)) tag_set = ','.join(tag_set) signature_def_map = get_signature_def_map(saved_model_dir, tag_set) for signature_def_key in sorted(signature_def_map.keys()): print('\nsignature_def[\'' + signature_def_key + '\']:') _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key, indent=1) _show_defined_functions(saved_model_dir) def get_meta_graph_def(saved_model_dir, tag_set): """DEPRECATED: Use saved_model_utils.get_meta_graph_def instead. Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given tag-set and SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect or execute. tag_set: Group of tag(s) of the MetaGraphDef to load, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in. Raises: RuntimeError: An error when the given tag-set does not exist in the SavedModel. Returns: A MetaGraphDef corresponding to the tag-set. """ return saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set) def get_signature_def_map(saved_model_dir, tag_set): """Gets SignatureDef map from a MetaGraphDef in a SavedModel. Returns the SignatureDef map for the given tag-set in the SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect or execute. tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in. Returns: A SignatureDef map that maps from string keys to SignatureDefs. """ meta_graph = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set) return meta_graph.signature_def def scan_meta_graph_def(meta_graph_def): """Scans meta_graph_def and reports if there are ops on blacklist. Print ops if they are on black list, or print success if no blacklisted ops found. Args: meta_graph_def: MetaGraphDef protocol buffer. """ all_ops_set = set( meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def)) blacklisted_ops = _OP_BLACKLIST & all_ops_set if blacklisted_ops: # TODO(yifeif): print more warnings print('MetaGraph with tag set %s contains the following blacklisted ops:' % meta_graph_def.meta_info_def.tags, blacklisted_ops) else: print('MetaGraph with tag set %s does not contain blacklisted ops.' % meta_graph_def.meta_info_def.tags) def run_saved_model_with_feed_dict(saved_model_dir, tag_set, signature_def_key, input_tensor_key_feed_dict, outdir, overwrite_flag, worker=None, init_tpu=False, tf_debug=False): """Runs SavedModel and fetch all outputs. Runs the input dictionary through the MetaGraphDef within a SavedModel specified by the given tag_set and SignatureDef. Also save the outputs to file if outdir is not None. Args: saved_model_dir: Directory containing the SavedModel to execute. tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in. signature_def_key: A SignatureDef key string. input_tensor_key_feed_dict: A dictionary maps input keys to numpy ndarrays. outdir: A directory to save the outputs to. If the directory doesn't exist, it will be created. overwrite_flag: A boolean flag to allow overwrite output file if file with the same name exists. worker: If provided, the session will be run on the worker. 
Valid worker specification is a bns or gRPC path. init_tpu: If true, the TPU system will be initialized after the session is created. tf_debug: A boolean flag to use TensorFlow Debugger (TFDBG) to observe the intermediate Tensor values and runtime GraphDefs while running the SavedModel. Raises: ValueError: When any of the input tensor keys is not valid. RuntimeError: An error when output file already exists and overwrite is not enabled. """ # Get a list of output tensor names. meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set) # Re-create feed_dict based on input tensor name instead of key as session.run # uses tensor name. inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def( meta_graph_def, signature_def_key) # Check if input tensor keys are valid. for input_key_name in input_tensor_key_feed_dict.keys(): if input_key_name not in inputs_tensor_info: raise ValueError( '"%s" is not a valid input key. Please choose from %s, or use ' '--show option.' % (input_key_name, '"' + '", "'.join(inputs_tensor_info.keys()) + '"')) inputs_feed_dict = { inputs_tensor_info[key].name: tensor for key, tensor in input_tensor_key_feed_dict.items() } # Get outputs outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def( meta_graph_def, signature_def_key) # Sort to preserve order because we need to go from value to key later. output_tensor_keys_sorted = sorted(outputs_tensor_info.keys()) output_tensor_names_sorted = [ outputs_tensor_info[tensor_key].name for tensor_key in output_tensor_keys_sorted ] with session.Session(worker, graph=ops_lib.Graph()) as sess: if init_tpu: print('Initializing TPU System ...') # This is needed for freshly started worker, or if the job # restarts after a preemption. sess.run(tf.contrib.tpu.initialize_system()) loader.load(sess, tag_set.split(','), saved_model_dir) if tf_debug: sess = local_cli_wrapper.LocalCLIDebugWrapperSession(sess) outputs = sess.run(output_tensor_names_sorted, feed_dict=inputs_feed_dict) for i, output in enumerate(outputs): output_tensor_key = output_tensor_keys_sorted[i] print('Result for output key %s:\n%s' % (output_tensor_key, output)) # Only save if outdir is specified. if outdir: # Create directory if outdir does not exist if not os.path.isdir(outdir): os.makedirs(outdir) output_full_path = os.path.join(outdir, output_tensor_key + '.npy') # If overwrite not enabled and file already exist, error out if not overwrite_flag and os.path.exists(output_full_path): raise RuntimeError( 'Output file %s already exists. Add \"--overwrite\" to overwrite' ' the existing output files.' % output_full_path) np.save(output_full_path, output) print('Output %s is saved to %s' % (output_tensor_key, output_full_path)) def preprocess_inputs_arg_string(inputs_str): """Parses input arg into dictionary that maps input to file/variable tuple. Parses input string in the format of, for example, "input1=filename1[variable_name1],input2=filename2" into a dictionary looks like {'input_key1': (filename1, variable_name1), 'input_key2': (file2, None)} , which maps input keys to a tuple of file name and variable name(None if empty). Args: inputs_str: A string that specified where to load inputs. Inputs are separated by semicolons. * For each input key: '<input_key>=<filename>' or '<input_key>=<filename>[<variable_name>]' * The optional 'variable_name' key will be set to None if not specified. Returns: A dictionary that maps input keys to a tuple of file name and variable name. 
Raises: RuntimeError: An error when the given input string is in a bad format. """ input_dict = {} inputs_raw = inputs_str.split(';') for input_raw in filter(bool, inputs_raw): # skip empty strings # Format of input=filename[variable_name]' match = re.match(r'([^=]+)=([^\[\]]+)\[([^\[\]]+)\]$', input_raw) if match: input_dict[match.group(1)] = match.group(2), match.group(3) else: # Format of input=filename' match = re.match(r'([^=]+)=([^\[\]]+)$', input_raw) if match: input_dict[match.group(1)] = match.group(2), None else: raise RuntimeError( '--inputs "%s" format is incorrect. Please follow' '"<input_key>=<filename>", or' '"<input_key>=<filename>[<variable_name>]"' % input_raw) return input_dict def preprocess_input_exprs_arg_string(input_exprs_str, safe=True): """Parses input arg into dictionary that maps input key to python expression. Parses input string in the format of 'input_key=<python expression>' into a dictionary that maps each input_key to its python expression. Args: input_exprs_str: A string that specifies python expression for input keys. Each input is separated by semicolon. For each input key: 'input_key=<python expression>' safe: Whether to evaluate the python expression as literals or allow arbitrary calls (e.g. numpy usage). Returns: A dictionary that maps input keys to their values. Raises: RuntimeError: An error when the given input string is in a bad format. """ input_dict = {} for input_raw in filter(bool, input_exprs_str.split(';')): if '=' not in input_exprs_str: raise RuntimeError('--input_exprs "%s" format is incorrect. Please follow' '"<input_key>=<python expression>"' % input_exprs_str) input_key, expr = input_raw.split('=', 1) if safe: try: input_dict[input_key] = ast.literal_eval(expr) except: raise RuntimeError( f'Expression "{expr}" is not a valid python literal.') else: # ast.literal_eval does not work with numpy expressions input_dict[input_key] = eval(expr) # pylint: disable=eval-used return input_dict def preprocess_input_examples_arg_string(input_examples_str): """Parses input into dict that maps input keys to lists of tf.Example. Parses input string in the format of 'input_key1=[{feature_name: feature_list}];input_key2=[{feature_name:feature_list}];' into a dictionary that maps each input_key to its list of serialized tf.Example. Args: input_examples_str: A string that specifies a list of dictionaries of feature_names and their feature_lists for each input. Each input is separated by semicolon. For each input key: 'input=[{feature_name1: feature_list1, feature_name2:feature_list2}]' items in feature_list can be the type of float, int, long or str. Returns: A dictionary that maps input keys to lists of serialized tf.Example. Raises: ValueError: An error when the given tf.Example is not a list. 
""" input_dict = preprocess_input_exprs_arg_string(input_examples_str) for input_key, example_list in input_dict.items(): if not isinstance(example_list, list): raise ValueError( 'tf.Example input must be a list of dictionaries, but "%s" is %s' % (example_list, type(example_list))) input_dict[input_key] = [ _create_example_string(example) for example in example_list ] return input_dict def _create_example_string(example_dict): """Create a serialized tf.example from feature dictionary.""" example = example_pb2.Example() for feature_name, feature_list in example_dict.items(): if not isinstance(feature_list, list): raise ValueError('feature value must be a list, but %s: "%s" is %s' % (feature_name, feature_list, type(feature_list))) if isinstance(feature_list[0], float): example.features.feature[feature_name].float_list.value.extend( feature_list) elif isinstance(feature_list[0], str): example.features.feature[feature_name].bytes_list.value.extend( feature_list) elif isinstance(feature_list[0], integer_types): example.features.feature[feature_name].int64_list.value.extend( feature_list) else: raise ValueError( 'Type %s for value %s is not supported for tf.train.Feature.' % (type(feature_list[0]), feature_list[0])) return example.SerializeToString() def load_inputs_from_input_arg_string(inputs_str, input_exprs_str, input_examples_str): """Parses input arg strings and create inputs feed_dict. Parses '--inputs' string for inputs to be loaded from file, and parses '--input_exprs' string for inputs to be evaluated from python expression. '--input_examples' string for inputs to be created from tf.example feature dictionary list. Args: inputs_str: A string that specified where to load inputs. Each input is separated by semicolon. * For each input key: '<input_key>=<filename>' or '<input_key>=<filename>[<variable_name>]' * The optional 'variable_name' key will be set to None if not specified. * File specified by 'filename' will be loaded using numpy.load. Inputs can be loaded from only .npy, .npz or pickle files. * The "[variable_name]" key is optional depending on the input file type as descripted in more details below. When loading from a npy file, which always contains a numpy ndarray, the content will be directly assigned to the specified input tensor. If a variable_name is specified, it will be ignored and a warning will be issued. When loading from a npz zip file, user can specify which variable within the zip file to load for the input tensor inside the square brackets. If nothing is specified, this function will check that only one file is included in the zip and load it for the specified input tensor. When loading from a pickle file, if no variable_name is specified in the square brackets, whatever that is inside the pickle file will be passed to the specified input tensor, else SavedModel CLI will assume a dictionary is stored in the pickle file and the value corresponding to the variable_name will be used. input_exprs_str: A string that specifies python expressions for inputs. * In the format of: '<input_key>=<python expression>'. * numpy module is available as np. input_examples_str: A string that specifies tf.Example with dictionary. * In the format of: '<input_key>=<[{feature:value list}]>' Returns: A dictionary that maps input tensor keys to numpy ndarrays. Raises: RuntimeError: An error when a key is specified, but the input file contains multiple numpy ndarrays, none of which matches the given key. 
RuntimeError: An error when no key is specified, but the input file contains more than one numpy ndarrays. """ tensor_key_feed_dict = {} inputs = preprocess_inputs_arg_string(inputs_str) input_exprs = preprocess_input_exprs_arg_string(input_exprs_str) input_examples = preprocess_input_examples_arg_string(input_examples_str) for input_tensor_key, (filename, variable_name) in inputs.items(): data = np.load(file_io.FileIO(filename, mode='rb'), allow_pickle=True) # When a variable_name key is specified for the input file if variable_name: # if file contains a single ndarray, ignore the input name if isinstance(data, np.ndarray): warnings.warn( 'Input file %s contains a single ndarray. Name key \"%s\" ignored.' % (filename, variable_name)) tensor_key_feed_dict[input_tensor_key] = data else: if variable_name in data: tensor_key_feed_dict[input_tensor_key] = data[variable_name] else: raise RuntimeError( 'Input file %s does not contain variable with name \"%s\".' % (filename, variable_name)) # When no key is specified for the input file. else: # Check if npz file only contains a single numpy ndarray. if isinstance(data, np.lib.npyio.NpzFile): variable_name_list = data.files if len(variable_name_list) != 1: raise RuntimeError( 'Input file %s contains more than one ndarrays. Please specify ' 'the name of ndarray to use.' % filename) tensor_key_feed_dict[input_tensor_key] = data[variable_name_list[0]] else: tensor_key_feed_dict[input_tensor_key] = data # When input is a python expression: for input_tensor_key, py_expr_evaluated in input_exprs.items(): if input_tensor_key in tensor_key_feed_dict: warnings.warn( 'input_key %s has been specified with both --inputs and --input_exprs' ' options. Value in --input_exprs will be used.' % input_tensor_key) tensor_key_feed_dict[input_tensor_key] = py_expr_evaluated # When input is a tf.Example: for input_tensor_key, example in input_examples.items(): if input_tensor_key in tensor_key_feed_dict: warnings.warn( 'input_key %s has been specified in multiple options. Value in ' '--input_examples will be used.' % input_tensor_key) tensor_key_feed_dict[input_tensor_key] = example return tensor_key_feed_dict def show(args): """Function triggered by show command. Args: args: A namespace parsed from command line. """ # If all tag is specified, display all information. if args.all: _show_all(args.dir) else: # If no tag is specified, display all tag_set, if no signaure_def key is # specified, display all SignatureDef keys, else show input output tensor # information corresponding to the given SignatureDef key if args.tag_set is None: _show_tag_sets(args.dir) else: if args.signature_def is None: _show_signature_def_map_keys(args.dir, args.tag_set) else: _show_inputs_outputs(args.dir, args.tag_set, args.signature_def) def run(args): """Function triggered by run command. Args: args: A namespace parsed from command line. Raises: AttributeError: An error when neither --inputs nor --input_exprs is passed to run command. """ if not args.inputs and not args.input_exprs and not args.input_examples: raise AttributeError( 'At least one of --inputs, --input_exprs or --input_examples must be ' 'required') tensor_key_feed_dict = load_inputs_from_input_arg_string( args.inputs, args.input_exprs, args.input_examples) run_saved_model_with_feed_dict(args.dir, args.tag_set, args.signature_def, tensor_key_feed_dict, args.outdir, args.overwrite, worker=args.worker, init_tpu=args.init_tpu, tf_debug=args.tf_debug) def scan(args): """Function triggered by scan command. 
Args: args: A namespace parsed from command line. """ if args.tag_set: scan_meta_graph_def( saved_model_utils.get_meta_graph_def(args.dir, args.tag_set)) else: saved_model = saved_model_utils.read_saved_model(args.dir) for meta_graph_def in saved_model.meta_graphs: scan_meta_graph_def(meta_graph_def) def convert_with_tensorrt(args): """Function triggered by 'convert tensorrt' command. Args: args: A namespace parsed from command line. """ # Import here instead of at top, because this will crash if TensorRT is # not installed from tensorflow.contrib import tensorrt # pylint: disable=g-import-not-at-top tensorrt.create_inference_graph( None, None, max_batch_size=args.max_batch_size, max_workspace_size_bytes=args.max_workspace_size_bytes, precision_mode=args.precision_mode, minimum_segment_size=args.minimum_segment_size, is_dynamic_op=args.is_dynamic_op, input_saved_model_dir=args.dir, input_saved_model_tags=args.tag_set.split(','), output_saved_model_dir=args.output_dir) def create_parser(): """Creates a parser that parse the command line arguments. Returns: A namespace parsed from command line arguments. """ parser = argparse.ArgumentParser( description='saved_model_cli: Command-line interface for SavedModel') parser.add_argument('-v', '--version', action='version', version='0.1.0') subparsers = parser.add_subparsers( title='commands', description='valid commands', help='additional help') # show command show_msg = ( 'Usage examples:\n' 'To show all tag-sets in a SavedModel:\n' '$saved_model_cli show --dir /tmp/saved_model\n\n' 'To show all available SignatureDef keys in a ' 'MetaGraphDef specified by its tag-set:\n' '$saved_model_cli show --dir /tmp/saved_model --tag_set serve\n\n' 'For a MetaGraphDef with multiple tags in the tag-set, all tags must be ' 'passed in, separated by \';\':\n' '$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu\n\n' 'To show all inputs and outputs TensorInfo for a specific' ' SignatureDef specified by the SignatureDef key in a' ' MetaGraph.\n' '$saved_model_cli show --dir /tmp/saved_model --tag_set serve' ' --signature_def serving_default\n\n' 'To show all available information in the SavedModel:\n' '$saved_model_cli show --dir /tmp/saved_model --all') parser_show = subparsers.add_parser( 'show', description=show_msg, formatter_class=argparse.RawTextHelpFormatter) parser_show.add_argument( '--dir', type=str, required=True, help='directory containing the SavedModel to inspect') parser_show.add_argument( '--all', action='store_true', help='if set, will output all information in given SavedModel') parser_show.add_argument( '--tag_set', type=str, default=None, help='tag-set of graph in SavedModel to show, separated by \',\'') parser_show.add_argument( '--signature_def', type=str, default=None, metavar='SIGNATURE_DEF_KEY', help='key of SignatureDef to display input(s) and output(s) for') parser_show.set_defaults(func=show) # run command run_msg = ('Usage example:\n' 'To run input tensors from files through a MetaGraphDef and save' ' the output tensors to files:\n' '$saved_model_cli show --dir /tmp/saved_model --tag_set serve \\\n' ' --signature_def serving_default \\\n' ' --inputs input1_key=/tmp/124.npz[x],input2_key=/tmp/123.npy ' '\\\n' ' --input_exprs \'input3_key=np.ones(2)\' \\\n' ' --input_examples ' '\'input4_key=[{"id":[26],"weights":[0.5, 0.5]}]\' \\\n' ' --outdir=/out\n\n' 'For more information about input file format, please see:\n' 'https://www.tensorflow.org/guide/saved_model_cli\n') parser_run = subparsers.add_parser( 'run', 
description=run_msg, formatter_class=argparse.RawTextHelpFormatter) parser_run.add_argument( '--dir', type=str, required=True, help='directory containing the SavedModel to execute') parser_run.add_argument( '--tag_set', type=str, required=True, help='tag-set of graph in SavedModel to load, separated by \',\'') parser_run.add_argument( '--signature_def', type=str, required=True, metavar='SIGNATURE_DEF_KEY', help='key of SignatureDef to run') msg = ('Loading inputs from files, in the format of \'<input_key>=<filename>,' ' or \'<input_key>=<filename>[<variable_name>]\', separated by \';\'.' ' The file format can only be from .npy, .npz or pickle.') parser_run.add_argument('--inputs', type=str, default='', help=msg) msg = ('Specifying inputs by python expressions, in the format of' ' "<input_key>=\'<python expression>\'", separated by \';\'. ' 'numpy module is available as \'np\'. Please note that the expression ' 'will be evaluated as-is, and is susceptible to code injection. ' 'When this is set, the value will override duplicate input keys from ' '--inputs option.') parser_run.add_argument('--input_exprs', type=str, default='', help=msg) msg = ( 'Specifying tf.Example inputs as list of dictionaries. For example: ' '<input_key>=[{feature0:value_list,feature1:value_list}]. Use ";" to ' 'separate input keys. Will override duplicate input keys from --inputs ' 'and --input_exprs option.') parser_run.add_argument('--input_examples', type=str, default='', help=msg) parser_run.add_argument( '--outdir', type=str, default=None, help='if specified, output tensor(s) will be saved to given directory') parser_run.add_argument( '--overwrite', action='store_true', help='if set, output file will be overwritten if it already exists.') parser_run.add_argument( '--tf_debug', action='store_true', help='if set, will use TensorFlow Debugger (tfdbg) to watch the ' 'intermediate Tensors and runtime GraphDefs while running the ' 'SavedModel.') parser_run.add_argument( '--worker', type=str, default=None, help='if specified, a Session will be run on the worker. ' 'Valid worker specification is a bns or gRPC path.') parser_run.add_argument( '--init_tpu', action='store_true', default=None, help='if specified, tpu.initialize_system will be called on the Session. 
' 'This option should be only used if the worker is a TPU job.') parser_run.set_defaults(func=run) # scan command scan_msg = ('Usage example:\n' 'To scan for blacklisted ops in SavedModel:\n' '$saved_model_cli scan --dir /tmp/saved_model\n' 'To scan a specific MetaGraph, pass in --tag_set\n') parser_scan = subparsers.add_parser( 'scan', description=scan_msg, formatter_class=argparse.RawTextHelpFormatter) parser_scan.add_argument( '--dir', type=str, required=True, help='directory containing the SavedModel to execute') parser_scan.add_argument( '--tag_set', type=str, help='tag-set of graph in SavedModel to scan, separated by \',\'') parser_scan.set_defaults(func=scan) # convert command convert_msg = ('Usage example:\n' 'To convert the SavedModel to one that have TensorRT ops:\n' '$saved_model_cli convert \\\n' ' --dir /tmp/saved_model \\\n' ' --tag_set serve \\\n' ' --output_dir /tmp/saved_model_trt \\\n' ' tensorrt \n') parser_convert = subparsers.add_parser( 'convert', description=convert_msg, formatter_class=argparse.RawTextHelpFormatter) parser_convert.add_argument( '--dir', type=str, required=True, help='directory containing the SavedModel to convert') parser_convert.add_argument( '--output_dir', type=str, required=True, help='output directory for the converted SavedModel') parser_convert.add_argument( '--tag_set', type=str, required=True, help='tag-set of graph in SavedModel to convert, separated by \',\'') convert_subparsers = parser_convert.add_subparsers( title='conversion methods', description='valid conversion methods', help='the conversion to run with the SavedModel') parser_convert_with_tensorrt = convert_subparsers.add_parser( 'tensorrt', description='Convert the SavedModel with Tensorflow-TensorRT integration', formatter_class=argparse.RawTextHelpFormatter) parser_convert_with_tensorrt.add_argument( '--max_batch_size', type=int, default=1, help='max size for the input batch') parser_convert_with_tensorrt.add_argument( '--max_workspace_size_bytes', type=int, default=2 << 20, help=('the maximum GPU temporary memory which the TRT engine can use at ' 'execution time')) parser_convert_with_tensorrt.add_argument( '--precision_mode', type=str, default='FP32', help='one of FP32, FP16 and INT8') parser_convert_with_tensorrt.add_argument( '--minimum_segment_size', type=int, default=3, help=('the minimum number of nodes required for a subgraph to be replaced' 'in a TensorRT node')) parser_convert_with_tensorrt.add_argument( '--is_dynamic_op', type=bool, default=False, help=('whether to generate dynamic TRT ops which will build the TRT ' 'network and engine at run time')) parser_convert_with_tensorrt.set_defaults(func=convert_with_tensorrt) return parser def main(): parser = create_parser() args = parser.parse_args() if not hasattr(args, 'func'): parser.error('too few arguments') args.func(args) if __name__ == '__main__': sys.exit(main())
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/saved_model_cli.py
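A hedged sketch of reusing the CLI above from Python by building its parser and dispatching to the registered sub-command handler; the SavedModel directory "/tmp/saved_model" and the tag-set "serve" are hypothetical placeholders.

# Equivalent to: saved_model_cli show --dir /tmp/saved_model --all
from tensorflow.python.tools import saved_model_cli

parser = saved_model_cli.create_parser()
args = parser.parse_args(['show', '--dir', '/tmp/saved_model', '--all'])
args.func(args)  # each sub-command registers its handler via set_defaults(func=...)

# Inspect the SignatureDef map for one tag-set directly.
sig_map = saved_model_cli.get_signature_def_map('/tmp/saved_model', 'serve')
print(sorted(sig_map.keys()))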
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ================================ """Imports a protobuf model as a graph in Tensorboard.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys from tensorflow.core.framework import graph_pb2 from tensorflow.python.client import session from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.platform import app from tensorflow.python.platform import gfile from tensorflow.python.summary import summary # Try importing TensorRT ops if available # TODO(aaroey): ideally we should import everything from contrib, but currently # tensorrt module would cause build errors when being imported in # tensorflow/contrib/__init__.py. Fix it. # pylint: disable=unused-import,g-import-not-at-top,wildcard-import try: from tensorflow.contrib.tensorrt.ops.gen_trt_engine_op import * except ImportError: pass # Try importing Horovod ops if available try: import horovod.tensorflow except ImportError: pass # pylint: enable=unused-import,g-import-not-at-top,wildcard-import def import_to_tensorboard(model_dir, log_dir): """View an imported protobuf model (`.pb` file) as a graph in Tensorboard. Args: model_dir: The location of the protobuf (`pb`) model to visualize log_dir: The location for the Tensorboard log to begin visualization from. Usage: Call this function with your model location and desired log directory. Launch Tensorboard by pointing it to the log directory. View your imported `.pb` model as a graph. """ with session.Session(graph=ops.Graph()) as sess: with gfile.GFile(model_dir, "rb") as f: graph_def = graph_pb2.GraphDef() graph_def.ParseFromString(f.read()) importer.import_graph_def(graph_def, name='') pb_visual_writer = summary.FileWriter(log_dir) pb_visual_writer.add_graph(sess.graph) print("Model Imported. Visualize by running: " "tensorboard --logdir={}".format(log_dir)) def main(unused_args): import_to_tensorboard(FLAGS.model_dir, FLAGS.log_dir) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.register("type", "bool", lambda v: v.lower() == "true") parser.add_argument( "--model_dir", type=str, default="", required=True, help="The location of the protobuf (\'pb\') model to visualize.") parser.add_argument( "--log_dir", type=str, default="", required=True, help="The location for the Tensorboard log to begin visualization from.") FLAGS, unparsed = parser.parse_known_args() app.run(main=main, argv=[sys.argv[0]] + unparsed)
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/import_pb_to_tensorboard.py
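A short example of calling the helper above directly; both paths are hypothetical placeholders. Note that despite the parameter name, model_dir points at the .pb file itself, not a directory.

# Hedged example: import a frozen GraphDef into a TensorBoard log directory.
from tensorflow.python.tools import import_pb_to_tensorboard

import_pb_to_tensorboard.import_to_tensorboard(
    model_dir="/tmp/frozen_graph.pb",     # path to the protobuf model file
    log_dir="/tmp/tensorboard_logs")      # where the event file is written
# Then visualize with: tensorboard --logdir=/tmp/tensorboard_logs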
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helper functions for modules.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os def get_parent_dir(module): return os.path.abspath(os.path.join(os.path.dirname(module.__file__), ".."))
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/module_util.py
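A minimal example of get_parent_dir(): it returns the directory one level above the directory that contains the given module's source file. Using module_util itself as the argument keeps the sketch self-contained.

# Hedged example: module_util lives in .../tensorflow/python/tools, so its
# parent directory resolves to .../tensorflow/python.
from tensorflow.python.tools import module_util

print(module_util.get_parent_dir(module_util))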
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Converts checkpoint variables into Const ops in a standalone GraphDef file. This script is designed to take a GraphDef proto, a SaverDef proto, and a set of variable values stored in a checkpoint file, and output a GraphDef with all of the variable ops converted into const ops containing the values of the variables. It's useful to do this when we need to load a single file in C++, especially in environments like mobile or embedded where we may not have access to the RestoreTensor ops and file loading calls that they rely on. An example of command-line usage is: bazel build tensorflow/python/tools:freeze_graph && \ bazel-bin/tensorflow/python/tools/freeze_graph \ --input_graph=some_graph_def.pb \ --input_checkpoint=model.ckpt-8361242 \ --output_graph=/tmp/frozen_graph.pb --output_node_names=softmax You can also look at freeze_graph_test.py for an example of how to use it. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import re import sys from google.protobuf import text_format from tensorflow.core.framework import graph_pb2 from tensorflow.core.protobuf import saver_pb2 from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef from tensorflow.python import pywrap_tensorflow from tensorflow.python.client import session from tensorflow.python.framework import graph_util from tensorflow.python.framework import importer from tensorflow.python.platform import app from tensorflow.python.platform import gfile from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import tag_constants from tensorflow.python.tools import saved_model_utils from tensorflow.python.training import checkpoint_management from tensorflow.python.training import saver as saver_lib def _has_no_variables(sess): """Determines if the graph has any variables. Args: sess: TensorFlow Session. Returns: Bool. """ for op in sess.graph.get_operations(): if op.type.startswith("Variable") or op.type.endswith("VariableOp"): return False return True def freeze_graph_with_def_protos(input_graph_def, input_saver_def, input_checkpoint, output_node_names, restore_op_name, filename_tensor_name, output_graph, clear_devices, initializer_nodes, variable_names_whitelist="", variable_names_blacklist="", input_meta_graph_def=None, input_saved_model_dir=None, saved_model_tags=None, checkpoint_version=saver_pb2.SaverDef.V2): """Converts all variables in a graph and checkpoint into constants. Args: input_graph_def: A `GraphDef`. input_saver_def: A `SaverDef` (optional). input_checkpoint: The prefix of a V1 or V2 checkpoint, with V2 taking priority. Typically the result of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. output_node_names: The name(s) of the output nodes, comma separated. 
restore_op_name: Unused. filename_tensor_name: Unused. output_graph: String where to write the frozen `GraphDef`. clear_devices: A Bool whether to remove device specifications. initializer_nodes: Comma separated string of initializer nodes to run before freezing. variable_names_whitelist: The set of variable names to convert (optional, by default, all variables are converted). variable_names_blacklist: The set of variable names to omit converting to constants (optional). input_meta_graph_def: A `MetaGraphDef` (optional), input_saved_model_dir: Path to the dir with TensorFlow 'SavedModel' file and variables (optional). saved_model_tags: Group of comma separated tag(s) of the MetaGraphDef to load, in string format (optional). checkpoint_version: Tensorflow variable file format (saver_pb2.SaverDef.V1 or saver_pb2.SaverDef.V2) Returns: Location of the output_graph_def. """ del restore_op_name, filename_tensor_name # Unused by updated loading code. # 'input_checkpoint' may be a prefix if we're using Saver V2 format if (not input_saved_model_dir and not checkpoint_management.checkpoint_exists(input_checkpoint)): raise ValueError("Input checkpoint '" + input_checkpoint + "' doesn't exist!") if not output_node_names: raise ValueError( "You need to supply the name of a node to --output_node_names.") # Remove all the explicit device specifications for this node. This helps to # make the graph more portable. if clear_devices: if input_meta_graph_def: for node in input_meta_graph_def.graph_def.node: node.device = "" elif input_graph_def: for node in input_graph_def.node: node.device = "" if input_graph_def: _ = importer.import_graph_def(input_graph_def, name="") with session.Session() as sess: if input_saver_def: saver = saver_lib.Saver( saver_def=input_saver_def, write_version=checkpoint_version) saver.restore(sess, input_checkpoint) elif input_meta_graph_def: restorer = saver_lib.import_meta_graph( input_meta_graph_def, clear_devices=True) restorer.restore(sess, input_checkpoint) if initializer_nodes: sess.run(initializer_nodes.replace(" ", "").split(",")) elif input_saved_model_dir: if saved_model_tags is None: saved_model_tags = [] loader.load(sess, saved_model_tags, input_saved_model_dir) else: var_list = {} reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint) var_to_shape_map = reader.get_variable_to_shape_map() # List of all partition variables. Because the condition is heuristic # based, the list could include false positives. all_parition_variable_names = [ tensor.name.split(":")[0] for op in sess.graph.get_operations() for tensor in op.values() if re.search(r"/part_\d+/", tensor.name) ] has_partition_var = False for key in var_to_shape_map: try: tensor = sess.graph.get_tensor_by_name(key + ":0") if any(key in name for name in all_parition_variable_names): has_partition_var = True except KeyError: # This tensor doesn't exist in the graph (for example it's # 'global_step' or a similar housekeeping element) so skip it. continue var_list[key] = tensor try: saver = saver_lib.Saver( var_list=var_list, write_version=checkpoint_version) except TypeError as e: # `var_list` is required to be a map of variable names to Variable # tensors. Partition variables are Identity tensors that cannot be # handled by Saver. if has_partition_var: raise ValueError( "Models containing partition variables cannot be converted " "from checkpoint files. Please pass in a SavedModel using " "the flag --input_saved_model_dir.") # Models that have been frozen previously do not contain Variables. 
elif _has_no_variables(sess): raise ValueError( "No variables were found in this model. It is likely the model " "was frozen previously. You cannot freeze a graph twice.") return 0 else: raise e saver.restore(sess, input_checkpoint) if initializer_nodes: sess.run(initializer_nodes.replace(" ", "").split(",")) variable_names_whitelist = ( variable_names_whitelist.replace(" ", "").split(",") if variable_names_whitelist else None) variable_names_blacklist = ( variable_names_blacklist.replace(" ", "").split(",") if variable_names_blacklist else None) if input_meta_graph_def: output_graph_def = graph_util.convert_variables_to_constants( sess, input_meta_graph_def.graph_def, output_node_names.replace(" ", "").split(","), variable_names_whitelist=variable_names_whitelist, variable_names_blacklist=variable_names_blacklist) else: output_graph_def = graph_util.convert_variables_to_constants( sess, input_graph_def, output_node_names.replace(" ", "").split(","), variable_names_whitelist=variable_names_whitelist, variable_names_blacklist=variable_names_blacklist) # Write GraphDef to file if output path has been given. if output_graph: with gfile.GFile(output_graph, "wb") as f: f.write(output_graph_def.SerializeToString()) return output_graph_def def _parse_input_graph_proto(input_graph, input_binary): """Parses input tensorflow graph into GraphDef proto.""" if not gfile.Exists(input_graph): raise IOError("Input graph file '" + input_graph + "' does not exist!") input_graph_def = graph_pb2.GraphDef() mode = "rb" if input_binary else "r" with gfile.GFile(input_graph, mode) as f: if input_binary: input_graph_def.ParseFromString(f.read()) else: text_format.Merge(f.read(), input_graph_def) return input_graph_def def _parse_input_meta_graph_proto(input_graph, input_binary): """Parses input tensorflow graph into MetaGraphDef proto.""" if not gfile.Exists(input_graph): raise IOError("Input meta graph file '" + input_graph + "' does not exist!") input_meta_graph_def = MetaGraphDef() mode = "rb" if input_binary else "r" with gfile.GFile(input_graph, mode) as f: if input_binary: input_meta_graph_def.ParseFromString(f.read()) else: text_format.Merge(f.read(), input_meta_graph_def) print("Loaded meta graph file '" + input_graph) return input_meta_graph_def def _parse_input_saver_proto(input_saver, input_binary): """Parses input tensorflow Saver into SaverDef proto.""" if not gfile.Exists(input_saver): raise IOError("Input saver file '" + input_saver + "' does not exist!") mode = "rb" if input_binary else "r" with gfile.GFile(input_saver, mode) as f: saver_def = saver_pb2.SaverDef() if input_binary: saver_def.ParseFromString(f.read()) else: text_format.Merge(f.read(), saver_def) return saver_def def freeze_graph(input_graph, input_saver, input_binary, input_checkpoint, output_node_names, restore_op_name, filename_tensor_name, output_graph, clear_devices, initializer_nodes, variable_names_whitelist="", variable_names_blacklist="", input_meta_graph=None, input_saved_model_dir=None, saved_model_tags=tag_constants.SERVING, checkpoint_version=saver_pb2.SaverDef.V2): """Converts all variables in a graph and checkpoint into constants. Args: input_graph: A `GraphDef` file to load. input_saver: A TensorFlow Saver file. input_binary: A Bool. True means input_graph is .pb, False indicates .pbtxt. input_checkpoint: The prefix of a V1 or V2 checkpoint, with V2 taking priority. Typically the result of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. 
output_node_names: The name(s) of the output nodes, comma separated. restore_op_name: Unused. filename_tensor_name: Unused. output_graph: String where to write the frozen `GraphDef`. clear_devices: A Bool whether to remove device specifications. initializer_nodes: Comma separated list of initializer nodes to run before freezing. variable_names_whitelist: The set of variable names to convert (optional, by default, all variables are converted), variable_names_blacklist: The set of variable names to omit converting to constants (optional). input_meta_graph: A `MetaGraphDef` file to load (optional). input_saved_model_dir: Path to the dir with TensorFlow 'SavedModel' file and variables (optional). saved_model_tags: Group of comma separated tag(s) of the MetaGraphDef to load, in string format. checkpoint_version: Tensorflow variable file format (saver_pb2.SaverDef.V1 or saver_pb2.SaverDef.V2). Returns: String that is the location of frozen GraphDef. """ input_graph_def = None if input_saved_model_dir: input_graph_def = saved_model_utils.get_meta_graph_def( input_saved_model_dir, saved_model_tags).graph_def elif input_graph: input_graph_def = _parse_input_graph_proto(input_graph, input_binary) input_meta_graph_def = None if input_meta_graph: input_meta_graph_def = _parse_input_meta_graph_proto( input_meta_graph, input_binary) input_saver_def = None if input_saver: input_saver_def = _parse_input_saver_proto(input_saver, input_binary) return freeze_graph_with_def_protos( input_graph_def, input_saver_def, input_checkpoint, output_node_names, restore_op_name, filename_tensor_name, output_graph, clear_devices, initializer_nodes, variable_names_whitelist, variable_names_blacklist, input_meta_graph_def, input_saved_model_dir, saved_model_tags.replace(" ", "").split(","), checkpoint_version=checkpoint_version) def main(unused_args, flags): if flags.checkpoint_version == 1: checkpoint_version = saver_pb2.SaverDef.V1 elif flags.checkpoint_version == 2: checkpoint_version = saver_pb2.SaverDef.V2 else: raise ValueError("Invalid checkpoint version (must be '1' or '2'): %d" % flags.checkpoint_version) freeze_graph(flags.input_graph, flags.input_saver, flags.input_binary, flags.input_checkpoint, flags.output_node_names, flags.restore_op_name, flags.filename_tensor_name, flags.output_graph, flags.clear_devices, flags.initializer_nodes, flags.variable_names_whitelist, flags.variable_names_blacklist, flags.input_meta_graph, flags.input_saved_model_dir, flags.saved_model_tags, checkpoint_version) def run_main(): """Main function of freeze_graph.""" parser = argparse.ArgumentParser() parser.register("type", "bool", lambda v: v.lower() == "true") parser.add_argument( "--input_graph", type=str, default="", help="TensorFlow \'GraphDef\' file to load.") parser.add_argument( "--input_saver", type=str, default="", help="TensorFlow saver file to load.") parser.add_argument( "--input_checkpoint", type=str, default="", help="TensorFlow variables file to load.") parser.add_argument( "--checkpoint_version", type=int, default=2, help="Tensorflow variable file format") parser.add_argument( "--output_graph", type=str, default="", help="Output \'GraphDef\' file name.") parser.add_argument( "--input_binary", nargs="?", const=True, type="bool", default=False, help="Whether the input files are in binary format.") parser.add_argument( "--output_node_names", type=str, default="", help="The name of the output nodes, comma separated.") parser.add_argument( "--restore_op_name", type=str, default="save/restore_all", help="""\ The name of 
the master restore operator. Deprecated, unused by updated \ loading code. """) parser.add_argument( "--filename_tensor_name", type=str, default="save/Const:0", help="""\ The name of the tensor holding the save path. Deprecated, unused by \ updated loading code. """) parser.add_argument( "--clear_devices", nargs="?", const=True, type="bool", default=True, help="Whether to remove device specifications.") parser.add_argument( "--initializer_nodes", type=str, default="", help="Comma separated list of initializer nodes to run before freezing.") parser.add_argument( "--variable_names_whitelist", type=str, default="", help="""\ Comma separated list of variables to convert to constants. If specified, \ only those variables will be converted to constants.\ """) parser.add_argument( "--variable_names_blacklist", type=str, default="", help="""\ Comma separated list of variables to skip converting to constants.\ """) parser.add_argument( "--input_meta_graph", type=str, default="", help="TensorFlow \'MetaGraphDef\' file to load.") parser.add_argument( "--input_saved_model_dir", type=str, default="", help="Path to the dir with TensorFlow \'SavedModel\' file and variables.") parser.add_argument( "--saved_model_tags", type=str, default="serve", help="""\ Group of tag(s) of the MetaGraphDef to load, in string format,\ separated by \',\'. For tag-set contains multiple tags, all tags \ must be passed in.\ """) flags, unparsed = parser.parse_known_args() my_main = lambda unused_args: main(unused_args, flags) app.run(main=my_main, argv=[sys.argv[0]] + unparsed) if __name__ == "__main__": run_main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/freeze_graph.py
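A hedged sketch of invoking freeze_graph() directly rather than through run_main(); all file paths and the output node name are hypothetical placeholders mirroring the command-line example in the module docstring.

# Freeze a text-format GraphDef plus a V2 checkpoint into a single .pb file.
from tensorflow.python.tools import freeze_graph

freeze_graph.freeze_graph(
    input_graph="/tmp/some_graph_def.pbtxt",
    input_saver="",
    input_binary=False,             # the input graph above is a text proto
    input_checkpoint="/tmp/model.ckpt-8361242",
    output_node_names="softmax",
    restore_op_name="",             # unused by the updated loading code
    filename_tensor_name="",        # unused by the updated loading code
    output_graph="/tmp/frozen_graph.pb",
    clear_devices=True,
    initializer_nodes="")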
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Removes unneeded nodes from a GraphDef file. This script is designed to help streamline models, by taking the input and output nodes that will be used by an application and figuring out the smallest set of operations that are required to run for those arguments. The resulting minimal graph is then saved out. The advantages of running this script are: - You may be able to shrink the file size. - Operations that are unsupported on your platform but still present can be safely removed. The resulting graph may not be as flexible as the original though, since any input nodes that weren't explicitly mentioned may not be accessible any more. An example of command-line usage is: bazel build tensorflow/python/tools:strip_unused && \ bazel-bin/tensorflow/python/tools/strip_unused \ --input_graph=some_graph_def.pb \ --output_graph=/tmp/stripped_graph.pb \ --input_node_names=input0 --output_node_names=softmax You can also look at strip_unused_test.py for an example of how to use it. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys from tensorflow.python.framework import dtypes from tensorflow.python.platform import app from tensorflow.python.tools import strip_unused_lib FLAGS = None def main(unused_args): strip_unused_lib.strip_unused_from_files(FLAGS.input_graph, FLAGS.input_binary, FLAGS.output_graph, FLAGS.output_binary, FLAGS.input_node_names, FLAGS.output_node_names, FLAGS.placeholder_type_enum) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.register('type', 'bool', lambda v: v.lower() == 'true') parser.add_argument( '--input_graph', type=str, default='', help='TensorFlow \'GraphDef\' file to load.') parser.add_argument( '--input_binary', nargs='?', const=True, type='bool', default=False, help='Whether the input files are in binary format.') parser.add_argument( '--output_graph', type=str, default='', help='Output \'GraphDef\' file name.') parser.add_argument( '--output_binary', nargs='?', const=True, type='bool', default=True, help='Whether to write a binary format graph.') parser.add_argument( '--input_node_names', type=str, default='', help='The name of the input nodes, comma separated.') parser.add_argument( '--output_node_names', type=str, default='', help='The name of the output nodes, comma separated.') parser.add_argument( '--placeholder_type_enum', type=int, default=dtypes.float32.as_datatype_enum, help='The AttrValue enum to use for placeholders.') FLAGS, unparsed = parser.parse_known_args() app.run(main=main, argv=[sys.argv[0]] + unparsed)
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/strip_unused.py
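strip_unused.py above is a thin CLI around strip_unused_lib; a minimal sketch of calling that library routine directly, with hypothetical file paths and the node names taken from the docstring example:

# Hedged sketch of the file-based stripping routine; paths are placeholders.
from tensorflow.python.framework import dtypes
from tensorflow.python.tools import strip_unused_lib

strip_unused_lib.strip_unused_from_files(
    "/tmp/some_graph_def.pb",         # input_graph
    False,                            # input_binary: text proto here
    "/tmp/stripped_graph.pb",         # output_graph
    True,                             # output_binary
    "input0",                         # input_node_names, comma separated
    "softmax",                        # output_node_names, comma separated
    dtypes.float32.as_datatype_enum)  # placeholder_type_enum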
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Computes a header file to be used with SELECTIVE_REGISTRATION. See the executable wrapper, print_selective_registration_header.py, for more information. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys from google.protobuf import text_format from tensorflow.core.framework import graph_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging # Usually, we use each graph node to induce registration of an op and # corresponding kernel; nodes without a corresponding kernel (perhaps due to # attr types) generate a warning but are otherwise ignored. Ops in this set are # registered even if there's no corresponding kernel. OPS_WITHOUT_KERNEL_WHITELIST = frozenset([ # AccumulateNV2 is rewritten away by AccumulateNV2RemovePass; see # core/common_runtime/accumulate_n_optimizer.cc. 'AccumulateNV2' ]) def get_ops_and_kernels(proto_fileformat, proto_files, default_ops_str): """Gets the ops and kernels needed from the model files.""" ops = set() for proto_file in proto_files: tf_logging.info('Loading proto file %s', proto_file) # Load GraphDef. file_data = gfile.GFile(proto_file, 'rb').read() if proto_fileformat == 'rawproto': graph_def = graph_pb2.GraphDef.FromString(file_data) else: assert proto_fileformat == 'textproto' graph_def = text_format.Parse(file_data, graph_pb2.GraphDef()) # Find all ops and kernels used by the graph. for node_def in graph_def.node: if not node_def.device: node_def.device = '/cpu:0' kernel_class = pywrap_tensorflow.TryFindKernelClass( node_def.SerializeToString()) op = str(node_def.op) if kernel_class or op in OPS_WITHOUT_KERNEL_WHITELIST: op_and_kernel = (op, str(kernel_class.decode('utf-8')) if kernel_class else None) if op_and_kernel not in ops: ops.add(op_and_kernel) else: print( 'Warning: no kernel found for op %s' % node_def.op, file=sys.stderr) # Add default ops. if default_ops_str and default_ops_str != 'all': for s in default_ops_str.split(','): op, kernel = s.split(':') op_and_kernel = (op, kernel) if op_and_kernel not in ops: ops.add(op_and_kernel) return list(sorted(ops)) def get_header_from_ops_and_kernels(ops_and_kernels, include_all_ops_and_kernels): """Returns a header for use with tensorflow SELECTIVE_REGISTRATION. Args: ops_and_kernels: a set of (op_name, kernel_class_name) pairs to include. include_all_ops_and_kernels: if True, ops_and_kernels is ignored and all op kernels are included. Returns: the string of the header that should be written as ops_to_register.h. 
""" ops = set([op for op, _ in ops_and_kernels]) result_list = [] def append(s): result_list.append(s) _, script_name = os.path.split(sys.argv[0]) append('// This file was autogenerated by %s' % script_name) append('#ifndef OPS_TO_REGISTER') append('#define OPS_TO_REGISTER') if include_all_ops_and_kernels: append('#define SHOULD_REGISTER_OP(op) true') append('#define SHOULD_REGISTER_OP_KERNEL(clz) true') append('#define SHOULD_REGISTER_OP_GRADIENT true') else: line = ''' namespace { constexpr const char* skip(const char* x) { return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x; } constexpr bool isequal(const char* x, const char* y) { return (*skip(x) && *skip(y)) ? (*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1)) : (!*skip(x) && !*skip(y)); } template<int N> struct find_in { static constexpr bool f(const char* x, const char* const y[N]) { return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1); } }; template<> struct find_in<0> { static constexpr bool f(const char* x, const char* const y[]) { return false; } }; } // end namespace ''' line += 'constexpr const char* kNecessaryOpKernelClasses[] = {\n' for _, kernel_class in ops_and_kernels: if kernel_class is None: continue line += '"%s",\n' % kernel_class line += '};' append(line) append('#define SHOULD_REGISTER_OP_KERNEL(clz) ' '(find_in<sizeof(kNecessaryOpKernelClasses) ' '/ sizeof(*kNecessaryOpKernelClasses)>::f(clz, ' 'kNecessaryOpKernelClasses))') append('') append('constexpr inline bool ShouldRegisterOp(const char op[]) {') append(' return false') for op in sorted(ops): append(' || isequal(op, "%s")' % op) append(' ;') append('}') append('#define SHOULD_REGISTER_OP(op) ShouldRegisterOp(op)') append('') append('#define SHOULD_REGISTER_OP_GRADIENT ' + ( 'true' if 'SymbolicGradient' in ops else 'false')) append('#endif') return '\n'.join(result_list) def get_header(graphs, proto_fileformat='rawproto', default_ops='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'): """Computes a header for use with tensorflow SELECTIVE_REGISTRATION. Args: graphs: a list of paths to GraphDef files to include. proto_fileformat: optional format of proto file, either 'textproto' or 'rawproto' (default). default_ops: optional comma-separated string of operator:kernel pairs to always include implementation for. Pass 'all' to have all operators and kernels included. Default: 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'. Returns: the string of the header that should be written as ops_to_register.h. """ ops_and_kernels = get_ops_and_kernels(proto_fileformat, graphs, default_ops) if not ops_and_kernels: print('Error reading graph!') return 1 return get_header_from_ops_and_kernels(ops_and_kernels, default_ops == 'all')
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/selective_registration_header_lib.py
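The library above is normally driven through print_selective_registration_header.py (next record), but get_header() can be called directly. A small sketch, assuming a hypothetical frozen GraphDef path; note that get_header() prints an error and returns 1 if no ops are found, so a real caller should check the result:

# Hedged sketch: build ops_to_register.h contents for one frozen graph.
from tensorflow.python.tools import selective_registration_header_lib

header = selective_registration_header_lib.get_header(
    ['/tmp/frozen_graph.pb'],      # GraphDef files to analyze (hypothetical path)
    proto_fileformat='rawproto',   # binary GraphDef; 'textproto' also accepted
    default_ops='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp')
with open('/tmp/ops_to_register.h', 'w') as f:
  f.write(header)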
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Prints a header file to be used with SELECTIVE_REGISTRATION.

An example of command-line usage is:

bazel build tensorflow/python/tools:print_selective_registration_header && \
bazel-bin/tensorflow/python/tools/print_selective_registration_header \
    --graphs=path/to/graph.pb > ops_to_register.h

Then when compiling tensorflow, include ops_to_register.h in the include search
path and pass -DSELECTIVE_REGISTRATION and -DSUPPORT_SELECTIVE_REGISTRATION -
see core/framework/selective_registration.h for more details.

When compiling for Android:

bazel build -c opt --copt="-DSELECTIVE_REGISTRATION" \
    --copt="-DSUPPORT_SELECTIVE_REGISTRATION" \
    //tensorflow/contrib/android:libtensorflow_inference.so \
    --host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
    --crosstool_top=//external:android/crosstool --cpu=armeabi-v7a
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys

from tensorflow.python.platform import app
from tensorflow.python.tools import selective_registration_header_lib

FLAGS = None


def main(unused_argv):
  graphs = FLAGS.graphs.split(',')
  print(selective_registration_header_lib.get_header(
      graphs, FLAGS.proto_fileformat, FLAGS.default_ops))


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.register('type', 'bool', lambda v: v.lower() == 'true')
  parser.add_argument(
      '--graphs',
      type=str,
      default='',
      help='Comma-separated list of paths to model files to be analyzed.',
      required=True)
  parser.add_argument(
      '--proto_fileformat',
      type=str,
      default='rawproto',
      help='Format of proto file, either textproto or rawproto.')
  parser.add_argument(
      '--default_ops',
      type=str,
      default='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp',
      help='Default operator:kernel pairs to always include implementation for.'
      'Pass "all" to have all operators and kernels included; note that this '
      'should be used only when it is useful compared with simply not using '
      'selective registration, as it can in some cases limit the effect of '
      'compilation caches')

  FLAGS, unparsed = parser.parse_known_args()
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/print_selective_registration_header.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SavedModel utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.lib.io import file_io from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import tag_constants from tensorflow.python.tools import saved_model_utils def tearDownModule(): file_io.delete_recursively(test.get_temp_dir()) class SavedModelUtilTest(test.TestCase): def _init_and_validate_variable(self, sess, variable_name, variable_value): v = variables.Variable(variable_value, name=variable_name) sess.run(variables.global_variables_initializer()) self.assertEqual(variable_value, v.eval()) @test_util.deprecated_graph_mode_only def testReadSavedModelValid(self): saved_model_dir = os.path.join(test.get_temp_dir(), "valid_saved_model") builder = saved_model_builder.SavedModelBuilder(saved_model_dir) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING]) builder.save() actual_saved_model_pb = saved_model_utils.read_saved_model(saved_model_dir) self.assertEqual(len(actual_saved_model_pb.meta_graphs), 1) self.assertEqual( len(actual_saved_model_pb.meta_graphs[0].meta_info_def.tags), 1) self.assertEqual(actual_saved_model_pb.meta_graphs[0].meta_info_def.tags[0], tag_constants.TRAINING) def testReadSavedModelInvalid(self): saved_model_dir = os.path.join(test.get_temp_dir(), "invalid_saved_model") with self.assertRaisesRegexp( IOError, "SavedModel file does not exist at: %s" % saved_model_dir): saved_model_utils.read_saved_model(saved_model_dir) @test_util.deprecated_graph_mode_only def testGetSavedModelTagSets(self): saved_model_dir = os.path.join(test.get_temp_dir(), "test_tags") builder = saved_model_builder.SavedModelBuilder(saved_model_dir) # Graph with a single variable. SavedModel invoked to: # - add with weights. # - a single tag (from predefined constants). with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING]) # Graph that updates the single variable. SavedModel invoked to: # - simply add the model (weights are not updated). # - a single tag (from predefined constants). with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 43) builder.add_meta_graph([tag_constants.SERVING]) # Graph that updates the single variable. SavedModel is invoked: # - to add the model (weights are not updated). # - multiple predefined tags. 
with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 44) builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU]) # Graph that updates the single variable. SavedModel is invoked: # - to add the model (weights are not updated). # - multiple predefined tags for serving on TPU. with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 44) builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU]) # Graph that updates the single variable. SavedModel is invoked: # - to add the model (weights are not updated). # - multiple custom tags. with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 45) builder.add_meta_graph(["foo", "bar"]) # Save the SavedModel to disk. builder.save() actual_tags = saved_model_utils.get_saved_model_tag_sets(saved_model_dir) expected_tags = [["train"], ["serve"], ["serve", "gpu"], ["serve", "tpu"], ["foo", "bar"]] self.assertEqual(expected_tags, actual_tags) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/saved_model_utils_test.py
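For reference, the two helpers exercised by this test can be applied to any SavedModel directory; a short sketch with a hypothetical path:

# Hedged sketch of the saved_model_utils helpers used in the test above.
from tensorflow.python.tools import saved_model_utils

saved_model_pb = saved_model_utils.read_saved_model('/tmp/my_saved_model')
print(len(saved_model_pb.meta_graphs))  # number of MetaGraphDefs in the model

tag_sets = saved_model_utils.get_saved_model_tag_sets('/tmp/my_saved_model')
print(tag_sets)  # e.g. [['serve']], one list of tags per MetaGraphDef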
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests the graph freezing tool.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import re from tensorflow.core.example import example_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.protobuf import saver_pb2 from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.framework import graph_io from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import tag_constants from tensorflow.python.tools import freeze_graph from tensorflow.python.training import saver as saver_lib class FreezeGraphTest(test_util.TensorFlowTestCase): def _testFreezeGraph(self, saver_write_version): checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint") checkpoint_state_name = "checkpoint_state" input_graph_name = "input_graph.pb" output_graph_name = "output_graph.pb" # We'll create an input graph that has a single variable containing 1.0, # and that then multiplies it by 2. with ops.Graph().as_default(): variable_node = variables.VariableV1(1.0, name="variable_node") output_node = math_ops.multiply(variable_node, 2.0, name="output_node") sess = session.Session() init = variables.global_variables_initializer() sess.run(init) output = sess.run(output_node) self.assertNear(2.0, output, 0.00001) saver = saver_lib.Saver(write_version=saver_write_version) checkpoint_path = saver.save( sess, checkpoint_prefix, global_step=0, latest_filename=checkpoint_state_name) graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name) # We save out the graph to disk, and then call the const conversion # routine. 
input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name) input_saver_def_path = "" input_binary = False output_node_names = "output_node" restore_op_name = "save/restore_all" filename_tensor_name = "save/Const:0" output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name) clear_devices = False freeze_graph.freeze_graph( input_graph_path, input_saver_def_path, input_binary, checkpoint_path, output_node_names, restore_op_name, filename_tensor_name, output_graph_path, clear_devices, "", "", "", checkpoint_version=saver_write_version) # Now we make sure the variable is now a constant, and that the graph still # produces the expected result. with ops.Graph().as_default(): output_graph_def = graph_pb2.GraphDef() with open(output_graph_path, "rb") as f: output_graph_def.ParseFromString(f.read()) _ = importer.import_graph_def(output_graph_def, name="") self.assertEqual(4, len(output_graph_def.node)) for node in output_graph_def.node: self.assertNotEqual("VariableV2", node.op) self.assertNotEqual("Variable", node.op) with session.Session() as sess: output_node = sess.graph.get_tensor_by_name("output_node:0") output = sess.run(output_node) self.assertNear(2.0, output, 0.00001) def _createTFExampleString(self, feature_name, feature_value): """Create a serialized tensorflow example.""" example = example_pb2.Example() example.features.feature[feature_name].float_list.value.extend([ feature_value]) return example.SerializeToString() def _writeDummySavedModel(self, path, feature_name): """Writes a classifier with two input features to the given path.""" with ops.Graph().as_default(): examples = array_ops.placeholder(dtypes.string, name="input_node") feature_configs = { feature_name: parsing_ops.FixedLenFeature(shape=[], dtype=dtypes.float32), } features = parsing_ops.parse_example(examples, feature_configs) feature = features[feature_name] variable_node = variables.VariableV1(1.0, name="variable_node") scores = math_ops.multiply(variable_node, feature, name="output_node") class_feature = array_ops.fill(array_ops.shape(feature), "class_%s" % feature_name) classes = array_ops.transpose(class_feature) with session.Session() as sess: sess.run(variables.global_variables_initializer()) signature = ( signature_def_utils.classification_signature_def( examples=examples, classes=classes, scores=scores,)) builder = saved_model_builder.SavedModelBuilder(path) builder.add_meta_graph_and_variables( sess, [tag_constants.SERVING], signature_def_map={ signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature, },) builder.save(as_text=True) @test_util.run_v1_only("b/120545219") def testFreezeGraphV1(self): self._testFreezeGraph(saver_pb2.SaverDef.V1) @test_util.run_v1_only("b/120545219") def testFreezeGraphV2(self): self._testFreezeGraph(saver_pb2.SaverDef.V2) def testFreezeMetaGraph(self): tmp_dir = self.get_temp_dir() checkpoint_prefix = os.path.join(tmp_dir, "meta_graph_checkpoint") checkpoint_state_name = "checkpoint_state" output_graph_filename = os.path.join(tmp_dir, "output_graph.pb") with ops.Graph().as_default(): variable_node = variables.VariableV1(1.0, name="variable_node") output_node = math_ops.multiply(variable_node, 2.0, name="output_node") sess = session.Session() init = variables.global_variables_initializer() sess.run(init) output = sess.run(output_node) self.assertNear(2.0, output, 0.00001) saver = saver_lib.Saver() checkpoint_path = saver.save( sess, checkpoint_prefix, global_step=0, latest_filename=checkpoint_state_name) input_saver_def_path = "" input_binary = True 
output_node_names = "output_node" restore_op_name = "save/restore_all" filename_tensor_name = "save/Const:0" clear_devices = False input_meta_graph = checkpoint_path + ".meta" freeze_graph.freeze_graph( "", input_saver_def_path, input_binary, checkpoint_path, output_node_names, restore_op_name, filename_tensor_name, output_graph_filename, clear_devices, "", "", "", input_meta_graph) # Now we make sure the variable is now a constant, and that the graph still # produces the expected result. with ops.Graph().as_default(): output_graph_def = graph_pb2.GraphDef() with open(output_graph_filename, "rb") as f: output_graph_def.ParseFromString(f.read()) _ = importer.import_graph_def(output_graph_def, name="") self.assertEqual(4, len(output_graph_def.node)) for node in output_graph_def.node: self.assertNotEqual("VariableV2", node.op) self.assertNotEqual("Variable", node.op) with session.Session() as sess: output_node = sess.graph.get_tensor_by_name("output_node:0") output = sess.run(output_node) self.assertNear(2.0, output, 0.00001) def testFreezeSavedModel(self): tmp_dir = self.get_temp_dir() saved_model_dir = os.path.join(tmp_dir, "saved_model_dir") feature_name = "feature" self._writeDummySavedModel(saved_model_dir, feature_name) output_graph_filename = os.path.join(tmp_dir, "output_graph.pb") input_saved_model_dir = saved_model_dir output_node_names = "output_node" input_binary = False input_saver_def_path = False restore_op_name = None filename_tensor_name = None clear_devices = False input_meta_graph = False checkpoint_path = None input_graph_filename = None saved_model_tags = tag_constants.SERVING freeze_graph.freeze_graph(input_graph_filename, input_saver_def_path, input_binary, checkpoint_path, output_node_names, restore_op_name, filename_tensor_name, output_graph_filename, clear_devices, "", "", "", input_meta_graph, input_saved_model_dir, saved_model_tags) # Now we make sure the variable is now a constant, and that the graph still # produces the expected result. with ops.Graph().as_default(): output_graph_def = graph_pb2.GraphDef() with open(output_graph_filename, "rb") as f: output_graph_def.ParseFromString(f.read()) _ = importer.import_graph_def(output_graph_def, name="") self.assertEqual(8, len(output_graph_def.node)) for node in output_graph_def.node: self.assertNotEqual("VariableV2", node.op) self.assertNotEqual("Variable", node.op) feature_value = 2.0 example = self._createTFExampleString(feature_name, feature_value) with session.Session() as sess: input_node = sess.graph.get_tensor_by_name("input_node:0") output_node = sess.graph.get_tensor_by_name("output_node:0") output = sess.run(output_node, feed_dict={input_node: [example]}) self.assertNear(feature_value, output, 0.00001) def testSinglePartitionedVariable(self): """Ensures partitioned variables fail cleanly with freeze graph.""" checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint") checkpoint_state_name = "checkpoint_state" input_graph_name = "input_graph.pb" output_graph_name = "output_graph.pb" # Create a graph with partition variables. When weights are partitioned into # a single partition, the weights variable is followed by a identity -> # identity (an additional identity node). 
partitioner = partitioned_variables.fixed_size_partitioner(1) with ops.Graph().as_default(): with variable_scope.variable_scope("part", partitioner=partitioner): batch_size, height, width, depth = 5, 128, 128, 3 input1 = array_ops.zeros( (batch_size, height, width, depth), name="input1") input2 = array_ops.zeros( (batch_size, height, width, depth), name="input2") num_nodes = depth filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes]) filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes]) conv = nn.conv2d( input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME") node = math_ops.add(conv, input2, name="test/add") node = nn.relu6(node, name="test/relu6") # Save graph and checkpoints. sess = session.Session() sess.run(variables.global_variables_initializer()) saver = saver_lib.Saver() checkpoint_path = saver.save( sess, checkpoint_prefix, global_step=0, latest_filename=checkpoint_state_name) graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name) # Ensure this graph has partition variables. self.assertTrue([ tensor.name.split(":")[0] for op in sess.graph.get_operations() for tensor in op.values() if re.search(r"/part_\d+/", tensor.name) ]) # Test freezing graph doesn't make it crash. output_node_names = "save/restore_all" output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name) with self.assertRaises(ValueError): freeze_graph.freeze_graph_with_def_protos( input_graph_def=sess.graph_def, input_saver_def=None, input_checkpoint=checkpoint_path, output_node_names=output_node_names, restore_op_name="save/restore_all", # default value filename_tensor_name="save/Const:0", # default value output_graph=output_graph_path, clear_devices=False, initializer_nodes="") if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/freeze_graph_test.py
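The partitioned-variable test above uses the in-memory entry point freeze_graph_with_def_protos rather than the file-based one. A self-contained sketch of that variant, building the same toy multiply-by-two graph the other tests use; the temp paths are hypothetical:

# Hedged sketch mirroring _testFreezeGraph, but freezing from in-memory protos.
import os
import tempfile

from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib

tmp_dir = tempfile.mkdtemp()
with ops.Graph().as_default():
  variable_node = variables.VariableV1(1.0, name="variable_node")
  math_ops.multiply(variable_node, 2.0, name="output_node")
  sess = session.Session()
  sess.run(variables.global_variables_initializer())
  checkpoint_path = saver_lib.Saver().save(
      sess, os.path.join(tmp_dir, "model.ckpt"))

  # Writes the frozen GraphDef to output_graph.
  freeze_graph.freeze_graph_with_def_protos(
      input_graph_def=sess.graph_def,
      input_saver_def=None,
      input_checkpoint=checkpoint_path,
      output_node_names="output_node",
      restore_op_name="save/restore_all",   # default value
      filename_tensor_name="save/Const:0",  # default value
      output_graph=os.path.join(tmp_dir, "frozen.pb"),
      clear_devices=False,
      initializer_nodes="")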
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SavedModelCLI tool. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import os import pickle import shutil import sys import numpy as np from six import StringIO from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.python.debug.wrappers import local_cli_wrapper from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_spec from tensorflow.python.platform import test from tensorflow.python.saved_model import save from tensorflow.python.tools import saved_model_cli from tensorflow.python.training.tracking import tracking SAVED_MODEL_PATH = ('cc/saved_model/testdata/half_plus_two/00000123') @contextlib.contextmanager def captured_output(): new_out, new_err = StringIO(), StringIO() old_out, old_err = sys.stdout, sys.stderr try: sys.stdout, sys.stderr = new_out, new_err yield sys.stdout, sys.stderr finally: sys.stdout, sys.stderr = old_out, old_err class SavedModelCLITestCase(test.TestCase): def testShowCommandAll(self): base_path = test.test_src_dir_path(SAVED_MODEL_PATH) self.parser = saved_model_cli.create_parser() args = self.parser.parse_args(['show', '--dir', base_path, '--all']) with captured_output() as (out, err): saved_model_cli.show(args) output = out.getvalue().strip() # pylint: disable=line-too-long exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs: signature_def['classify_x2_to_y3']: The given SavedModel SignatureDef contains the following input(s): inputs['inputs'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: x2:0 The given SavedModel SignatureDef contains the following output(s): outputs['scores'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: y3:0 Method name is: tensorflow/serving/classify signature_def['classify_x_to_y']: The given SavedModel SignatureDef contains the following input(s): inputs['inputs'] tensor_info: dtype: DT_STRING shape: unknown_rank name: tf_example:0 The given SavedModel SignatureDef contains the following output(s): outputs['scores'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: y:0 Method name is: tensorflow/serving/classify signature_def['regress_x2_to_y3']: The given SavedModel SignatureDef contains the following input(s): inputs['inputs'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: x2:0 The given SavedModel SignatureDef contains the following output(s): outputs['outputs'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: y3:0 Method name is: tensorflow/serving/regress signature_def['regress_x_to_y']: The given SavedModel SignatureDef contains the following input(s): inputs['inputs'] tensor_info: dtype: DT_STRING shape: unknown_rank 
name: tf_example:0 The given SavedModel SignatureDef contains the following output(s): outputs['outputs'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: y:0 Method name is: tensorflow/serving/regress signature_def['regress_x_to_y2']: The given SavedModel SignatureDef contains the following input(s): inputs['inputs'] tensor_info: dtype: DT_STRING shape: unknown_rank name: tf_example:0 The given SavedModel SignatureDef contains the following output(s): outputs['outputs'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: y2:0 Method name is: tensorflow/serving/regress signature_def['serving_default']: The given SavedModel SignatureDef contains the following input(s): inputs['x'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: x:0 The given SavedModel SignatureDef contains the following output(s): outputs['y'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: y:0 Method name is: tensorflow/serving/predict""" # pylint: enable=line-too-long self.maxDiff = None # Produce a useful error msg if the comparison fails self.assertMultiLineEqual(output, exp_out) self.assertEqual(err.getvalue().strip(), '') def testShowAllWithConcreteFunctions(self): class DummyModel(tracking.AutoTrackable): """Model with callable polymorphic functions specified.""" @def_function.function def func1(self, a, b, c): if c: return a + b else: return a * b @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32) ]) def func2(self, x): return x + 2 @def_function.function def __call__(self, y, c=7): return y + 2 * c saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model') dummy_model = DummyModel() # Call with specific values to create new polymorphic function traces. dummy_model.func1( constant_op.constant(5), constant_op.constant(9), True) dummy_model(constant_op.constant(5)) save.save(dummy_model, saved_model_dir) self.parser = saved_model_cli.create_parser() args = self.parser.parse_args(['show', '--dir', saved_model_dir, '--all']) with captured_output() as (out, err): saved_model_cli.show(args) output = out.getvalue().strip() exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs: signature_def['__saved_model_init_op']: The given SavedModel SignatureDef contains the following input(s): The given SavedModel SignatureDef contains the following output(s): outputs['__saved_model_init_op'] tensor_info: dtype: DT_INVALID shape: unknown_rank name: NoOp Method name is: signature_def['serving_default']: The given SavedModel SignatureDef contains the following input(s): inputs['x'] tensor_info: dtype: DT_FLOAT shape: (2, 2) name: serving_default_x:0 The given SavedModel SignatureDef contains the following output(s): outputs['output_0'] tensor_info: dtype: DT_FLOAT shape: (2, 2) name: PartitionedCall:0 Method name is: tensorflow/serving/predict Defined Functions: Function Name: '__call__' Option #1 Callable with: Argument #1 y: TensorSpec(shape=(), dtype=tf.int32, name='y') Argument #2 DType: int Value: 7 Function Name: 'func1' Option #1 Callable with: Argument #1 a: TensorSpec(shape=(), dtype=tf.int32, name='a') Argument #2 b: TensorSpec(shape=(), dtype=tf.int32, name='b') Argument #3 DType: bool Value: True Function Name: 'func2' Option #1 Callable with: Argument #1 x: TensorSpec(shape=(2, 2), dtype=tf.float32, name='x') """.strip() # pylint: enable=line-too-long self.maxDiff = None # Produce a useful error msg if the comparison fails self.assertMultiLineEqual(output, exp_out) self.assertEqual(err.getvalue().strip(), '') def 
testShowCommandTags(self): base_path = test.test_src_dir_path(SAVED_MODEL_PATH) self.parser = saved_model_cli.create_parser() args = self.parser.parse_args(['show', '--dir', base_path]) with captured_output() as (out, err): saved_model_cli.show(args) output = out.getvalue().strip() exp_out = 'The given SavedModel contains the following tag-sets:\nserve' self.assertMultiLineEqual(output, exp_out) self.assertEqual(err.getvalue().strip(), '') def testShowCommandSignature(self): base_path = test.test_src_dir_path(SAVED_MODEL_PATH) self.parser = saved_model_cli.create_parser() args = self.parser.parse_args( ['show', '--dir', base_path, '--tag_set', 'serve']) with captured_output() as (out, err): saved_model_cli.show(args) output = out.getvalue().strip() exp_header = ('The given SavedModel MetaGraphDef contains SignatureDefs ' 'with the following keys:') exp_start = 'SignatureDef key: ' exp_keys = [ '"classify_x2_to_y3"', '"classify_x_to_y"', '"regress_x2_to_y3"', '"regress_x_to_y"', '"regress_x_to_y2"', '"serving_default"' ] # Order of signatures does not matter self.assertMultiLineEqual( output, '\n'.join([exp_header] + [exp_start + exp_key for exp_key in exp_keys])) self.assertEqual(err.getvalue().strip(), '') def testShowCommandErrorNoTagSet(self): base_path = test.test_src_dir_path(SAVED_MODEL_PATH) self.parser = saved_model_cli.create_parser() args = self.parser.parse_args( ['show', '--dir', base_path, '--tag_set', 'badtagset']) with self.assertRaises(RuntimeError): saved_model_cli.show(args) def testShowCommandInputsOutputs(self): base_path = test.test_src_dir_path(SAVED_MODEL_PATH) self.parser = saved_model_cli.create_parser() args = self.parser.parse_args([ 'show', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'serving_default' ]) with captured_output() as (out, err): saved_model_cli.show(args) output = out.getvalue().strip() expected_output = ( 'The given SavedModel SignatureDef contains the following input(s):\n' ' inputs[\'x\'] tensor_info:\n' ' dtype: DT_FLOAT\n shape: (-1, 1)\n name: x:0\n' 'The given SavedModel SignatureDef contains the following output(s):\n' ' outputs[\'y\'] tensor_info:\n' ' dtype: DT_FLOAT\n shape: (-1, 1)\n name: y:0\n' 'Method name is: tensorflow/serving/predict') self.assertEqual(output, expected_output) self.assertEqual(err.getvalue().strip(), '') def testPrintREFTypeTensor(self): ref_tensor_info = meta_graph_pb2.TensorInfo() ref_tensor_info.dtype = types_pb2.DT_FLOAT_REF with captured_output() as (out, err): saved_model_cli._print_tensor_info(ref_tensor_info) self.assertTrue('DT_FLOAT_REF' in out.getvalue().strip()) self.assertEqual(err.getvalue().strip(), '') def testInputPreProcessFormats(self): input_str = 'input1=/path/file.txt[ab3];input2=file2' input_expr_str = 'input3=np.zeros([2,2]);input4=[4,5]' input_dict = saved_model_cli.preprocess_inputs_arg_string(input_str) input_expr_dict = saved_model_cli.preprocess_input_exprs_arg_string( input_expr_str, safe=False) self.assertTrue(input_dict['input1'] == ('/path/file.txt', 'ab3')) self.assertTrue(input_dict['input2'] == ('file2', None)) print(input_expr_dict['input3']) self.assertAllClose(input_expr_dict['input3'], np.zeros([2, 2])) self.assertAllClose(input_expr_dict['input4'], [4, 5]) self.assertTrue(len(input_dict) == 2) self.assertTrue(len(input_expr_dict) == 2) def testInputPreprocessExampleWithCodeInjection(self): input_examples_str = 'inputs=os.system("echo hacked")' with self.assertRaisesRegex(RuntimeError, 'not a valid python literal.'): 
saved_model_cli.preprocess_input_examples_arg_string(input_examples_str) def testInputPreProcessFileNames(self): input_str = (r'inputx=C:\Program Files\data.npz[v:0];' r'input:0=c:\PROGRA~1\data.npy') input_dict = saved_model_cli.preprocess_inputs_arg_string(input_str) self.assertTrue(input_dict['inputx'] == (r'C:\Program Files\data.npz', 'v:0')) self.assertTrue(input_dict['input:0'] == (r'c:\PROGRA~1\data.npy', None)) def testInputPreProcessErrorBadFormat(self): input_str = 'inputx=file[[v1]v2' with self.assertRaises(RuntimeError): saved_model_cli.preprocess_inputs_arg_string(input_str) input_str = 'inputx:file' with self.assertRaises(RuntimeError): saved_model_cli.preprocess_inputs_arg_string(input_str) input_str = 'inputx:np.zeros((5))' with self.assertRaisesRegex(RuntimeError, 'format is incorrect'): saved_model_cli.preprocess_input_exprs_arg_string(input_str, safe=False) def testInputParserNPY(self): x0 = np.array([[1], [2]]) x1 = np.array(range(6)).reshape(2, 3) input0_path = os.path.join(test.get_temp_dir(), 'input0.npy') input1_path = os.path.join(test.get_temp_dir(), 'input1.npy') np.save(input0_path, x0) np.save(input1_path, x1) input_str = 'x0=' + input0_path + '[x0];x1=' + input1_path feed_dict = saved_model_cli.load_inputs_from_input_arg_string( input_str, '', '') self.assertTrue(np.all(feed_dict['x0'] == x0)) self.assertTrue(np.all(feed_dict['x1'] == x1)) def testInputParserNPZ(self): x0 = np.array([[1], [2]]) input_path = os.path.join(test.get_temp_dir(), 'input.npz') np.savez(input_path, a=x0) input_str = 'x=' + input_path + '[a];y=' + input_path feed_dict = saved_model_cli.load_inputs_from_input_arg_string( input_str, '', '') self.assertTrue(np.all(feed_dict['x'] == x0)) self.assertTrue(np.all(feed_dict['y'] == x0)) def testInputParserPickle(self): pkl0 = {'a': 5, 'b': np.array(range(4))} pkl1 = np.array([1]) pkl2 = np.array([[1], [3]]) input_path0 = os.path.join(test.get_temp_dir(), 'pickle0.pkl') input_path1 = os.path.join(test.get_temp_dir(), 'pickle1.pkl') input_path2 = os.path.join(test.get_temp_dir(), 'pickle2.pkl') with open(input_path0, 'wb') as f: pickle.dump(pkl0, f) with open(input_path1, 'wb') as f: pickle.dump(pkl1, f) with open(input_path2, 'wb') as f: pickle.dump(pkl2, f) input_str = 'x=' + input_path0 + '[b];y=' + input_path1 + '[c];' input_str += 'z=' + input_path2 feed_dict = saved_model_cli.load_inputs_from_input_arg_string( input_str, '', '') self.assertTrue(np.all(feed_dict['x'] == pkl0['b'])) self.assertTrue(np.all(feed_dict['y'] == pkl1)) self.assertTrue(np.all(feed_dict['z'] == pkl2)) def testInputParserErrorNoName(self): x0 = np.array([[1], [2]]) x1 = np.array(range(5)) input_path = os.path.join(test.get_temp_dir(), 'input.npz') np.savez(input_path, a=x0, b=x1) input_str = 'x=' + input_path with self.assertRaises(RuntimeError): saved_model_cli.load_inputs_from_input_arg_string(input_str, '', '') def testInputParserErrorWrongName(self): x0 = np.array([[1], [2]]) x1 = np.array(range(5)) input_path = os.path.join(test.get_temp_dir(), 'input.npz') np.savez(input_path, a=x0, b=x1) input_str = 'x=' + input_path + '[c]' with self.assertRaises(RuntimeError): saved_model_cli.load_inputs_from_input_arg_string(input_str, '', '') def testRunCommandInputExamples(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) output_dir = os.path.join(test.get_temp_dir(), 'new_dir') args = self.parser.parse_args([ 'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'regress_x_to_y', '--input_examples', 
'inputs=[{"x":[8.0],"x2":[5.0]}, {"x":[4.0],"x2":[3.0]}]', '--outdir', output_dir ]) saved_model_cli.run(args) y_actual = np.load(os.path.join(output_dir, 'outputs.npy')) y_expected = np.array([[6.0], [4.0]]) self.assertAllEqual(y_expected, y_actual) def testRunCommandExistingOutdir(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) x = np.array([[1], [2]]) x_notused = np.zeros((6, 3)) input_path = os.path.join(test.get_temp_dir(), 'testRunCommand_inputs.npz') np.savez(input_path, x0=x, x1=x_notused) output_file = os.path.join(test.get_temp_dir(), 'outputs.npy') if os.path.exists(output_file): os.remove(output_file) args = self.parser.parse_args([ 'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'regress_x2_to_y3', '--inputs', 'inputs=' + input_path + '[x0]', '--outdir', test.get_temp_dir() ]) saved_model_cli.run(args) y_actual = np.load(output_file) y_expected = np.array([[3.5], [4.0]]) self.assertAllClose(y_expected, y_actual) def testRunCommandNewOutdir(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) x = np.array([[1], [2]]) x_notused = np.zeros((6, 3)) input_path = os.path.join(test.get_temp_dir(), 'testRunCommandNewOutdir_inputs.npz') output_dir = os.path.join(test.get_temp_dir(), 'new_dir') if os.path.isdir(output_dir): shutil.rmtree(output_dir) np.savez(input_path, x0=x, x1=x_notused) args = self.parser.parse_args([ 'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir', output_dir ]) saved_model_cli.run(args) y_actual = np.load(os.path.join(output_dir, 'y.npy')) y_expected = np.array([[2.5], [3.0]]) self.assertAllClose(y_expected, y_actual) def testRunCommandOutOverwrite(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) x = np.array([[1], [2]]) x_notused = np.zeros((6, 3)) input_path = os.path.join(test.get_temp_dir(), 'testRunCommandOutOverwrite_inputs.npz') np.savez(input_path, x0=x, x1=x_notused) output_file = os.path.join(test.get_temp_dir(), 'y.npy') open(output_file, 'a').close() args = self.parser.parse_args([ 'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir', test.get_temp_dir(), '--overwrite' ]) saved_model_cli.run(args) y_actual = np.load(output_file) y_expected = np.array([[2.5], [3.0]]) self.assertAllClose(y_expected, y_actual) def testRunCommandInvalidInputKeyError(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) args = self.parser.parse_args([ 'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'regress_x2_to_y3', '--input_exprs', 'x2=[1,2,3]' ]) with self.assertRaises(ValueError): saved_model_cli.run(args) def testRunCommandInputExamplesNotListError(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) output_dir = os.path.join(test.get_temp_dir(), 'new_dir') args = self.parser.parse_args([ 'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'regress_x_to_y', '--input_examples', 'inputs={"x":8.0,"x2":5.0}', '--outdir', output_dir ]) with self.assertRaisesRegexp(ValueError, 'must be a list'): saved_model_cli.run(args) def testRunCommandInputExamplesFeatureValueNotListError(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) 
output_dir = os.path.join(test.get_temp_dir(), 'new_dir') args = self.parser.parse_args([ 'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'regress_x_to_y', '--input_examples', 'inputs=[{"x":8.0,"x2":5.0}]', '--outdir', output_dir ]) with self.assertRaisesRegexp(ValueError, 'feature value must be a list'): saved_model_cli.run(args) def testRunCommandInputExamplesFeatureBadType(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) output_dir = os.path.join(test.get_temp_dir(), 'new_dir') args = self.parser.parse_args([ 'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'regress_x_to_y', '--input_examples', 'inputs=[{"x":[[1],[2]]}]', '--outdir', output_dir ]) with self.assertRaisesRegexp(ValueError, 'is not supported'): saved_model_cli.run(args) def testRunCommandOutputFileExistError(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) x = np.array([[1], [2]]) x_notused = np.zeros((6, 3)) input_path = os.path.join(test.get_temp_dir(), 'testRunCommandOutOverwrite_inputs.npz') np.savez(input_path, x0=x, x1=x_notused) output_file = os.path.join(test.get_temp_dir(), 'y.npy') open(output_file, 'a').close() args = self.parser.parse_args([ 'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir', test.get_temp_dir() ]) with self.assertRaises(RuntimeError): saved_model_cli.run(args) def testRunCommandInputNotGivenError(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) args = self.parser.parse_args([ 'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'serving_default' ]) with self.assertRaises(AttributeError): saved_model_cli.run(args) def testRunCommandWithDebuggerEnabled(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) x = np.array([[1], [2]]) x_notused = np.zeros((6, 3)) input_path = os.path.join(test.get_temp_dir(), 'testRunCommandNewOutdir_inputs.npz') output_dir = os.path.join(test.get_temp_dir(), 'new_dir') if os.path.isdir(output_dir): shutil.rmtree(output_dir) np.savez(input_path, x0=x, x1=x_notused) args = self.parser.parse_args([ 'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def', 'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir', output_dir, '--tf_debug' ]) def fake_wrapper_session(sess): return sess with test.mock.patch.object(local_cli_wrapper, 'LocalCLIDebugWrapperSession', side_effect=fake_wrapper_session, autospec=True) as fake: saved_model_cli.run(args) fake.assert_called_with(test.mock.ANY) y_actual = np.load(os.path.join(output_dir, 'y.npy')) y_expected = np.array([[2.5], [3.0]]) self.assertAllClose(y_expected, y_actual) def testScanCommand(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) args = self.parser.parse_args(['scan', '--dir', base_path]) with captured_output() as (out, _): saved_model_cli.scan(args) output = out.getvalue().strip() self.assertTrue('does not contain blacklisted ops' in output) def testScanCommandFoundBlacklistedOp(self): self.parser = saved_model_cli.create_parser() base_path = test.test_src_dir_path(SAVED_MODEL_PATH) args = self.parser.parse_args( ['scan', '--dir', base_path, '--tag_set', 'serve']) op_blacklist = saved_model_cli._OP_BLACKLIST saved_model_cli._OP_BLACKLIST = set(['VariableV2']) with captured_output() 
as (out, _): saved_model_cli.scan(args) saved_model_cli._OP_BLACKLIST = op_blacklist output = out.getvalue().strip() self.assertTrue('\'VariableV2\'' in output) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/saved_model_cli_test.py
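These tests drive saved_model_cli in-process through its argparse front end; the same pattern works outside a test. A sketch in which the SavedModel directory, the input expression, and the output directory are hypothetical:

# Hedged sketch of driving the CLI programmatically, as the tests above do.
from tensorflow.python.tools import saved_model_cli

parser = saved_model_cli.create_parser()

# Print every MetaGraphDef, SignatureDef and tensor shape in the model.
args = parser.parse_args(['show', '--dir', '/tmp/my_saved_model', '--all'])
saved_model_cli.show(args)

# Run one signature, feeding a Python expression and writing outputs as .npy.
args = parser.parse_args([
    'run', '--dir', '/tmp/my_saved_model', '--tag_set', 'serve',
    '--signature_def', 'serving_default', '--input_exprs', 'x=[[1.0]]',
    '--outdir', '/tmp/cli_outputs'
])
saved_model_cli.run(args)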
# pylint: disable=g-bad-file-header # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities to remove unneeded nodes from a GraphDefs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from google.protobuf import text_format from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework import graph_util from tensorflow.python.platform import gfile def strip_unused(input_graph_def, input_node_names, output_node_names, placeholder_type_enum): """Removes unused nodes from a GraphDef. Args: input_graph_def: A graph with nodes we want to prune. input_node_names: A list of the nodes we use as inputs. output_node_names: A list of the output nodes. placeholder_type_enum: The AttrValue enum for the placeholder data type, or a list that specifies one value per input node name. Returns: A `GraphDef` with all unnecessary ops removed. Raises: ValueError: If any element in `input_node_names` refers to a tensor instead of an operation. KeyError: If any element in `input_node_names` is not found in the graph. """ for name in input_node_names: if ":" in name: raise ValueError("Name '%s' appears to refer to a Tensor, " "not a Operation." % name) # Here we replace the nodes we're going to override as inputs with # placeholders so that any unused nodes that are inputs to them are # automatically stripped out by extract_sub_graph(). 
not_found = {name for name in input_node_names} inputs_replaced_graph_def = graph_pb2.GraphDef() for node in input_graph_def.node: if node.name in input_node_names: not_found.remove(node.name) placeholder_node = node_def_pb2.NodeDef() placeholder_node.op = "Placeholder" placeholder_node.name = node.name if isinstance(placeholder_type_enum, list): input_node_index = input_node_names.index(node.name) placeholder_node.attr["dtype"].CopyFrom( attr_value_pb2.AttrValue(type=placeholder_type_enum[ input_node_index])) else: placeholder_node.attr["dtype"].CopyFrom( attr_value_pb2.AttrValue(type=placeholder_type_enum)) if "_output_shapes" in node.attr: placeholder_node.attr["_output_shapes"].CopyFrom(node.attr[ "_output_shapes"]) if "shape" in node.attr: placeholder_node.attr["shape"].CopyFrom(node.attr["shape"]) inputs_replaced_graph_def.node.extend([placeholder_node]) else: inputs_replaced_graph_def.node.extend([copy.deepcopy(node)]) if not_found: raise KeyError("The following input nodes were not found: %s" % not_found) output_graph_def = graph_util.extract_sub_graph(inputs_replaced_graph_def, output_node_names) return output_graph_def def strip_unused_from_files(input_graph, input_binary, output_graph, output_binary, input_node_names, output_node_names, placeholder_type_enum): """Removes unused nodes from a graph file.""" if not gfile.Exists(input_graph): print("Input graph file '" + input_graph + "' does not exist!") return -1 if not output_node_names: print("You need to supply the name of a node to --output_node_names.") return -1 input_graph_def = graph_pb2.GraphDef() mode = "rb" if input_binary else "r" with gfile.GFile(input_graph, mode) as f: if input_binary: input_graph_def.ParseFromString(f.read()) else: text_format.Merge(f.read(), input_graph_def) output_graph_def = strip_unused(input_graph_def, input_node_names.split(","), output_node_names.split(","), placeholder_type_enum) if output_binary: with gfile.GFile(output_graph, "wb") as f: f.write(output_graph_def.SerializeToString()) else: with gfile.GFile(output_graph, "w") as f: f.write(text_format.MessageToString(output_graph_def)) print("%d ops in the final graph." % len(output_graph_def.node))
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/strip_unused_lib.py
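A small, self-contained sketch of the in-memory strip_unused() entry point defined above, using a toy graph in place of a real model:

# Hedged sketch: prune everything not needed to compute 'softmax' from 'input0'.
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.tools import strip_unused_lib

with ops.Graph().as_default() as g:
  x = array_ops.placeholder(dtypes.float32, name='input0')
  math_ops.multiply(x, 2.0, name='softmax')  # stand-in for a real model

stripped_graph_def = strip_unused_lib.strip_unused(
    input_graph_def=g.as_graph_def(),
    input_node_names=['input0'],    # rewritten as Placeholder nodes
    output_node_names=['softmax'],  # roots of the subgraph that is kept
    placeholder_type_enum=dtypes.float32.as_datatype_enum)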
# pylint: disable=g-bad-file-header # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Removes parts of a graph that are only needed for training. There are several common transformations that can be applied to GraphDefs created to train a model, that help reduce the amount of computation needed when the network is used only for inference. These include: - Removing training-only operations like checkpoint saving. - Stripping out parts of the graph that are never reached. - Removing debug operations like CheckNumerics. - Folding batch normalization ops into the pre-calculated weights. - Fusing common operations into unified versions. This script takes either a frozen binary GraphDef file (where the weight variables have been converted into constants by the freeze_graph script), or a text GraphDef proto file (the weight variables are stored in a separate checkpoint file), and outputs a new GraphDef with the optimizations applied. If the input graph is a text graph file, make sure to include the node that restores the variable weights in output_names. That node is usually named "restore_all". An example of command-line usage is: bazel build tensorflow/python/tools:optimize_for_inference && \ bazel-bin/tensorflow/python/tools/optimize_for_inference \ --input=frozen_inception_graph.pb \ --output=optimized_inception_graph.pb \ --frozen_graph=True \ --input_names=Mul \ --output_names=softmax """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import sys from google.protobuf import text_format from tensorflow.core.framework import graph_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import graph_io from tensorflow.python.platform import app from tensorflow.python.platform import gfile from tensorflow.python.tools import optimize_for_inference_lib FLAGS = None def main(unused_args): if not gfile.Exists(FLAGS.input): print("Input graph file '" + FLAGS.input + "' does not exist!") return -1 input_graph_def = graph_pb2.GraphDef() with gfile.Open(FLAGS.input, "rb") as f: data = f.read() if FLAGS.frozen_graph: input_graph_def.ParseFromString(data) else: text_format.Merge(data.decode("utf-8"), input_graph_def) output_graph_def = optimize_for_inference_lib.optimize_for_inference( input_graph_def, FLAGS.input_names.split(","), FLAGS.output_names.split(","), _parse_placeholder_types(FLAGS.placeholder_type_enum), FLAGS.toco_compatible) if FLAGS.frozen_graph: f = gfile.GFile(FLAGS.output, "w") f.write(output_graph_def.SerializeToString()) else: graph_io.write_graph(output_graph_def, os.path.dirname(FLAGS.output), os.path.basename(FLAGS.output)) return 0 def _parse_placeholder_types(values): """Extracts placeholder types from a comma separate list.""" values = [int(value) for value in values.split(",")] return values if len(values) > 1 else values[0] def 
parse_args(): """Parses command line arguments.""" parser = argparse.ArgumentParser() parser.register("type", "bool", lambda v: v.lower() == "true") parser.add_argument( "--input", type=str, default="", help="TensorFlow \'GraphDef\' file to load.") parser.add_argument( "--output", type=str, default="", help="File to save the output graph to.") parser.add_argument( "--input_names", type=str, default="", help="Input node names, comma separated.") parser.add_argument( "--output_names", type=str, default="", help="Output node names, comma separated.") parser.add_argument( "--frozen_graph", nargs="?", const=True, type="bool", default=True, help="""\ If true, the input graph is a binary frozen GraphDef file; if false, it is a text GraphDef proto file.\ """) parser.add_argument( "--placeholder_type_enum", type=str, default=str(dtypes.float32.as_datatype_enum), help="""\ The AttrValue enum to use for placeholders. Or a comma separated list, one value for each placeholder.\ """) parser.add_argument( "--toco_compatible", type=bool, default=False, help="""\ If true, only use ops compatible with Tensorflow Lite Optimizing Converter.\ """) return parser.parse_known_args() if __name__ == "__main__": FLAGS, unparsed = parse_args() app.run(main=main, argv=[sys.argv[0]] + unparsed)
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/optimize_for_inference.py
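The CLI above wraps optimize_for_inference_lib.optimize_for_inference; a minimal sketch of calling it directly on a frozen graph, with a hypothetical path and the node names from the docstring example:

# Hedged sketch: load a frozen GraphDef and apply the inference optimizations.
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import gfile
from tensorflow.python.tools import optimize_for_inference_lib

input_graph_def = graph_pb2.GraphDef()
with gfile.Open('/tmp/frozen_inception_graph.pb', 'rb') as f:
  input_graph_def.ParseFromString(f.read())

output_graph_def = optimize_for_inference_lib.optimize_for_inference(
    input_graph_def,
    ['Mul'],                          # input node names
    ['softmax'],                      # output node names
    dtypes.float32.as_datatype_enum,  # placeholder type(s)
    False)                            # toco_compatible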
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Generates and prints out imports and constants for new TensorFlow python api. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import collections import importlib import os import sys from tensorflow.python.tools.api.generator import doc_srcs from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_export API_ATTRS = tf_export.API_ATTRS API_ATTRS_V1 = tf_export.API_ATTRS_V1 _LAZY_LOADING = False _API_VERSIONS = [1, 2] _COMPAT_MODULE_TEMPLATE = 'compat.v%d' _COMPAT_MODULE_PREFIX = 'compat.v' _DEFAULT_PACKAGE = 'tensorflow.python' _GENFILES_DIR_SUFFIX = 'genfiles/' _SYMBOLS_TO_SKIP_EXPLICITLY = { # Overrides __getattr__, so that unwrapping tf_decorator # would have side effects. 'tensorflow.python.platform.flags.FLAGS' } _GENERATED_FILE_HEADER = """# This file is MACHINE GENERATED! Do not edit. # Generated by: tensorflow/python/tools/api/generator/create_python_api.py script. \"\"\"%s \"\"\" from __future__ import print_function as _print_function import sys as _sys """ _GENERATED_FILE_FOOTER = '\n\ndel _print_function\n' _DEPRECATION_FOOTER = """ from tensorflow.python.util import module_wrapper as _module_wrapper if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper): _sys.modules[__name__] = _module_wrapper.TFModuleWrapper( _sys.modules[__name__], "%s", public_apis=%s, deprecation=%s, has_lite=%s) """ _LAZY_LOADING_MODULE_TEXT_TEMPLATE = """ # Inform pytype that this module is dynamically populated (b/111239204). _HAS_DYNAMIC_ATTRIBUTES = True _PUBLIC_APIS = { %s } """ class SymbolExposedTwiceError(Exception): """Raised when different symbols are exported with the same name.""" pass def get_canonical_import(import_set): """Obtain one single import from a set of possible sources of a symbol. One symbol might come from multiple places as it is being imported and reexported. To simplify API changes, we always use the same import for the same module, and give preference based on higher priority and alphabetical ordering. Args: import_set: (set) Imports providing the same symbol. This is a set of tuples in the form (import, priority). We want to pick an import with highest priority. Returns: A module name to import """ # We use the fact that list sorting is stable, so first we convert the set to # a sorted list of the names and then we resort this list to move elements # not in core tensorflow to the end. # Here we sort by priority (higher preferred) and then alphabetically by # import string. 
import_list = sorted( import_set, key=lambda imp_and_priority: (-imp_and_priority[1], imp_and_priority[0])) return import_list[0][0] class _ModuleInitCodeBuilder(object): """Builds a map from module name to imports included in that module.""" def __init__(self, output_package, api_version, lazy_loading=_LAZY_LOADING): self._output_package = output_package # Maps API module to API symbol name to set of tuples of the form # (module name, priority). # The same symbol can be imported from multiple locations. Higher # "priority" indicates that import location is preferred over others. self._module_imports = collections.defaultdict( lambda: collections.defaultdict(set)) self._dest_import_to_id = collections.defaultdict(int) # Names that start with underscore in the root module. self._underscore_names_in_root = [] self._api_version = api_version # Controls whether or not exported symbols are lazily loaded or statically # imported. self._lazy_loading = lazy_loading def _check_already_imported(self, symbol_id, api_name): if (api_name in self._dest_import_to_id and symbol_id != self._dest_import_to_id[api_name] and symbol_id != -1): raise SymbolExposedTwiceError( 'Trying to export multiple symbols with same name: %s.' % api_name) self._dest_import_to_id[api_name] = symbol_id def add_import( self, symbol, source_module_name, source_name, dest_module_name, dest_name): """Adds this import to module_imports. Args: symbol: TensorFlow Python symbol. source_module_name: (string) Module to import from. source_name: (string) Name of the symbol to import. dest_module_name: (string) Module name to add import to. dest_name: (string) Import the symbol using this name. Raises: SymbolExposedTwiceError: Raised when an import with the same dest_name has already been added to dest_module_name. """ import_str = self.format_import(source_module_name, source_name, dest_name) # Check if we are trying to expose two different symbols with same name. full_api_name = dest_name if dest_module_name: full_api_name = dest_module_name + '.' + full_api_name symbol_id = -1 if not symbol else id(symbol) self._check_already_imported(symbol_id, full_api_name) if not dest_module_name and dest_name.startswith('_'): self._underscore_names_in_root.append(dest_name) # The same symbol can be available in multiple modules. # We store all possible ways of importing this symbol and later pick just # one. priority = 0 if symbol and hasattr(symbol, '__module__'): # Give higher priority to source module if it matches # symbol's original module. priority = int(source_module_name == symbol.__module__) self._module_imports[dest_module_name][full_api_name].add( (import_str, priority)) def _import_submodules(self): """Add imports for all destination modules in self._module_imports.""" # Import all required modules in their parent modules. # For e.g. if we import 'foo.bar.Value'. Then, we also # import 'bar' in 'foo'. imported_modules = set(self._module_imports.keys()) for module in imported_modules: if not module: continue module_split = module.split('.') parent_module = '' # we import submodules in their parent_module for submodule_index in range(len(module_split)): if submodule_index > 0: submodule = module_split[submodule_index-1] parent_module += '.' + submodule if parent_module else submodule import_from = self._output_package if self._lazy_loading: import_from += '.' 
+ '.'.join(module_split[:submodule_index + 1]) self.add_import( symbol=None, source_module_name='', source_name=import_from, dest_module_name=parent_module, dest_name=module_split[submodule_index]) else: if submodule_index > 0: import_from += '.' + '.'.join(module_split[:submodule_index]) self.add_import( symbol=None, source_module_name=import_from, source_name=module_split[submodule_index], dest_module_name=parent_module, dest_name=module_split[submodule_index]) def build(self): """Get a map from destination module to __init__.py code for that module. Returns: A dictionary where key: (string) destination module (for e.g. tf or tf.consts). value: (string) text that should be in __init__.py files for corresponding modules. """ self._import_submodules() module_text_map = {} footer_text_map = {} for dest_module, dest_name_to_imports in self._module_imports.items(): # Sort all possible imports for a symbol and pick the first one. imports_list = [ get_canonical_import(imports) for _, imports in dest_name_to_imports.items() ] if self._lazy_loading: module_text_map[ dest_module] = _LAZY_LOADING_MODULE_TEXT_TEMPLATE % '\n'.join( sorted(imports_list)) else: module_text_map[dest_module] = '\n'.join(sorted(imports_list)) # Expose exported symbols with underscores in root module since we import # from it using * import. Don't need this for lazy_loading because the # underscore symbols are already included in __all__ when passed in and # handled by TFModuleWrapper. if not self._lazy_loading: underscore_names_str = ', '.join( '\'%s\'' % name for name in self._underscore_names_in_root) module_text_map[''] = module_text_map.get('', '') + ''' _names_with_underscore = [%s] __all__ = [_s for _s in dir() if not _s.startswith('_')] __all__.extend([_s for _s in _names_with_underscore]) ''' % underscore_names_str for dest_module, _ in self._module_imports.items(): deprecation = 'False' has_lite = 'False' if self._api_version == 1: # Add 1.* deprecations. if not dest_module.startswith(_COMPAT_MODULE_PREFIX): deprecation = 'True' # Workaround to make sure not load lite from lite/__init__.py if (not dest_module and 'lite' in self._module_imports and self._lazy_loading): has_lite = 'True' if self._lazy_loading: public_apis_name = '_PUBLIC_APIS' else: public_apis_name = 'None' footer_text_map[dest_module] = _DEPRECATION_FOOTER % ( dest_module, public_apis_name, deprecation, has_lite) return module_text_map, footer_text_map def format_import(self, source_module_name, source_name, dest_name): """Formats import statement. Args: source_module_name: (string) Source module to import from. source_name: (string) Source symbol name to import. dest_name: (string) Destination alias name. Returns: An import statement string. """ if self._lazy_loading: return " '%s': ('%s', '%s')," % (dest_name, source_module_name, source_name) else: if source_module_name: if source_name == dest_name: return 'from %s import %s' % (source_module_name, source_name) else: return 'from %s import %s as %s' % (source_module_name, source_name, dest_name) else: if source_name == dest_name: return 'import %s' % source_name else: return 'import %s as %s' % (source_name, dest_name) def _get_name_and_module(full_name): """Split full_name into module and short name. Args: full_name: Full name of symbol that includes module. Returns: Full module name and short symbol name. """ name_segments = full_name.split('.') return '.'.join(name_segments[:-1]), name_segments[-1] def _join_modules(module1, module2): """Concatenate 2 module components. 
Args: module1: First module to join. module2: Second module to join. Returns: Given two modules aaa.bbb and ccc.ddd, returns a joined module aaa.bbb.ccc.ddd. """ if not module1: return module2 if not module2: return module1 return '%s.%s' % (module1, module2) def add_imports_for_symbol( module_code_builder, symbol, source_module_name, source_name, api_name, api_version, output_module_prefix=''): """Add imports for the given symbol to `module_code_builder`. Args: module_code_builder: `_ModuleInitCodeBuilder` instance. symbol: A symbol. source_module_name: Module that we can import the symbol from. source_name: Name we can import the symbol with. api_name: API name. Currently, must be either `tensorflow` or `estimator`. api_version: API version. output_module_prefix: Prefix to prepend to destination module. """ if api_version == 1: names_attr = API_ATTRS_V1[api_name].names constants_attr = API_ATTRS_V1[api_name].constants else: names_attr = API_ATTRS[api_name].names constants_attr = API_ATTRS[api_name].constants # If symbol is _tf_api_constants attribute, then add the constants. if source_name == constants_attr: for exports, name in symbol: for export in exports: dest_module, dest_name = _get_name_and_module(export) dest_module = _join_modules(output_module_prefix, dest_module) module_code_builder.add_import( None, source_module_name, name, dest_module, dest_name) # If symbol has _tf_api_names attribute, then add import for it. if (hasattr(symbol, '__dict__') and names_attr in symbol.__dict__): # Generate import statements for symbols. for export in getattr(symbol, names_attr): # pylint: disable=protected-access dest_module, dest_name = _get_name_and_module(export) dest_module = _join_modules(output_module_prefix, dest_module) module_code_builder.add_import( symbol, source_module_name, source_name, dest_module, dest_name) def get_api_init_text(packages, output_package, api_name, api_version, compat_api_versions=None, lazy_loading=_LAZY_LOADING): """Get a map from destination module to __init__.py code for that module. Args: packages: Base python packages containing python with target tf_export decorators. output_package: Base output python package where generated API will be added. api_name: API you want to generate (e.g. `tensorflow` or `estimator`). api_version: API version you want to generate (1 or 2). compat_api_versions: Additional API versions to generate under compat/ directory. lazy_loading: Boolean flag. If True, a lazy loading `__init__.py` file is produced and if `False`, static imports are used. Returns: A dictionary where key: (string) destination module (for e.g. tf or tf.consts). value: (string) text that should be in __init__.py files for corresponding modules. """ if compat_api_versions is None: compat_api_versions = [] module_code_builder = _ModuleInitCodeBuilder( output_package, api_version, lazy_loading) # Traverse over everything imported above. Specifically, # we want to traverse over TensorFlow Python modules. def in_packages(m): return any(package in m for package in packages) for module in list(sys.modules.values()): # Only look at tensorflow modules. if (not module or not hasattr(module, '__name__') or module.__name__ is None or not in_packages(module.__name__)): continue # Do not generate __init__.py files for contrib modules for now. if (('.contrib.' in module.__name__ or module.__name__.endswith('.contrib')) and '.lite' not in module.__name__): continue for module_contents_name in dir(module): if (module.__name__ + '.' 
+ module_contents_name in _SYMBOLS_TO_SKIP_EXPLICITLY): continue attr = getattr(module, module_contents_name) _, attr = tf_decorator.unwrap(attr) add_imports_for_symbol( module_code_builder, attr, module.__name__, module_contents_name, api_name, api_version) for compat_api_version in compat_api_versions: add_imports_for_symbol( module_code_builder, attr, module.__name__, module_contents_name, api_name, compat_api_version, _COMPAT_MODULE_TEMPLATE % compat_api_version) return module_code_builder.build() def get_module(dir_path, relative_to_dir): """Get module that corresponds to path relative to relative_to_dir. Args: dir_path: Path to directory. relative_to_dir: Get module relative to this directory. Returns: Name of module that corresponds to the given directory. """ dir_path = dir_path[len(relative_to_dir):] # Convert path separators to '/' for easier parsing below. dir_path = dir_path.replace(os.sep, '/') return dir_path.replace('/', '.').strip('.') def get_module_docstring(module_name, package, api_name): """Get docstring for the given module. This method looks for docstring in the following order: 1. Checks if module has a docstring specified in doc_srcs. 2. Checks if module has a docstring source module specified in doc_srcs. If it does, gets docstring from that module. 3. Checks if module with module_name exists under base package. If it does, gets docstring from that module. 4. Returns a default docstring. Args: module_name: module name relative to tensorflow (excluding 'tensorflow.' prefix) to get a docstring for. package: Base python package containing python with target tf_export decorators. api_name: API you want to generate (e.g. `tensorflow` or `estimator`). Returns: One-line docstring to describe the module. """ # Get the same module doc strings for any version. That is, for module # 'compat.v1.foo' we can get docstring from module 'foo'. for version in _API_VERSIONS: compat_prefix = _COMPAT_MODULE_TEMPLATE % version if module_name.startswith(compat_prefix): module_name = module_name[len(compat_prefix):].strip('.') # Module under base package to get a docstring from. docstring_module_name = module_name doc_sources = doc_srcs.get_doc_sources(api_name) if module_name in doc_sources: docsrc = doc_sources[module_name] if docsrc.docstring: return docsrc.docstring if docsrc.docstring_module_name: docstring_module_name = docsrc.docstring_module_name docstring_module_name = package + '.' + docstring_module_name if (docstring_module_name in sys.modules and sys.modules[docstring_module_name].__doc__): return sys.modules[docstring_module_name].__doc__ return 'Public API for tf.%s namespace.' % module_name def create_api_files(output_files, packages, root_init_template, output_dir, output_package, api_name, api_version, compat_api_versions, compat_init_templates, lazy_loading=_LAZY_LOADING): """Creates __init__.py files for the Python API. Args: output_files: List of __init__.py file paths to create. packages: Base python packages containing python with target tf_export decorators. root_init_template: Template for top-level __init__.py file. "# API IMPORTS PLACEHOLDER" comment in the template file will be replaced with imports. output_dir: output API root directory. output_package: Base output package where generated API will be added. api_name: API you want to generate (e.g. `tensorflow` or `estimator`). api_version: API version to generate (`v1` or `v2`). compat_api_versions: Additional API versions to generate in compat/ subdirectory. 
compat_init_templates: List of templates for top level compat init files in the same order as compat_api_versions. lazy_loading: Boolean flag. If True, a lazy loading `__init__.py` file is produced and if `False`, static imports are used. Raises: ValueError: if output_files list is missing a required file. """ module_name_to_file_path = {} for output_file in output_files: module_name = get_module(os.path.dirname(output_file), output_dir) module_name_to_file_path[module_name] = os.path.normpath(output_file) # Create file for each expected output in genrule. for module, file_path in module_name_to_file_path.items(): if not os.path.isdir(os.path.dirname(file_path)): os.makedirs(os.path.dirname(file_path)) open(file_path, 'a').close() module_text_map, deprecation_footer_map = get_api_init_text( packages, output_package, api_name, api_version, compat_api_versions, lazy_loading) # Add imports to output files. missing_output_files = [] # Root modules are "" and "compat.v*". root_module = '' compat_module_to_template = { _COMPAT_MODULE_TEMPLATE % v: t for v, t in zip(compat_api_versions, compat_init_templates) } for module, text in module_text_map.items(): # Make sure genrule output file list is in sync with API exports. if module not in module_name_to_file_path: module_file_path = '"%s/__init__.py"' % ( module.replace('.', '/')) missing_output_files.append(module_file_path) continue contents = '' if module == root_module and root_init_template: # Read base init file for root module with open(root_init_template, 'r') as root_init_template_file: contents = root_init_template_file.read() contents = contents.replace('# API IMPORTS PLACEHOLDER', text) elif module in compat_module_to_template: # Read base init file for compat module with open(compat_module_to_template[module], 'r') as init_template_file: contents = init_template_file.read() contents = contents.replace('# API IMPORTS PLACEHOLDER', text) else: contents = ( _GENERATED_FILE_HEADER % get_module_docstring( module, packages[0], api_name) + text + _GENERATED_FILE_FOOTER) if module in deprecation_footer_map: if '# WRAPPER_PLACEHOLDER' in contents: contents = contents.replace('# WRAPPER_PLACEHOLDER', deprecation_footer_map[module]) else: contents += deprecation_footer_map[module] with open(module_name_to_file_path[module], 'w') as fp: fp.write(contents) if missing_output_files: raise ValueError( """Missing outputs for genrule:\n%s. Be sure to add these targets to tensorflow/python/tools/api/generator/api_init_files_v1.bzl and tensorflow/python/tools/api/generator/api_init_files.bzl (tensorflow repo), or tensorflow_estimator/python/estimator/api/api_gen.bzl (estimator repo)""" % ',\n'.join(sorted(missing_output_files))) def main(): parser = argparse.ArgumentParser() parser.add_argument( 'outputs', metavar='O', type=str, nargs='+', help='If a single file is passed in, then we we assume it contains a ' 'semicolon-separated list of Python files that we expect this script to ' 'output. If multiple files are passed in, then we assume output files ' 'are listed directly as arguments.') parser.add_argument( '--packages', default=_DEFAULT_PACKAGE, type=str, help='Base packages that import modules containing the target tf_export ' 'decorators.') parser.add_argument( '--root_init_template', default='', type=str, help='Template for top level __init__.py file. ' '"#API IMPORTS PLACEHOLDER" comment will be replaced with imports.') parser.add_argument( '--apidir', type=str, required=True, help='Directory where generated output files are placed. 
' 'gendir should be a prefix of apidir. Also, apidir ' 'should be a prefix of every directory in outputs.') parser.add_argument( '--apiname', required=True, type=str, choices=API_ATTRS.keys(), help='The API you want to generate.') parser.add_argument( '--apiversion', default=2, type=int, choices=_API_VERSIONS, help='The API version you want to generate.') parser.add_argument( '--compat_apiversions', default=[], type=int, action='append', help='Additional versions to generate in compat/ subdirectory. ' 'If set to 0, then no additional version would be generated.') parser.add_argument( '--compat_init_templates', default=[], type=str, action='append', help='Templates for top-level __init__ files under compat modules. ' 'The list of init file templates must be in the same order as ' 'list of versions passed with compat_apiversions.') parser.add_argument( '--output_package', default='tensorflow', type=str, help='Root output package.') parser.add_argument( '--loading', default='default', type=str, choices=['lazy', 'static', 'default'], help='Controls how the generated __init__.py file loads the exported ' 'symbols. \'lazy\' means the symbols are loaded when first used. ' '\'static\' means all exported symbols are loaded in the ' '__init__.py file. \'default\' uses the value of the ' '_LAZY_LOADING constant in create_python_api.py.') args = parser.parse_args() if len(args.outputs) == 1: # If we only get a single argument, then it must be a file containing # list of outputs. with open(args.outputs[0]) as output_list_file: outputs = [line.strip() for line in output_list_file.read().split(';')] else: outputs = args.outputs # Populate `sys.modules` with modules containing tf_export(). packages = args.packages.split(',') for package in packages: importlib.import_module(package) # Determine if the modules shall be loaded lazily or statically. if args.loading == 'default': lazy_loading = _LAZY_LOADING elif args.loading == 'lazy': lazy_loading = True elif args.loading == 'static': lazy_loading = False else: # This should never happen (tm). raise ValueError('Invalid value for --loading flag: %s. Must be one of ' 'lazy, static, default.' % args.loading) create_api_files(outputs, packages, args.root_init_template, args.apidir, args.output_package, args.apiname, args.apiversion, args.compat_apiversions, args.compat_init_templates, lazy_loading) if __name__ == '__main__': main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/api/generator/create_python_api.py
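As a concrete illustration of the priority rule documented in `get_canonical_import` above, the hedged sketch below (the candidate import strings are made up) shows the higher-priority source winning, with alphabetical order breaking ties.

```python
from tensorflow.python.tools.api.generator import create_python_api

# Two hypothetical sources for the same symbol; priority 1 marks the source
# module that matches the symbol's own __module__, as assigned in add_import.
candidates = {
    ("from tensorflow.python.ops.gen_math_ops import add", 0),
    ("from tensorflow.python.ops.math_ops import add", 1),
}
print(create_python_api.get_canonical_import(candidates))
# -> "from tensorflow.python.ops.math_ops import add"
```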
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Specifies sources of doc strings for API modules.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from tensorflow.python.util import tf_export # Specifies docstring source for a module. # Only one of docstring or docstring_module_name should be set. # * If docstring is set, then we will use this docstring when # for the module. # * If docstring_module_name is set, then we will copy the docstring # from docstring source module. DocSource = collections.namedtuple( 'DocSource', ['docstring', 'docstring_module_name']) # Each attribute of DocSource is optional. DocSource.__new__.__defaults__ = (None,) * len(DocSource._fields) _TENSORFLOW_DOC_SOURCES = { 'app': DocSource(docstring_module_name='platform.app'), 'bitwise': DocSource(docstring_module_name='ops.bitwise_ops'), 'compat': DocSource(docstring_module_name='util.compat'), 'distribute': DocSource(docstring_module_name='distribute.distribute_lib'), 'distributions': DocSource( docstring_module_name='ops.distributions.distributions'), 'errors': DocSource(docstring_module_name='framework.errors'), 'gfile': DocSource(docstring_module_name='platform.gfile'), 'graph_util': DocSource(docstring_module_name='framework.graph_util'), 'image': DocSource(docstring_module_name='ops.image_ops'), 'keras.estimator': DocSource(docstring_module_name='keras.estimator'), 'linalg': DocSource(docstring_module_name='ops.linalg_ops'), 'logging': DocSource(docstring_module_name='ops.logging_ops'), 'losses': DocSource(docstring_module_name='ops.losses.losses'), 'manip': DocSource(docstring_module_name='ops.manip_ops'), 'math': DocSource(docstring_module_name='ops.math_ops'), 'metrics': DocSource(docstring_module_name='ops.metrics'), 'nn': DocSource(docstring_module_name='ops.nn_ops'), 'nn.rnn_cell': DocSource(docstring_module_name='ops.rnn_cell'), 'python_io': DocSource(docstring_module_name='lib.io.python_io'), 'ragged': DocSource(docstring_module_name='ops.ragged'), 'resource_loader': DocSource( docstring_module_name='platform.resource_loader'), 'sets': DocSource(docstring_module_name='ops.sets'), 'signal': DocSource(docstring_module_name='ops.signal.signal'), 'sparse': DocSource(docstring_module_name='ops.sparse_ops'), 'strings': DocSource(docstring_module_name='ops.string_ops'), 'summary': DocSource(docstring_module_name='summary.summary'), 'sysconfig': DocSource(docstring_module_name='platform.sysconfig'), 'test': DocSource(docstring_module_name='platform.test'), 'train': DocSource(docstring_module_name='training.training'), } _ESTIMATOR_DOC_SOURCES = { 'estimator': DocSource( docstring_module_name='estimator_lib'), 'estimator.export': DocSource( docstring_module_name='export.export_lib'), 'estimator.inputs': DocSource( docstring_module_name='inputs.inputs'), } def get_doc_sources(api_name): """Get a map 
from module to a DocSource object. Args: api_name: API you want to generate (e.g. `tensorflow` or `estimator`). Returns: Map from module name to DocSource object. """ if api_name == tf_export.TENSORFLOW_API_NAME: return _TENSORFLOW_DOC_SOURCES if api_name == tf_export.ESTIMATOR_API_NAME: return _ESTIMATOR_DOC_SOURCES return {}
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/api/generator/doc_srcs.py
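A small usage sketch for the table above: `get_doc_sources('tensorflow')` returns the `DocSource` map, and most entries delegate to another module's docstring rather than carrying inline text. The `'math'` entry is used here purely as an example.

```python
from tensorflow.python.tools.api.generator import doc_srcs

sources = doc_srcs.get_doc_sources('tensorflow')  # 'tensorflow' is the TF API name
math_src = sources['math']
print(math_src.docstring)              # None: no inline docstring for tf.math
print(math_src.docstring_module_name)  # 'ops.math_ops': borrow that module's docstring
```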
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Tests for create_python_api.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import imp import sys from tensorflow.python.platform import test from tensorflow.python.tools.api.generator import create_python_api from tensorflow.python.util.tf_export import tf_export @tf_export('test_op', 'test_op1', 'test.test_op2') def test_op(): pass @tf_export('test1.foo', v1=['test.foo']) def deprecated_test_op(): pass @tf_export('TestClass', 'NewTestClass') class TestClass(object): pass _TEST_CONSTANT = 5 _MODULE_NAME = 'tensorflow.python.test_module' class CreatePythonApiTest(test.TestCase): def setUp(self): # Add fake op to a module that has 'tensorflow' in the name. sys.modules[_MODULE_NAME] = imp.new_module(_MODULE_NAME) setattr(sys.modules[_MODULE_NAME], 'test_op', test_op) setattr(sys.modules[_MODULE_NAME], 'deprecated_test_op', deprecated_test_op) setattr(sys.modules[_MODULE_NAME], 'TestClass', TestClass) test_op.__module__ = _MODULE_NAME TestClass.__module__ = _MODULE_NAME tf_export('consts._TEST_CONSTANT').export_constant( _MODULE_NAME, '_TEST_CONSTANT') def tearDown(self): del sys.modules[_MODULE_NAME] def testFunctionImportIsAdded(self): imports, _ = create_python_api.get_api_init_text( packages=[create_python_api._DEFAULT_PACKAGE], output_package='tensorflow', api_name='tensorflow', api_version=1) if create_python_api._LAZY_LOADING: expected_import = ( '\'test_op1\': ' '(\'tensorflow.python.test_module\',' ' \'test_op\')') else: expected_import = ( 'from tensorflow.python.test_module ' 'import test_op as test_op1') self.assertTrue( expected_import in str(imports), msg='%s not in %s' % (expected_import, str(imports))) if create_python_api._LAZY_LOADING: expected_import = ( '\'test_op\': ' '(\'tensorflow.python.test_module\',' ' \'test_op\')') else: expected_import = ( 'from tensorflow.python.test_module ' 'import test_op') self.assertTrue( expected_import in str(imports), msg='%s not in %s' % (expected_import, str(imports))) # Also check that compat.v1 is not added to imports. 
self.assertFalse('compat.v1' in imports, msg='compat.v1 in %s' % str(imports.keys())) def testClassImportIsAdded(self): imports, _ = create_python_api.get_api_init_text( packages=[create_python_api._DEFAULT_PACKAGE], output_package='tensorflow', api_name='tensorflow', api_version=2) if create_python_api._LAZY_LOADING: expected_import = ( '\'NewTestClass\':' ' (\'tensorflow.python.test_module\',' ' \'TestClass\')') else: expected_import = ( 'from tensorflow.python.test_module ' 'import TestClass') self.assertTrue( 'TestClass' in str(imports), msg='%s not in %s' % (expected_import, str(imports))) def testConstantIsAdded(self): imports, _ = create_python_api.get_api_init_text( packages=[create_python_api._DEFAULT_PACKAGE], output_package='tensorflow', api_name='tensorflow', api_version=1) if create_python_api._LAZY_LOADING: expected = ('\'_TEST_CONSTANT\':' ' (\'tensorflow.python.test_module\',' ' \'_TEST_CONSTANT\')') else: expected = ('from tensorflow.python.test_module ' 'import _TEST_CONSTANT') self.assertTrue(expected in str(imports), msg='%s not in %s' % (expected, str(imports))) def testCompatModuleIsAdded(self): imports, _ = create_python_api.get_api_init_text( packages=[create_python_api._DEFAULT_PACKAGE], output_package='tensorflow', api_name='tensorflow', api_version=2, compat_api_versions=[1]) self.assertTrue('compat.v1' in imports, msg='compat.v1 not in %s' % str(imports.keys())) self.assertTrue('compat.v1.test' in imports, msg='compat.v1.test not in %s' % str(imports.keys())) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/api/generator/create_python_api_test.py
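The test above relies on `tf_export` stamping API names onto decorated symbols, which the generator later reads back. A hedged sketch of that mechanism follows; the exported names are hypothetical and chosen only for illustration.

```python
from tensorflow.python.util.tf_export import tf_export

@tf_export('mytool.do_thing', v1=['mytool.do_thing_v1'])  # hypothetical API names
def do_thing():
  pass

# create_python_api reads these attributes when generating __init__.py files:
print(do_thing._tf_api_names)     # names exported to the v2 API
print(do_thing._tf_api_names_v1)  # names exported to the v1 API
```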
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Tests for tensorflow.python.tools.api.generator.doc_srcs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import importlib import sys from tensorflow.python.platform import test from tensorflow.python.tools.api.generator import doc_srcs FLAGS = None class DocSrcsTest(test.TestCase): def testModulesAreValidAPIModules(self): for module_name in doc_srcs.get_doc_sources(FLAGS.api_name): # Convert module_name to corresponding __init__.py file path. file_path = module_name.replace('.', '/') if file_path: file_path += '/' file_path += '__init__.py' self.assertIn( file_path, FLAGS.outputs, msg='%s is not a valid API module' % module_name) def testHaveDocstringOrDocstringModule(self): for module_name, docsrc in doc_srcs.get_doc_sources(FLAGS.api_name).items(): self.assertFalse( docsrc.docstring and docsrc.docstring_module_name, msg=('%s contains DocSource has both a docstring and a ' 'docstring_module_name. Only one of "docstring" or ' '"docstring_module_name" should be set.') % (module_name)) def testDocstringModulesAreValidModules(self): for _, docsrc in doc_srcs.get_doc_sources(FLAGS.api_name).items(): if docsrc.docstring_module_name: doc_module_name = '.'.join([ FLAGS.package, docsrc.docstring_module_name]) self.assertIn( doc_module_name, sys.modules, msg=('docsources_module %s is not a valid module under %s.' % (docsrc.docstring_module_name, FLAGS.package))) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( 'outputs', metavar='O', type=str, nargs='+', help='create_python_api output files.') parser.add_argument( '--package', type=str, help='Base package that imports modules containing the target tf_export ' 'decorators.') parser.add_argument( '--api_name', type=str, help='API name: tensorflow or estimator') FLAGS, unparsed = parser.parse_known_args() importlib.import_module(FLAGS.package) # Now update argv, so that unittest library does not get confused. sys.argv = [sys.argv[0]] + unparsed test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/api/generator/doc_srcs_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Tests for api_init_files.bzl and api_init_files_v1.bzl.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys # The unused imports are needed so that the python module is available in sys.modules # pylint: disable=unused-import from tensorflow import python as _tf_for_api_traversal # pylint: enable=unused-import from tensorflow.python.platform import test from tensorflow.python.util import tf_decorator def _get_module_from_symbol(symbol): if '.' not in symbol: return '' return '.'.join(symbol.split('.')[:-1]) def _get_modules(package, attr_name, constants_attr_name): """Get list of TF API modules. Args: package: We only look at modules that contain package in the name. attr_name: Attribute set on TF symbols that contains API names. constants_attr_name: Attribute set on TF modules that contains API constant names. Returns: Set of TensorFlow API modules. """ modules = set() # TODO(annarev): split up the logic in create_python_api.py so that # it can be reused in this test. for module in list(sys.modules.values()): if (not module or not hasattr(module, '__name__') or package not in module.__name__): continue for module_contents_name in dir(module): attr = getattr(module, module_contents_name) _, attr = tf_decorator.unwrap(attr) # Add modules to _tf_api_constants attribute. if module_contents_name == constants_attr_name: for exports, _ in attr: modules.update( [_get_module_from_symbol(export) for export in exports]) continue # Add modules for _tf_api_names attribute. if (hasattr(attr, '__dict__') and attr_name in attr.__dict__): modules.update([ _get_module_from_symbol(export) for export in getattr(attr, attr_name)]) return modules def _get_files_set(path, start_tag, end_tag): """Get set of file paths from the given file. Args: path: Path to file. File at `path` is expected to contain a list of paths where entire list starts with `start_tag` and ends with `end_tag`. List must be comma-separated and each path entry must be surrounded by double quotes. start_tag: String that indicates start of path list. end_tag: String that indicates end of path list. Returns: List of string paths. """ with open(path, 'r') as f: contents = f.read() start = contents.find(start_tag) + len(start_tag) + 1 end = contents.find(end_tag) contents = contents[start:end] file_paths = [ file_path.strip().strip('"') for file_path in contents.split(',')] return set(file_path for file_path in file_paths if file_path) def _module_to_paths(module): """Get all API __init__.py file paths for the given module. Args: module: Module to get file paths for. Returns: List of paths for the given module. For e.g. module foo.bar requires 'foo/__init__.py' and 'foo/bar/__init__.py'. 
""" submodules = [] module_segments = module.split('.') for i in range(len(module_segments)): submodules.append('.'.join(module_segments[:i+1])) paths = [] for submodule in submodules: if not submodule: paths.append('__init__.py') continue paths.append('%s/__init__.py' % (submodule.replace('.', '/'))) return paths class OutputInitFilesTest(test.TestCase): """Test that verifies files that list paths for TensorFlow API.""" def _validate_paths_for_modules( self, actual_paths, expected_paths, file_to_update_on_error): """Validates that actual_paths match expected_paths. Args: actual_paths: */__init__.py file paths listed in file_to_update_on_error. expected_paths: */__init__.py file paths that we need to create for TensorFlow API. file_to_update_on_error: File that contains list of */__init__.py files. We include it in error message printed if the file list needs to be updated. """ self.assertTrue(actual_paths) self.assertTrue(expected_paths) missing_paths = expected_paths - actual_paths extra_paths = actual_paths - expected_paths # Surround paths with quotes so that they can be copy-pasted # from error messages as strings. missing_paths = ['\'%s\'' % path for path in missing_paths] extra_paths = ['\'%s\'' % path for path in extra_paths] self.assertFalse( missing_paths, 'Please add %s to %s.' % ( ',\n'.join(sorted(missing_paths)), file_to_update_on_error)) self.assertFalse( extra_paths, 'Redundant paths, please remove %s in %s.' % ( ',\n'.join(sorted(extra_paths)), file_to_update_on_error)) def test_V2_init_files(self): modules = _get_modules( 'tensorflow', '_tf_api_names', '_tf_api_constants') file_path = ( 'tensorflow/python/tools/api/generator/api_init_files.bzl') paths = _get_files_set( file_path, '# BEGIN GENERATED FILES', '# END GENERATED FILES') module_paths = set( f for module in modules for f in _module_to_paths(module)) self._validate_paths_for_modules( paths, module_paths, file_to_update_on_error=file_path) def test_V1_init_files(self): modules = _get_modules( 'tensorflow', '_tf_api_names_v1', '_tf_api_constants_v1') file_path = ( 'tensorflow/python/tools/api/generator/' 'api_init_files_v1.bzl') paths = _get_files_set( file_path, '# BEGIN GENERATED FILES', '# END GENERATED FILES') module_paths = set( f for module in modules for f in _module_to_paths(module)) self._validate_paths_for_modules( paths, module_paths, file_to_update_on_error=file_path) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tools/api/generator/output_init_files_test.py
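The expansion performed by `_module_to_paths` in the test above is simple enough to restate; this hypothetical helper mirrors it for non-root modules only (the real test also handles the root package).

```python
def module_to_paths(module):
  # 'foo.bar' needs an __init__.py at every level of the package path.
  segments = module.split('.')
  return ['%s/__init__.py' % '/'.join(segments[:i + 1])
          for i in range(len(segments))]

print(module_to_paths('foo.bar'))
# ['foo/__init__.py', 'foo/bar/__init__.py']
```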
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import os from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export # This value changes every day with an automatic CL. It can be modified in code # via `forward_compatibility_horizon()` or with the environment variable # TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date. _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 8, 21) _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS" _FORWARD_COMPATIBILITY_DATE_NUMBER = None def _date_to_date_number(year, month, day): return (year << 9) | (month << 5) | day def _update_forward_compatibility_date_number(date_to_override=None): """Update the base date to compare in forward_compatible function.""" global _FORWARD_COMPATIBILITY_DATE_NUMBER if date_to_override: date = date_to_override else: date = _FORWARD_COMPATIBILITY_HORIZON delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME) if delta_days: date += datetime.timedelta(days=int(delta_days)) _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number( date.year, date.month, date.day) _update_forward_compatibility_date_number() @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibiltiy, use the old implementation. 
return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, depending on the month). Must be an `int`. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number( year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=8, day=1): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unit test them before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 8, 2): # Test that generate_graph_with_new_features() has an effect ``` Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, depending on the month). Must be an `int`. Yields: Nothing. """ try: _update_forward_compatibility_date_number(datetime.date(year, month, day)) yield finally: _update_forward_compatibility_date_number()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compat/compat.py
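The check in `forward_compatible` reduces to a single integer comparison because dates are packed with `_date_to_date_number`. A self-contained sketch of that logic follows; the horizon value mirrors the constant above, and the function names are local stand-ins rather than the module's own API.

```python
def date_to_number(year, month, day):
  # Same packing as _date_to_date_number: year << 9 | month << 5 | day.
  return (year << 9) | (month << 5) | day

HORIZON = date_to_number(2019, 8, 21)  # mirrors _FORWARD_COMPATIBILITY_HORIZON

def forward_compatible(year, month, day):
  # True once the horizon is strictly past the requested date.
  return HORIZON > date_to_number(year, month, day)

assert forward_compatible(2019, 8, 20)      # dates before the horizon pass
assert not forward_compatible(2019, 8, 21)  # the horizon itself does not
```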
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Switching v2 features on and off."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python import tf2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_v2_toggles
from tensorflow.python.ops import variable_scope
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=["enable_v2_behavior"])
def enable_v2_behavior():
  """Enables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before
  `Tensors`, `Graphs` or other structures have been created, and before
  devices have been initialized). It switches all global behaviors that are
  different between TensorFlow 1.x and 2.x to behave as intended for 2.x.

  This function is called in the main TensorFlow `__init__.py` file, so users
  should not need to call it except during complex migrations.
  """
  # TF2 behavior is enabled if either 1) enable_v2_behavior() is called or
  # 2) the TF2_BEHAVIOR=1 environment variable is set. In the latter case,
  # the modules below independently check if tf2.enabled().
  tf2.enable()
  ops.enable_eager_execution()
  tensor_shape.enable_v2_tensorshape()  # Also switched by tf2
  variable_scope.enable_resource_variables()
  ops.enable_tensor_equality()
  # Enables TensorArrayV2 and control flow V2.
  control_flow_v2_toggles.enable_control_flow_v2()


@tf_export(v1=["disable_v2_behavior"])
def disable_v2_behavior():
  """Disables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before
  `Tensors`, `Graphs` or other structures have been created, and before
  devices have been initialized). It switches all global behaviors that are
  different between TensorFlow 1.x and 2.x to behave as intended for 1.x.

  Users can call this function to disable 2.x behaviors during complex
  migrations.
  """
  tf2.disable()
  ops.disable_eager_execution()
  tensor_shape.disable_v2_tensorshape()  # Also switched by tf2
  variable_scope.disable_resource_variables()
  ops.disable_tensor_equality()
  # Disables TensorArrayV2 and control flow V2.
  control_flow_v2_toggles.disable_control_flow_v2()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compat/v2_compat.py
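Both toggles above are exported into the v1 API via `tf_export(v1=[...])`, so a 1.x-style program can opt in or out at startup. A hedged usage sketch, assuming a standard TensorFlow install:

```python
import tensorflow.compat.v1 as tf

# Must run before any tensors, graphs, or devices are created.
tf.enable_v2_behavior()   # eager execution, resource variables, TensorShape v2, ...

# ... build and run the model ...

# Alternatively, a program migrating gradually can keep 1.x semantics:
# tf.disable_v2_behavior()
```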
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for disabling TensorFlow 2.x behaviors (`v2_compat.disable_v2_behavior`)."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.compat import v2_compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test


class DisableV2BehaviorTest(test.TestCase):

  def test_basic(self):
    t = constant_op.constant([1, 2, 3])  # creates a hidden context
    self.assertTrue(isinstance(t, ops.EagerTensor))
    v2_compat.disable_v2_behavior()
    t = constant_op.constant([1, 2, 3])
    self.assertFalse(isinstance(t, ops.EagerTensor))


if __name__ == '__main__':
  v2_compat.enable_v2_behavior()
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compat/disable_v2_behavior_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for forward and backwards compatibility utilties.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import os from tensorflow.python.compat import compat from tensorflow.python.platform import test class CompatTest(test.TestCase): def _compatibility_date(self): date = compat._FORWARD_COMPATIBILITY_HORIZON # pylint: disable=protected-access return (date.year, date.month, date.day) def _n_days_after(self, n): date = compat._FORWARD_COMPATIBILITY_HORIZON + datetime.timedelta(days=n) # pylint: disable=protected-access return (date.year, date.month, date.day) def test_basic(self): compatibility_date = self._compatibility_date() one_day_before = self._n_days_after(-1) self.assertTrue(compat.forward_compatible(*one_day_before)) self.assertFalse(compat.forward_compatible(*compatibility_date)) def test_decorator(self): compatibility_date = self._compatibility_date() one_day_after = self._n_days_after(1) with compat.forward_compatibility_horizon(*one_day_after): self.assertTrue(compat.forward_compatible(*compatibility_date)) self.assertFalse(compat.forward_compatible(*one_day_after)) # After exiting context manager, value should be reset. self.assertFalse(compat.forward_compatible(*compatibility_date)) def test_decorator_with_failure(self): compatibility_date = self._compatibility_date() one_day_after = self._n_days_after(1) class DummyError(Exception): pass try: with compat.forward_compatibility_horizon(*one_day_after): raise DummyError() except DummyError: pass # silence DummyError # After exiting context manager, value should be reset. 
self.assertFalse(compat.forward_compatible(*compatibility_date)) def test_environment_override(self): var_name = 'TF_FORWARD_COMPATIBILITY_DELTA_DAYS' def remove_os_environment_var(): try: del os.environ[var_name] except KeyError: pass self.addCleanup(remove_os_environment_var) compatibility_date = self._compatibility_date() one_day_before = self._n_days_after(-1) one_day_after = self._n_days_after(1) ten_days_after = self._n_days_after(10) nine_days_after = self._n_days_after(9) self.assertTrue(compat.forward_compatible(*one_day_before)) self.assertFalse(compat.forward_compatible(*compatibility_date)) self.assertFalse(compat.forward_compatible(*one_day_after)) self.assertFalse(compat.forward_compatible(*nine_days_after)) self.assertFalse(compat.forward_compatible(*ten_days_after)) os.environ[var_name] = '10' compat._update_forward_compatibility_date_number() self.assertTrue(compat.forward_compatible(*one_day_before)) self.assertTrue(compat.forward_compatible(*compatibility_date)) self.assertTrue(compat.forward_compatible(*one_day_after)) self.assertTrue(compat.forward_compatible(*nine_days_after)) self.assertFalse(compat.forward_compatible(*ten_days_after)) del os.environ[var_name] compat._update_forward_compatibility_date_number() self.assertTrue(compat.forward_compatible(*one_day_before)) self.assertFalse(compat.forward_compatible(*compatibility_date)) self.assertFalse(compat.forward_compatible(*one_day_after)) self.assertFalse(compat.forward_compatible(*nine_days_after)) self.assertFalse(compat.forward_compatible(*ten_days_after)) # Now test interaction between environment variable and context func. os.environ[var_name] = '10' compat._update_forward_compatibility_date_number() self.assertTrue(compat.forward_compatible(*one_day_after)) with compat.forward_compatibility_horizon(*one_day_after): self.assertTrue(compat.forward_compatible(*one_day_before)) self.assertTrue(compat.forward_compatible(*compatibility_date)) self.assertFalse(compat.forward_compatible(*one_day_after)) self.assertFalse(compat.forward_compatible(*nine_days_after)) self.assertFalse(compat.forward_compatible(*ten_days_after)) self.assertTrue(compat.forward_compatible(*one_day_after)) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compat/compat_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Exports a SavedModel from a Trackable Python object.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os from tensorflow.core.framework import versions_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.core.protobuf import saved_model_pb2 from tensorflow.core.protobuf import saved_object_graph_pb2 from tensorflow.python.distribute import values as ds_values from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import function as defun from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.framework import versions from tensorflow.python.lib.io import file_io from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.saved_model import builder_impl from tensorflow.python.saved_model import constants from tensorflow.python.saved_model import function_serialization from tensorflow.python.saved_model import nested_structure_coder from tensorflow.python.saved_model import revived_types from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import signature_serialization from tensorflow.python.saved_model import tag_constants from tensorflow.python.saved_model import utils_impl from tensorflow.python.training.saving import functional_saver from tensorflow.python.training.tracking import base from tensorflow.python.training.tracking import graph_view from tensorflow.python.training.tracking import tracking from tensorflow.python.training.tracking import util from tensorflow.python.util import compat from tensorflow.python.util import object_identity from tensorflow.python.util.tf_export import tf_export _UNCOPIABLE_DTYPES = frozenset((dtypes.resource, dtypes.variant)) # A container for an EagerTensor constant which has been copied to the exported # Graph. _CapturedConstant = collections.namedtuple( "_CapturedConstant", ["eager_tensor", "graph_tensor"]) class _AugmentedGraphView(graph_view.ObjectGraphView): """An extendable graph which also tracks functions attached to objects. Extensions through `add_object` appear in the object graph and any checkpoints generated from it, even if they are not dependencies of the node they were attached to in the saving program. For example a `.signatures` attribute is added to exported SavedModel root objects without modifying the root object itself. 
Also tracks functions attached to objects in the graph, through the caching `list_functions` method. Enumerating functions only through this method ensures that we get a consistent view of functions, even if object attributes create new functions every time they are accessed. """ def __init__(self, root): if (not context.executing_eagerly() and not ops.inside_function()): saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary() else: saveables_cache = None super(_AugmentedGraphView, self).__init__(root, saveables_cache) # Object -> (name -> dep) self._extra_dependencies = object_identity.ObjectIdentityDictionary() self._functions = object_identity.ObjectIdentityDictionary() # Cache shared between objects in the same object graph. This is passed to # each trackable object's `_list_extra_dependencies_for_serialization` and # `_list_functions_for_serialization` function. self._serialization_cache = object_identity.ObjectIdentityDictionary() def add_object(self, parent_node, name_in_parent, subgraph_root): """Attach an object to `parent_node`, overriding any existing dependency.""" self._extra_dependencies.setdefault( parent_node, {})[name_in_parent] = subgraph_root def list_dependencies(self, obj): """Overrides a parent method to include `add_object` objects.""" extra_dependencies = self.list_extra_dependencies(obj) extra_dependencies.update(self._extra_dependencies.get(obj, {})) used_names = set() for name, dep in super(_AugmentedGraphView, self).list_dependencies(obj): used_names.add(name) if name in extra_dependencies: # Extra dependencies (except for `.signatures`, which is always added # when saving) should not have naming conflicts with dependencies # defined by the user. if name != signature_serialization.SIGNATURE_ATTRIBUTE_NAME: raise ValueError( "Error when exporting object {} of with identifier={}. The object" " has an attribute named {}, which is reserved. List of all " "reserved attributes: {}".format( obj, obj._object_identifier, # pylint: disable=protected-access name, extra_dependencies.keys())) yield base.TrackableReference(name, extra_dependencies[name]) else: yield base.TrackableReference(name, dep) for name, dep in extra_dependencies.items(): if name in used_names: continue yield base.TrackableReference(name, dep) def list_extra_dependencies(self, obj): return obj._list_extra_dependencies_for_serialization( # pylint: disable=protected-access self._serialization_cache) def list_functions(self, obj): obj_functions = self._functions.get(obj, None) if obj_functions is None: obj_functions = obj._list_functions_for_serialization( # pylint: disable=protected-access self._serialization_cache) self._functions[obj] = obj_functions return obj_functions class _SaveableView(object): """Provides a frozen view over a trackable root. This class helps creating a single stable view over an object to save. The saving code should access properties and functions via this class and not via the original object as there are cases where an object construct their trackable attributes and functions dynamically per call and will yield different objects if invoked more than once. Changes to the graph, for example adding objects, must happen in `checkpoint_view` (an `_AugmentedGraphView`) before the `_SaveableView` is constructed. Changes after the `_SaveableView` has been constructed will be ignored. 
""" def __init__(self, checkpoint_view): self.checkpoint_view = checkpoint_view trackable_objects, node_ids, slot_variables = ( self.checkpoint_view.objects_ids_and_slot_variables()) self.nodes = trackable_objects self.node_ids = node_ids self.captured_tensor_node_ids = object_identity.ObjectIdentityDictionary() self.slot_variables = slot_variables self.concrete_functions = [] # Also add `Function`s as nodes. nodes_without_functions = list(self.nodes) seen_function_names = set() for node in nodes_without_functions: for function in checkpoint_view.list_functions(node).values(): if function not in self.node_ids: self.node_ids[function] = len(self.nodes) self.nodes.append(function) if isinstance(function, def_function.Function): # Force listing the concrete functions for the side effects: # - populate the cache for functions that have an input_signature # and have not been called. # - force side effects of creation of concrete functions, e.g. create # variables on first run. concrete_functions = ( function._list_all_concrete_functions_for_serialization()) # pylint: disable=protected-access else: concrete_functions = [function] for concrete_function in concrete_functions: if concrete_function.name not in seen_function_names: seen_function_names.add(concrete_function.name) self.concrete_functions.append(concrete_function) @property def root(self): return self.nodes[0] def fill_object_graph_proto(self, proto): """Populate the nodes, children and slot_variables of a SavedObjectGraph.""" for node_id, node in enumerate(self.nodes): assert self.node_ids[node] == node_id object_proto = proto.nodes.add() object_proto.slot_variables.extend(self.slot_variables.get(node, ())) if isinstance(node, (def_function.Function, defun.ConcreteFunction, _CapturedConstant)): continue for child in self.checkpoint_view.list_dependencies(node): child_proto = object_proto.children.add() child_proto.node_id = self.node_ids[child.ref] child_proto.local_name = child.name for local_name, ref_function in ( self.checkpoint_view.list_functions(node).items()): child_proto = object_proto.children.add() child_proto.node_id = self.node_ids[ref_function] child_proto.local_name = local_name def map_resources(self): """Makes new resource handle ops corresponding to existing resource tensors. Creates resource handle ops in the current default graph, whereas `accessible_objects` will be from an eager context. Resource mapping adds resource handle ops to the main GraphDef of a SavedModel, which allows the C++ loader API to interact with variables. Returns: A tuple of (object_map, resource_map, asset_info): object_map: A dictionary mapping from object in `accessible_objects` to replacement objects created to hold the new resource tensors. resource_map: A dictionary mapping from resource tensors extracted from `accessible_objects` to newly created resource tensors. asset_info: An _AssetInfo tuple describing external assets referenced from accessible_objects. """ # Only makes sense when adding to the export Graph assert not context.executing_eagerly() # TODO(allenl): Handle MirroredVariables and other types of variables which # may need special casing. 
object_map = object_identity.ObjectIdentityDictionary() resource_map = {} asset_info = _AssetInfo( asset_defs=[], asset_initializers_by_resource={}, asset_filename_map={}, asset_index={}) for node_id, obj in enumerate(self.nodes): if isinstance(obj, tracking.CapturableResource): # pylint: disable=protected-access with ops.device(obj._resource_device): new_resource = obj._create_resource() # pylint: enable=protected-access resource_map[obj.resource_handle] = new_resource self.captured_tensor_node_ids[obj.resource_handle] = node_id elif ds_values.is_distributed_variable(obj): # Put both the distributed variable and component variable handles in # `captured_tensor_node_ids`. # Also create a new distributed variable for `object_map` with newly # created component variables. new_vars = [] for v in obj.values: new_variable = resource_variable_ops.copy_to_graph_uninitialized(v) object_map[v] = new_variable new_vars.append(new_variable) resource_map[v.handle] = new_variable.handle self.captured_tensor_node_ids[v.handle] = node_id object_map[obj] = obj._clone_with_new_values(new_vars) # pylint: disable=protected-access self.captured_tensor_node_ids[obj] = node_id elif resource_variable_ops.is_resource_variable(obj): new_variable = resource_variable_ops.copy_to_graph_uninitialized(obj) object_map[obj] = new_variable resource_map[obj.handle] = new_variable.handle self.captured_tensor_node_ids[obj.handle] = node_id elif isinstance(obj, tracking.TrackableAsset): _process_asset(obj, asset_info, resource_map) self.captured_tensor_node_ids[obj.asset_path] = node_id for concrete_function in self.concrete_functions: if not concrete_function.graph.saveable: raise ValueError( ("Unable to save function {name} for the following reason(s):\n" + "\n".join(concrete_function.graph.saving_errors)) .format(name=concrete_function.name)) for capture in concrete_function.captured_inputs: if (tensor_util.is_tensor(capture) and capture.dtype not in _UNCOPIABLE_DTYPES and capture not in self.captured_tensor_node_ids): capture_constant_value = tensor_util.constant_value(capture) if capture_constant_value is None: raise ValueError( ("Attempted to save a function {} which references a symbolic " "Tensor {} that is not a simple constant. This is not " "supported.").format(concrete_function.name, capture)) copied_tensor = constant_op.constant(capture_constant_value) node_id = len(self.nodes) node = _CapturedConstant( eager_tensor=capture, graph_tensor=copied_tensor) self.nodes.append(node) self.node_ids[capture] = node_id self.node_ids[node] = node_id self.captured_tensor_node_ids[capture] = node_id resource_map[capture] = copied_tensor return object_map, resource_map, asset_info def _tensor_dict_to_tensorinfo(tensor_dict): return {key: utils_impl.build_tensor_info_internal(value) for key, value in tensor_dict.items()} def _map_captures_to_created_tensors( original_captures, resource_map): """Maps eager tensors captured by a function to Graph resources for export. Args: original_captures: A dictionary mapping from tensors captured by the function to interior placeholders for those tensors (inside the function body). resource_map: A dictionary mapping from resource tensors owned by the eager context to resource tensors in the exported graph. Returns: A list of stand-in tensors which belong to the exported graph, corresponding to the function's captures. Raises: AssertionError: If the function references a resource which is not part of `resource_map`. 
""" export_captures = [] for exterior, interior in original_captures: mapped_resource = resource_map.get(exterior, None) if mapped_resource is None: raise AssertionError( ("Tried to export a function which references untracked object {}." "TensorFlow objects (e.g. tf.Variable) captured by functions must " "be tracked by assigning them to an attribute of a tracked object " "or assigned to an attribute of the main object directly.") .format(interior)) export_captures.append(mapped_resource) return export_captures def _map_function_arguments_to_created_inputs( function_arguments, signature_key, function_name): """Creates exterior placeholders in the exported graph for function arguments. Functions have two types of inputs: tensors captured from the outside (eager) context, and arguments to the function which we expect to receive from the user at each call. `_map_captures_to_created_tensors` replaces captured tensors with stand-ins (typically these are resource dtype tensors associated with variables). `_map_function_inputs_to_created_inputs` runs over every argument, creating a new placeholder for each which will belong to the exported graph rather than the function body. Args: function_arguments: A list of argument placeholders in the function body. signature_key: The name of the signature being exported, for error messages. function_name: The name of the function, for error messages. Returns: A tuple of (mapped_inputs, exterior_placeholders) mapped_inputs: A list with entries corresponding to `function_arguments` containing all of the inputs of the function gathered from the exported graph (both captured resources and arguments). exterior_argument_placeholders: A dictionary mapping from argument names to placeholders in the exported graph, containing the explicit arguments to the function which a user is expected to provide. Raises: ValueError: If argument names are not unique. """ # `exterior_argument_placeholders` holds placeholders which are outside the # function body, directly contained in a MetaGraph of the SavedModel. The # function body itself contains nearly identical placeholders used when # running the function, but these exterior placeholders allow Session-based # APIs to call the function using feeds and fetches which name Tensors in the # MetaGraph. exterior_argument_placeholders = {} mapped_inputs = [] for placeholder in function_arguments: # `export_captures` contains an exhaustive set of captures, so if we don't # find the input there then we now know we have an argument. user_input_name = compat.as_str_any( placeholder.op.get_attr("_user_specified_name")) # If the internal placeholders for a function have names which were # uniquified by TensorFlow, then a single user-specified argument name # must refer to multiple Tensors. The resulting signatures would be # confusing to call. Instead, we throw an exception telling the user to # specify explicit names. if user_input_name != placeholder.op.name: # This should be unreachable, since concrete functions may not be # generated with non-unique argument names. raise ValueError( ("Got non-flat/non-unique argument names for SavedModel " "signature '{}': more than one argument to '{}' was named '{}'. " "Signatures have one Tensor per named input, so to have " "predictable names Python functions used to generate these " "signatures should avoid *args and Tensors in nested " "structures unless unique names are specified for each. Use " "tf.TensorSpec(..., name=...) 
to provide a name for a Tensor " "input.") .format(signature_key, compat.as_str_any(function_name), user_input_name)) arg_placeholder = array_ops.placeholder( shape=placeholder.shape, dtype=placeholder.dtype, name="{}_{}".format(signature_key, user_input_name)) exterior_argument_placeholders[user_input_name] = arg_placeholder mapped_inputs.append(arg_placeholder) return mapped_inputs, exterior_argument_placeholders def _call_function_with_mapped_captures(function, args, resource_map): """Calls `function` in the exported graph, using mapped resource captures.""" export_captures = _map_captures_to_created_tensors( function.graph.captures, resource_map) # Calls the function quite directly, since we have new captured resource # tensors we need to feed in which weren't part of the original function # definition. # pylint: disable=protected-access outputs = function._call_flat(args, export_captures) # pylint: enable=protected-access return outputs def _generate_signatures(signature_functions, resource_map): """Validates and calls `signature_functions` in the default graph. Args: signature_functions: A dictionary mapping string keys to concrete TensorFlow functions (e.g. from `signature_serialization.canonicalize_signatures`) which will be used to generate SignatureDefs. resource_map: A dictionary mapping from resource tensors in the eager context to resource tensors in the Graph being exported. This dictionary is used to re-bind resources captured by functions to tensors which will exist in the SavedModel. Returns: Each function in the `signature_functions` dictionary is called with placeholder Tensors, generating a function call operation and output Tensors. The placeholder Tensors, the function call operation, and the output Tensors from the function call are part of the default Graph. This function then returns a dictionary with the same structure as `signature_functions`, with the concrete functions replaced by SignatureDefs implicitly containing information about how to call each function from a TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference the generated placeholders and Tensor outputs by name. The caller is expected to include the default Graph set while calling this function as a MetaGraph in a SavedModel, including the returned SignatureDefs as part of that MetaGraph. """ signatures = {} for signature_key, function in sorted(signature_functions.items()): if function.graph.captures: argument_inputs = function.graph.inputs[:-len(function.graph.captures)] else: argument_inputs = function.graph.inputs mapped_inputs, exterior_argument_placeholders = ( _map_function_arguments_to_created_inputs( argument_inputs, signature_key, function.name)) outputs = _call_function_with_mapped_captures( function, mapped_inputs, resource_map) signatures[signature_key] = signature_def_utils.build_signature_def( _tensor_dict_to_tensorinfo(exterior_argument_placeholders), _tensor_dict_to_tensorinfo(outputs), method_name=signature_constants.PREDICT_METHOD_NAME) return signatures def _trace_resource_initializers(accessible_objects): """Create concrete functions from `CapturableResource` objects.""" resource_initializers = [] def _wrap_initializer(obj): obj._initialize() # pylint: disable=protected-access return constant_op.constant(1.) 
# Dummy control output def _wrap_obj_initializer(obj): return lambda: _wrap_initializer(obj) for obj in accessible_objects: if isinstance(obj, tracking.CapturableResource): resource_initializers.append(def_function.function( _wrap_obj_initializer(obj), # All inputs are captures. input_signature=[]).get_concrete_function()) return resource_initializers _AssetInfo = collections.namedtuple( "_AssetInfo", [ # List of AssetFileDef protocol buffers "asset_defs", # Map from asset variable resource Tensors to their init ops "asset_initializers_by_resource", # Map from base asset filenames to full paths "asset_filename_map", # Map from TrackableAsset to index of corresponding AssetFileDef "asset_index"]) def _process_asset(trackable_asset, asset_info, resource_map): """Add `trackable_asset` to `asset_info` and `resource_map`.""" original_path_tensor = trackable_asset.asset_path original_path = tensor_util.constant_value(original_path_tensor) try: original_path = str(original_path.astype(str)) except AttributeError: # Already a string rather than a numpy array pass path = builder_impl.get_asset_filename_to_add( asset_filepath=original_path, asset_filename_map=asset_info.asset_filename_map) # TODO(andresp): Instead of mapping 1-1 between trackable asset # and asset in the graph def consider deduping the assets that # point to the same file. asset_path_initializer = array_ops.placeholder( shape=original_path_tensor.shape, dtype=dtypes.string, name="asset_path_initializer") asset_variable = resource_variable_ops.ResourceVariable( asset_path_initializer) asset_info.asset_filename_map[path] = original_path asset_def = meta_graph_pb2.AssetFileDef() asset_def.filename = path asset_def.tensor_info.name = asset_path_initializer.name asset_info.asset_defs.append(asset_def) asset_info.asset_initializers_by_resource[original_path_tensor] = ( asset_variable.initializer) asset_info.asset_index[trackable_asset] = len(asset_info.asset_defs) - 1 resource_map[original_path_tensor] = asset_variable def _fill_meta_graph_def(meta_graph_def, saveable_view, signature_functions): """Generates a MetaGraph which calls `signature_functions`. Args: meta_graph_def: The MetaGraphDef proto to fill. saveable_view: The _SaveableView being exported. signature_functions: A dictionary mapping signature keys to concrete functions containing signatures to add to the MetaGraph. Returns: An _AssetInfo, which contains information to help creating the SavedModel. """ # List objects from the eager context to make sure Optimizers give us the # right Graph-dependent variables. 
accessible_objects = saveable_view.nodes resource_initializer_functions = _trace_resource_initializers( accessible_objects) exported_graph = ops.Graph() resource_initializer_ops = [] with exported_graph.as_default(): object_map, resource_map, asset_info = saveable_view.map_resources() for resource_initializer_function in resource_initializer_functions: asset_dependencies = [] for capture in resource_initializer_function.graph.external_captures: asset_initializer = asset_info.asset_initializers_by_resource.get( capture, None) if asset_initializer is not None: asset_dependencies.append(asset_initializer) with ops.control_dependencies(asset_dependencies): resource_initializer_ops.append( _call_function_with_mapped_captures( resource_initializer_function, [], resource_map)) resource_initializer_ops.extend( asset_info.asset_initializers_by_resource.values()) with ops.control_dependencies(resource_initializer_ops): init_op = control_flow_ops.no_op() # Add the same op to the main_op collection and to the init_op # signature. The collection is for compatibility with older loader APIs; # only one will be executed. meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append( init_op.name) meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom( signature_def_utils.op_signature_def( init_op, constants.INIT_OP_SIGNATURE_KEY)) # Saving an object-based checkpoint again gathers variables. We need to do the # gathering from the eager context so Optimizers save the right set of # variables, but want any operations associated with the save/restore to be in # the exported graph (thus the `to_graph` argument). saver = functional_saver.MultiDeviceSaver( saveable_view.checkpoint_view.frozen_saveable_objects( object_map=object_map, to_graph=exported_graph)) with exported_graph.as_default(): signatures = _generate_signatures(signature_functions, resource_map) for concrete_function in saveable_view.concrete_functions: concrete_function.add_to_graph() saver_def = saver.to_proto() meta_graph_def.saver_def.CopyFrom(saver_def) graph_def = exported_graph.as_graph_def(add_shapes=True) meta_graph_def.graph_def.CopyFrom(graph_def) meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING) meta_graph_def.meta_info_def.tensorflow_version = versions.__version__ meta_graph_def.meta_info_def.tensorflow_git_version = ( versions.__git_version__) # We currently always strip default attributes. meta_graph_def.meta_info_def.stripped_default_attrs = True meta_graph_def.meta_info_def.stripped_op_list.MergeFrom( meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def)) meta_graph_def.asset_file_def.extend(asset_info.asset_defs) for signature_key, signature in signatures.items(): meta_graph_def.signature_def[signature_key].CopyFrom(signature) meta_graph.strip_graph_default_valued_attrs(meta_graph_def) return asset_info, exported_graph def _serialize_object_graph(saveable_view, asset_file_def_index): """Save a SavedObjectGraph proto for `root`.""" # SavedObjectGraph is similar to the TrackableObjectGraph proto in the # checkpoint. It will eventually go into the SavedModel. 
proto = saved_object_graph_pb2.SavedObjectGraph() saveable_view.fill_object_graph_proto(proto) coder = nested_structure_coder.StructureCoder() for concrete_function in saveable_view.concrete_functions: serialized = function_serialization.serialize_concrete_function( concrete_function, saveable_view.captured_tensor_node_ids, coder) if serialized is not None: proto.concrete_functions[concrete_function.name].CopyFrom( serialized) for obj, obj_proto in zip(saveable_view.nodes, proto.nodes): _write_object_proto(obj, obj_proto, asset_file_def_index) return proto def _write_object_proto(obj, proto, asset_file_def_index): """Saves an object into SavedObject proto.""" if isinstance(obj, tracking.TrackableAsset): proto.asset.SetInParent() proto.asset.asset_file_def_index = asset_file_def_index[obj] elif resource_variable_ops.is_resource_variable(obj): proto.variable.SetInParent() if not obj.name.endswith(":0"): raise ValueError("Cowardly refusing to save variable %s because of" " unexpected suffix which won't be restored.") proto.variable.name = meta_graph._op_name(obj.name) # pylint: disable=protected-access proto.variable.trainable = obj.trainable proto.variable.dtype = obj.dtype.as_datatype_enum proto.variable.synchronization = obj.synchronization.value proto.variable.aggregation = obj.aggregation.value proto.variable.shape.CopyFrom(obj.shape.as_proto()) elif isinstance(obj, def_function.Function): proto.function.CopyFrom( function_serialization.serialize_function(obj)) elif isinstance(obj, defun.ConcreteFunction): proto.bare_concrete_function.CopyFrom( function_serialization.serialize_bare_concrete_function(obj)) elif isinstance(obj, _CapturedConstant): proto.constant.operation = obj.graph_tensor.op.name elif isinstance(obj, tracking.CapturableResource): proto.resource.device = obj._resource_device # pylint: disable=protected-access else: registered_type_proto = revived_types.serialize(obj) if registered_type_proto is None: # Fallback for types with no matching registration # pylint:disable=protected-access registered_type_proto = saved_object_graph_pb2.SavedUserObject( identifier=obj._object_identifier, version=versions_pb2.VersionDef( producer=1, min_consumer=1, bad_consumers=[]), metadata=obj._tracking_metadata) # pylint:enable=protected-access proto.user_object.CopyFrom(registered_type_proto) @tf_export("saved_model.save", v1=["saved_model.save", "saved_model.experimental.save"]) def save(obj, export_dir, signatures=None): # pylint: disable=line-too-long """Exports the Trackable object `obj` to [SavedModel format](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md). Example usage: ```python class Adder(tf.Module): @tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)]) def add(self, x): return x + x + 1. to_export = Adder() tf.saved_model.save(to_export, '/tmp/adder') ``` The resulting SavedModel is then servable with an input named "x", its value having any shape and dtype float32. The optional `signatures` argument controls which methods in `obj` will be available to programs which consume `SavedModel`s, for example serving APIs. Python functions may be decorated with `@tf.function(input_signature=...)` and passed as signatures directly, or lazily with a call to `get_concrete_function` on the method decorated with `@tf.function`. If the `signatures` argument is omitted, `obj` will be searched for `@tf.function`-decorated methods. 
If exactly one `@tf.function` is found, that method will be used as the default signature for the SavedModel. This behavior is expected to change in the future, when a corresponding `tf.saved_model.load` symbol is added. At that point signatures will be completely optional, and any `@tf.function` attached to `obj` or its dependencies will be exported for use with `load`. When invoking a signature in an exported SavedModel, `Tensor` arguments are identified by name. These names will come from the Python function's argument names by default. They may be overridden by specifying a `name=...` argument in the corresponding `tf.TensorSpec` object. Explicit naming is required if multiple `Tensor`s are passed through a single argument to the Python function. The outputs of functions used as `signatures` must either be flat lists, in which case outputs will be numbered, or a dictionary mapping string keys to `Tensor`, in which case the keys will be used to name outputs. Signatures are available in objects returned by `tf.saved_model.load` as a `.signatures` attribute. This is a reserved attribute: `tf.saved_model.save` on an object with a custom `.signatures` attribute will raise an exception. Since `tf.keras.Model` objects are also Trackable, this function can be used to export Keras models. For example, exporting with a signature specified: ```python class Model(tf.keras.Model): @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)]) def serve(self, serialized): ... m = Model() tf.saved_model.save(m, '/tmp/saved_model/') ``` Exporting from a function without a fixed signature: ```python class Model(tf.keras.Model): @tf.function def call(self, x): ... m = Model() tf.saved_model.save( m, '/tmp/saved_model/', signatures=m.call.get_concrete_function( tf.TensorSpec(shape=[None, 3], dtype=tf.float32, name="inp"))) ``` `tf.keras.Model` instances constructed from inputs and outputs already have a signature and so do not require a `@tf.function` decorator or a `signatures` argument. If neither are specified, the model's forward pass is exported. ```python x = input_layer.Input((4,), name="x") y = core.Dense(5, name="out")(x) model = training.Model(x, y) tf.saved_model.save(model, '/tmp/saved_model/') # The exported SavedModel takes "x" with shape [None, 4] and returns "out" # with shape [None, 5] ``` Variables must be tracked by assigning them to an attribute of a tracked object or to an attribute of `obj` directly. TensorFlow objects (e.g. layers from `tf.keras.layers`, optimizers from `tf.train`) track their variables automatically. This is the same tracking scheme that `tf.train.Checkpoint` uses, and an exported `Checkpoint` object may be restored as a training checkpoint by pointing `tf.train.Checkpoint.restore` to the SavedModel's "variables/" subdirectory. Currently variables are the only stateful objects supported by `tf.saved_model.save`, but others (e.g. tables) will be supported in the future. `tf.function` does not hard-code device annotations from outside the function body, instead using the calling context's device. This means for example that exporting a model which runs on a GPU and serving it on a CPU will generally work, with some exceptions. `tf.device` annotations inside the body of the function will be hard-coded in the exported model; this type of annotation is discouraged. Device-specific operations, e.g. with "cuDNN" in the name or with device-specific layouts, may cause issues. 
Currently a `DistributionStrategy` is another exception: active distribution strategies will cause device placements to be hard-coded in a function. Exporting a single-device computation and importing under a `DistributionStrategy` is not currently supported, but may be in the future. SavedModels exported with `tf.saved_model.save` [strip default-valued attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes) automatically, which removes one source of incompatibilities when the consumer of a SavedModel is running an older TensorFlow version than the producer. There are however other sources of incompatibilities which are not handled automatically, such as when the exported model contains operations which the consumer does not have definitions for. Args: obj: A trackable object to export. export_dir: A directory in which to write the SavedModel. signatures: Optional, either a `tf.function` with an input signature specified or the result of `f.get_concrete_function` on a `@tf.function`-decorated function `f`, in which case `f` will be used to generate a signature for the SavedModel under the default serving signature key. `signatures` may also be a dictionary, in which case it maps from signature keys to either `tf.function` instances with input signatures or concrete functions. The keys of such a dictionary may be arbitrary strings, but will typically be from the `tf.saved_model.signature_constants` module. Raises: ValueError: If `obj` is not trackable. @compatibility(eager) Not well supported when graph building. From TensorFlow 1.x, `tf.compat.v1.enable_eager_execution()` should run first. Calling tf.saved_model.save in a loop when graph building from TensorFlow 1.x will add new save operations to the default graph each iteration. May not be called from within a function body. @end_compatibility """ if ops.inside_function(): raise AssertionError( "tf.saved_model.save is not supported inside a traced " "@tf.function. Move the call to the outer eagerly-executed " "context.") # pylint: enable=line-too-long if not isinstance(obj, base.Trackable): raise ValueError( "Expected a Trackable object for export, got {}.".format(obj)) checkpoint_graph_view = _AugmentedGraphView(obj) if signatures is None: signatures = signature_serialization.find_function_to_export( checkpoint_graph_view) signatures = signature_serialization.canonicalize_signatures(signatures) signature_serialization.validate_saveable_view(checkpoint_graph_view) signature_map = signature_serialization.create_signature_map(signatures) checkpoint_graph_view.add_object( parent_node=checkpoint_graph_view.root, name_in_parent=signature_serialization.SIGNATURE_ATTRIBUTE_NAME, subgraph_root=signature_map) # Use _SaveableView to provide a frozen listing of properties and functions. # Note we run this twice since, while constructing the view the first time # there can be side effects of creating variables. _ = _SaveableView(checkpoint_graph_view) saveable_view = _SaveableView(checkpoint_graph_view) # TODO(allenl): Factor out some subset of SavedModelBuilder which is 2.x # compatible (no sessions) and share it with this export API rather than # making a SavedModel proto and writing it directly. 
saved_model = saved_model_pb2.SavedModel() meta_graph_def = saved_model.meta_graphs.add() object_saver = util.TrackableSaver(checkpoint_graph_view) asset_info, exported_graph = _fill_meta_graph_def( meta_graph_def, saveable_view, signatures) saved_model.saved_model_schema_version = ( constants.SAVED_MODEL_SCHEMA_VERSION) # So far we've just been generating protocol buffers with no I/O. Now we write # the checkpoint, copy assets into the assets directory, and write out the # SavedModel proto itself. utils_impl.get_or_create_variables_dir(export_dir) object_saver.save(utils_impl.get_variables_path(export_dir)) builder_impl.copy_assets_to_destination_dir(asset_info.asset_filename_map, export_dir) path = os.path.join( compat.as_str(export_dir), compat.as_str(constants.SAVED_MODEL_FILENAME_PB)) object_graph_proto = _serialize_object_graph( saveable_view, asset_info.asset_index) meta_graph_def.object_graph_def.CopyFrom(object_graph_proto) file_io.atomic_write_string_to_file(path, saved_model.SerializeToString()) # Clean reference cycles so repeated export()s don't make work for the garbage # collector. Before this point we need to keep references to captured # constants in the saved graph. ops.dismantle_graph(exported_graph)
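

# NOTE: The following block is an illustrative usage sketch, not part of the
# original module. It mirrors the behavior described in the `save()` docstring
# above: a `tf.Module` with a `@tf.function` method is exported with an
# explicit `signatures` dictionary. The class name and export directory are
# hypothetical, and the block only runs when this file is executed directly.
if __name__ == "__main__":
  import tensorflow as tf  # Public API; assumed importable here.

  # As noted in the `save()` docstring, eager execution should be enabled
  # first when starting from TensorFlow 1.x graph mode.
  tf.compat.v1.enable_eager_execution()

  class _ExampleScaler(tf.Module):
    """Tiny module with one variable and one traced method."""

    def __init__(self):
      super(_ExampleScaler, self).__init__()
      self.scale = tf.Variable(3.0)

    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None], dtype=tf.float32)])
    def scale_up(self, x):
      return x * self.scale

  example = _ExampleScaler()
  # The keys of `signatures` become SignatureDef keys in the SavedModel.
  tf.saved_model.save(
      example, "/tmp/example_scaler",
      signatures={"serving_default": example.scale_up})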
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/save.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common tags used for graphs in SavedModel. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.util.tf_export import tf_export # Tag for the `serving` graph. SERVING = "serve" tf_export( "saved_model.SERVING", v1=["saved_model.SERVING", "saved_model.tag_constants.SERVING"]).export_constant( __name__, "SERVING") # Tag for the `training` graph. TRAINING = "train" tf_export( "saved_model.TRAINING", v1=["saved_model.TRAINING", "saved_model.tag_constants.TRAINING"]).export_constant( __name__, "TRAINING") # Tag for the `eval` graph. Not exported while the export logic is in contrib. EVAL = "eval" # Tag for the `gpu` graph. GPU = "gpu" tf_export( "saved_model.GPU", v1=["saved_model.GPU", "saved_model.tag_constants.GPU"]).export_constant( __name__, "GPU") # Tag for the `tpu` graph. TPU = "tpu" tf_export( "saved_model.TPU", v1=["saved_model.TPU", "saved_model.tag_constants.TPU"]).export_constant( __name__, "TPU")
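

# NOTE: Illustrative sketch, not part of the original module, showing how
# these tags are typically consumed through the TF 1.x loader API. The export
# directory below is hypothetical; the block only runs when this file is
# executed directly.
if __name__ == "__main__":
  import tensorflow.compat.v1 as tf_v1  # Assumed importable here.

  with tf_v1.Session(graph=tf_v1.Graph()) as sess:
    # Loads the MetaGraph that was exported with the `serve` tag into `sess`.
    tf_v1.saved_model.loader.load(sess, [SERVING], "/tmp/example_saved_model")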
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/tag_constants.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SavedModel utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import struct_pb2 from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import test from tensorflow.python.saved_model import nested_structure_coder from tensorflow.python.saved_model import utils class UtilsTest(test.TestCase): @test_util.run_v1_only("b/120545219") def testBuildTensorInfoOp(self): x = constant_op.constant(1, name="x") y = constant_op.constant(2, name="y") z = control_flow_ops.group([x, y], name="op_z") z_op_info = utils.build_tensor_info_from_op(z) self.assertEqual("op_z", z_op_info.name) self.assertEqual(types_pb2.DT_INVALID, z_op_info.dtype) self.assertEqual(0, len(z_op_info.tensor_shape.dim)) @test_util.run_v1_only("b/120545219") def testBuildTensorInfoDefunOp(self): @function.defun def my_init_fn(x, y): self.x_var = x self.y_var = y x = constant_op.constant(1, name="x") y = constant_op.constant(2, name="y") init_op_info = utils.build_tensor_info_from_op(my_init_fn(x, y)) self.assertEqual("PartitionedFunctionCall", init_op_info.name) self.assertEqual(types_pb2.DT_INVALID, init_op_info.dtype) self.assertEqual(0, len(init_op_info.tensor_shape.dim)) @test_util.run_v1_only("b/120545219") def testBuildTensorInfoDense(self): x = array_ops.placeholder(dtypes.float32, 1, name="x") x_tensor_info = utils.build_tensor_info(x) self.assertEqual("x:0", x_tensor_info.name) self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype) self.assertEqual(1, len(x_tensor_info.tensor_shape.dim)) self.assertEqual(1, x_tensor_info.tensor_shape.dim[0].size) @test_util.run_v1_only("b/120545219") def testBuildTensorInfoSparse(self): x = array_ops.sparse_placeholder(dtypes.float32, [42, 69], name="x") x_tensor_info = utils.build_tensor_info(x) self.assertEqual(x.values.name, x_tensor_info.coo_sparse.values_tensor_name) self.assertEqual(x.indices.name, x_tensor_info.coo_sparse.indices_tensor_name) self.assertEqual(x.dense_shape.name, x_tensor_info.coo_sparse.dense_shape_tensor_name) self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype) self.assertEqual(2, len(x_tensor_info.tensor_shape.dim)) self.assertEqual(42, x_tensor_info.tensor_shape.dim[0].size) self.assertEqual(69, x_tensor_info.tensor_shape.dim[1].size) @test_util.run_v1_only("b/120545219") 
def testBuildTensorInfoRagged(self): x = ragged_factory_ops.constant([[1, 2], [3]]) x_tensor_info = utils.build_tensor_info(x) # Check components self.assertEqual(x.values.name, x_tensor_info.composite_tensor.components[0].name) self.assertEqual(types_pb2.DT_INT32, x_tensor_info.composite_tensor.components[0].dtype) self.assertEqual(x.row_splits.name, x_tensor_info.composite_tensor.components[1].name) self.assertEqual(types_pb2.DT_INT64, x_tensor_info.composite_tensor.components[1].dtype) # Check type_spec. struct_coder = nested_structure_coder.StructureCoder() spec_proto = struct_pb2.StructuredValue( type_spec_value=x_tensor_info.composite_tensor.type_spec) spec = struct_coder.decode_proto(spec_proto) self.assertEqual(spec, x._type_spec) def testBuildTensorInfoEager(self): x = constant_op.constant(1, name="x") with context.eager_mode(), self.assertRaisesRegexp( RuntimeError, "build_tensor_info is not supported in Eager mode"): utils.build_tensor_info(x) @test_util.run_v1_only("b/120545219") def testGetTensorFromInfoDense(self): expected = array_ops.placeholder(dtypes.float32, 1, name="x") tensor_info = utils.build_tensor_info(expected) actual = utils.get_tensor_from_tensor_info(tensor_info) self.assertIsInstance(actual, ops.Tensor) self.assertEqual(expected.name, actual.name) @test_util.run_v1_only("b/120545219") def testGetTensorFromInfoSparse(self): expected = array_ops.sparse_placeholder(dtypes.float32, name="x") tensor_info = utils.build_tensor_info(expected) actual = utils.get_tensor_from_tensor_info(tensor_info) self.assertIsInstance(actual, sparse_tensor.SparseTensor) self.assertEqual(expected.values.name, actual.values.name) self.assertEqual(expected.indices.name, actual.indices.name) self.assertEqual(expected.dense_shape.name, actual.dense_shape.name) def testGetTensorFromInfoInOtherGraph(self): with ops.Graph().as_default() as expected_graph: expected = array_ops.placeholder(dtypes.float32, 1, name="right") tensor_info = utils.build_tensor_info(expected) with ops.Graph().as_default(): # Some other graph. array_ops.placeholder(dtypes.float32, 1, name="other") actual = utils.get_tensor_from_tensor_info(tensor_info, graph=expected_graph) self.assertIsInstance(actual, ops.Tensor) self.assertIs(actual.graph, expected_graph) self.assertEqual(expected.name, actual.name) def testGetTensorFromInfoInScope(self): # Build a TensorInfo with name "bar/x:0". with ops.Graph().as_default(): with ops.name_scope("bar"): unscoped = array_ops.placeholder(dtypes.float32, 1, name="x") tensor_info = utils.build_tensor_info(unscoped) self.assertEqual("bar/x:0", tensor_info.name) # Build a graph with node "foo/bar/x:0", akin to importing into scope foo. with ops.Graph().as_default(): with ops.name_scope("foo"): with ops.name_scope("bar"): expected = array_ops.placeholder(dtypes.float32, 1, name="x") self.assertEqual("foo/bar/x:0", expected.name) # Test that tensor is found by prepending the import scope. actual = utils.get_tensor_from_tensor_info(tensor_info, import_scope="foo") self.assertEqual(expected.name, actual.name) @test_util.run_v1_only("b/120545219") def testGetTensorFromInfoRaisesErrors(self): expected = array_ops.placeholder(dtypes.float32, 1, name="x") tensor_info = utils.build_tensor_info(expected) tensor_info.name = "blah:0" # Nonexistant name. with self.assertRaises(KeyError): utils.get_tensor_from_tensor_info(tensor_info) tensor_info.ClearField("name") # Malformed (missing encoding). 
with self.assertRaises(ValueError): utils.get_tensor_from_tensor_info(tensor_info) if __name__ == "__main__": test.main()
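

# NOTE: Illustrative helper, not part of the original test file and never
# called by the tests above. It sketches the non-test usage pattern these
# cases exercise: build `TensorInfo` protos from graph tensors and assemble
# them into a `SignatureDef`. The tensor names are hypothetical.
def _example_build_signature_def():
  from tensorflow.python.saved_model import signature_constants
  from tensorflow.python.saved_model import signature_def_utils

  with ops.Graph().as_default():
    x = array_ops.placeholder(dtypes.float32, [None, 3], name="x")
    y = array_ops.placeholder(dtypes.float32, [None, 1], name="y")
    # `build_tensor_info` records each tensor's name, dtype and shape.
    return signature_def_utils.build_signature_def(
        inputs={"x": utils.build_tensor_info(x)},
        outputs={"y": utils.build_tensor_info(y)},
        method_name=signature_constants.PREDICT_METHOD_NAME)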
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/utils_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for revived type matching.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import versions_pb2 from tensorflow.core.protobuf import saved_object_graph_pb2 from tensorflow.python.platform import test from tensorflow.python.saved_model import revived_types from tensorflow.python.training.tracking import tracking class CustomTestClass(tracking.AutoTrackable): def __init__(self, version): self.version = version revived_types.register_revived_type( "test_type", lambda obj: isinstance(obj, CustomTestClass), versions=[ revived_types.VersionedTypeRegistration( object_factory=lambda _: CustomTestClass(1), version=1, min_producer_version=1, min_consumer_version=1), revived_types.VersionedTypeRegistration( object_factory=lambda _: CustomTestClass(2), version=2, min_producer_version=2, min_consumer_version=1), revived_types.VersionedTypeRegistration( object_factory=lambda _: CustomTestClass(3), version=3, min_producer_version=3, min_consumer_version=2), revived_types.VersionedTypeRegistration( object_factory=lambda _: CustomTestClass(4), version=4, min_producer_version=4, min_consumer_version=2, bad_consumers=[3]), ] ) class RegistrationMatchingTest(test.TestCase): def test_save_typecheck(self): self.assertIs(revived_types.serialize(tracking.AutoTrackable()), None) def test_load_identifier_not_found(self): nothing_matches = revived_types.deserialize( saved_object_graph_pb2.SavedUserObject( identifier="_unregistered_type", version=versions_pb2.VersionDef( producer=1, min_consumer=1, bad_consumers=[]))) self.assertIs(nothing_matches, None) def test_most_recent_version_saved(self): serialized = revived_types.serialize(CustomTestClass(None)) self.assertEqual([3], serialized.version.bad_consumers) deserialized, _ = revived_types.deserialize(serialized) self.assertIsInstance(deserialized, CustomTestClass) self.assertEqual(4, deserialized.version) def test_min_consumer_version(self): nothing_matches = revived_types.deserialize( saved_object_graph_pb2.SavedUserObject( identifier="test_type", version=versions_pb2.VersionDef( producer=5, min_consumer=5, bad_consumers=[]))) self.assertIs(nothing_matches, None) def test_bad_versions(self): deserialized, _ = revived_types.deserialize( saved_object_graph_pb2.SavedUserObject( identifier="test_type", version=versions_pb2.VersionDef( producer=5, min_consumer=1, bad_consumers=[4, 3]))) self.assertEqual(2, deserialized.version) def test_min_producer_version(self): deserialized, _ = revived_types.deserialize( saved_object_graph_pb2.SavedUserObject( identifier="test_type", version=versions_pb2.VersionDef( producer=3, min_consumer=0, bad_consumers=[]))) self.assertEqual(3, deserialized.version) if __name__ == "__main__": test.main()
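

# NOTE: Illustrative sketch, not part of the original test file and never
# called by it. It shows the registration pattern exercised above for a
# hypothetical user-defined trackable type; wrapping it in a function keeps
# importing this module free of extra registry side effects.
def _example_register_revived_type():

  class ExampleTrackable(tracking.AutoTrackable):
    pass

  revived_types.register_revived_type(
      "example_type",
      lambda obj: isinstance(obj, ExampleTrackable),
      versions=[
          revived_types.VersionedTypeRegistration(
              # `proto` is the SavedUserObject being revived; unused here.
              object_factory=lambda proto: ExampleTrackable(),
              version=1,
              min_producer_version=1,
              min_consumer_version=1)
      ])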
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/revived_types_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for nested structure coding.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from google.protobuf import text_format from tensorflow.core.protobuf import struct_pb2 from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import test from tensorflow.python.saved_model import nested_structure_coder class NestedStructureTest(test.TestCase): def setUp(self): self._coder = nested_structure_coder.StructureCoder() def testEncodeDecodeList(self): structure = [1.5, 2.5, 3.0] self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected = struct_pb2.StructuredValue() expected.list_value.values.add().float64_value = 1.5 expected.list_value.values.add().float64_value = 2.5 expected.list_value.values.add().float64_value = 3.0 self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testEncodeDecodeTuple(self): structure = ("hello", [3, (2, 1)]) self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected = struct_pb2.StructuredValue() expected.tuple_value.values.add().string_value = "hello" list_value = expected.tuple_value.values.add().list_value list_value.values.add().int64_value = 3 tuple_value = list_value.values.add().tuple_value tuple_value.values.add().int64_value = 2 tuple_value.values.add().int64_value = 1 self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testEncodeDecodeDict(self): structure = dict(a=3, b=[7, 2.5]) self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected = struct_pb2.StructuredValue() expected.dict_value.fields["a"].int64_value = 3 list_value = expected.dict_value.fields["b"].list_value list_value.values.add().int64_value = 7 list_value.values.add().float64_value = 2.5 self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertIsInstance(decoded["a"], int) self.assertEqual(structure, decoded) def testEncodeDecodeTensorShape(self): structure = [tensor_shape.TensorShape([1, 2, 3]), "hello"] self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected = struct_pb2.StructuredValue() expected_list = expected.list_value expected_tensor_shape = expected_list.values.add().tensor_shape_value expected_tensor_shape.dim.add().size = 1 expected_tensor_shape.dim.add().size = 2 
expected_tensor_shape.dim.add().size = 3 expected_tensor_shape = expected_list.values.add().string_value = "hello" self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testEncodeDecodeNamedTuple(self): named_tuple_type = collections.namedtuple("NamedTuple", ["x", "y"]) named_tuple = named_tuple_type(x=[1, 2], y="hello") self.assertTrue(self._coder.can_encode(named_tuple)) encoded = self._coder.encode_structure(named_tuple) expected = struct_pb2.StructuredValue() expected_named_tuple = expected.named_tuple_value expected_named_tuple.name = "NamedTuple" key_value_pair = expected_named_tuple.values.add() key_value_pair.key = "x" list_value = key_value_pair.value.list_value list_value.values.add().int64_value = 1 list_value.values.add().int64_value = 2 key_value_pair = expected_named_tuple.values.add() key_value_pair.key = "y" key_value_pair.value.string_value = "hello" self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertEqual(named_tuple._asdict(), decoded._asdict()) self.assertEqual(named_tuple.__class__.__name__, decoded.__class__.__name__) def testNone(self): structure = [1.0, None] self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected = struct_pb2.StructuredValue() expected.list_value.values.add().float64_value = 1.0 expected.list_value.values.add().none_value.CopyFrom(struct_pb2.NoneValue()) self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testBool(self): structure = [False] self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected = struct_pb2.StructuredValue() expected.list_value.values.add().bool_value = False self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testEmptyStructures(self): structure = [list(), dict(), tuple()] self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected = struct_pb2.StructuredValue() expected.list_value.values.add().list_value.CopyFrom(struct_pb2.ListValue()) expected.list_value.values.add().dict_value.CopyFrom(struct_pb2.DictValue()) expected.list_value.values.add().tuple_value.CopyFrom( struct_pb2.TupleValue()) self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testDtype(self): structure = [dtypes.int64] self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected = struct_pb2.StructuredValue() list_value = expected.list_value.values.add() list_value.tensor_dtype_value = dtypes.int64.as_datatype_enum self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testEncodeDecodeTensorSpec(self): structure = [tensor_spec.TensorSpec([1, 2, 3], dtypes.int64, "hello")] self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected = struct_pb2.StructuredValue() expected_list = expected.list_value expected_tensor_spec = expected_list.values.add().tensor_spec_value expected_tensor_spec.shape.dim.add().size = 1 expected_tensor_spec.shape.dim.add().size = 2 expected_tensor_spec.shape.dim.add().size = 3 expected_tensor_spec.name = "hello" expected_tensor_spec.dtype = dtypes.int64.as_datatype_enum self.assertEqual(expected, encoded) decoded = 
self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testEncodeDecodeTensorSpecWithNoName(self): structure = [tensor_spec.TensorSpec([1, 2, 3], dtypes.int64)] self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected = struct_pb2.StructuredValue() expected_list = expected.list_value expected_tensor_spec = expected_list.values.add().tensor_spec_value expected_tensor_spec.shape.dim.add().size = 1 expected_tensor_spec.shape.dim.add().size = 2 expected_tensor_spec.shape.dim.add().size = 3 expected_tensor_spec.name = "" expected_tensor_spec.dtype = dtypes.int64.as_datatype_enum self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testEncodeDecodeRaggedTensorSpec(self): structure = [ragged_tensor.RaggedTensorSpec( [1, 2, 3], dtypes.int64, 2, dtypes.int32)] self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected_pbtxt = r""" list_value { values { type_spec_value { type_spec_class: RAGGED_TENSOR_SPEC type_spec_class_name: 'RaggedTensorSpec' type_state { tuple_value { # spec._shape values { tensor_shape_value { dim { size: 1 } dim { size: 2 } dim { size: 3 } } } # spec._dtype values { tensor_dtype_value: DT_INT64 } # spec._ragged_rank values { int64_value: 2 } # spec._row_splits_dtype values { tensor_dtype_value: DT_INT32 } } } } } } """ expected = struct_pb2.StructuredValue() text_format.Parse(expected_pbtxt, expected) self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testEncodeDecodeSparseTensorSpec(self): structure = [sparse_tensor.SparseTensorSpec([10, 20], dtypes.float32)] self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) expected_pbtxt = r""" list_value { values { type_spec_value { type_spec_class: SPARSE_TENSOR_SPEC type_spec_class_name: 'SparseTensorSpec' type_state { tuple_value { # spec._shape values { tensor_shape_value { dim { size: 10 } dim { size: 20 } } } # spec._dtype values { tensor_dtype_value: DT_FLOAT } } } } } } """ expected = struct_pb2.StructuredValue() text_format.Parse(expected_pbtxt, expected) self.assertEqual(expected, encoded) decoded = self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testDecodeUnknownTensorSpec(self): encoded = struct_pb2.StructuredValue() encoded.type_spec_value.type_spec_class = 0 encoded.type_spec_value.type_spec_class_name = "FutureTensorSpec" with self.assertRaisesRegexp( ValueError, "The type 'FutureTensorSpec' is not supported"): self._coder.decode_proto(encoded) def testEncodeDataSetSpec(self): structure = [dataset_ops.DatasetSpec( {"rt": ragged_tensor.RaggedTensorSpec([10, None], dtypes.int32), "st": sparse_tensor.SparseTensorSpec([10, 20], dtypes.float32), "t": tensor_spec.TensorSpec([10, 8], dtypes.string)})] self.assertTrue(self._coder.can_encode(structure)) encoded = self._coder.encode_structure(structure) decoded = self._coder.decode_proto(encoded) self.assertEqual(structure, decoded) def testNotEncodable(self): class NotEncodable(object): pass self.assertFalse(self._coder.can_encode([NotEncodable()])) if __name__ == "__main__": test.main()
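

# NOTE: Illustrative round-trip sketch, not part of the original test file and
# never called by it. It shows the encode/decode pattern the cases above
# verify, using only modules already imported in this file.
def _example_structure_round_trip():
  coder = nested_structure_coder.StructureCoder()
  structure = {
      "spec": tensor_spec.TensorSpec([None, 3], dtypes.float32, name="inp"),
      "shape": tensor_shape.TensorShape([1, 2, 3]),
      "labels": ["a", "b"],
  }
  assert coder.can_encode(structure)
  # `encode_structure` returns a `struct_pb2.StructuredValue` proto;
  # `decode_proto` rebuilds an equivalent Python structure from it.
  proto = coder.encode_structure(structure)
  return coder.decode_proto(proto)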
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/nested_structure_coder_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SavedModel builder implementation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os from google.protobuf.any_pb2 import Any from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.core.protobuf import saved_model_pb2 from tensorflow.core.protobuf import saver_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.lib.io import file_io from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging from tensorflow.python.saved_model import constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import utils_impl as saved_model_utils from tensorflow.python.training import saver as tf_saver from tensorflow.python.util import compat from tensorflow.python.util.deprecation import deprecated_args from tensorflow.python.util.tf_export import tf_export # Base class for the SavedModelBuilder that is only used by Tensorflow # internally. Please use tf.compat.v1.saved_model.SavedModelBuilder instead. class _SavedModelBuilder(object): """Builds the `SavedModel` protocol buffer and saves variables and assets. The `SavedModelBuilder` class provides functionality to build a `SavedModel` protocol buffer. Specifically, this allows multiple meta graphs to be saved as part of a single language-neutral `SavedModel`, while sharing variables and assets. To build a SavedModel, the first meta graph must be saved with variables. Subsequent meta graphs will simply be saved with their graph definitions. If assets need to be saved and written or copied to disk, they can be provided when the meta graph def is added. If multiple meta graph defs are associated an asset of the same name, only the first version is retained. Each meta graph added to the SavedModel must be annotated with tags. The tags provide a means to identify the specific meta graph to load and restore, along with the shared set of variables and assets. Typical usage for the `SavedModelBuilder`: ```python ... builder = tf.compat.v1.saved_model.Builder(export_dir) with tf.compat.v1.Session(graph=tf.Graph()) as sess: ... builder.add_meta_graph_and_variables(sess, ["foo-tag"], signature_def_map=foo_signatures, assets_list=foo_assets) ... with tf.compat.v1.Session(graph=tf.Graph()) as sess: ... builder.add_meta_graph(["bar-tag", "baz-tag"]) ... builder.save() ``` Note: This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.builder.SavedModelBuilder or tf.compat.v1.saved_model.Builder. Tensorflow 2.0 will introduce a new object-based method of creating SavedModels. 
""" def __init__(self, export_dir): self._saved_model = saved_model_pb2.SavedModel() self._saved_model.saved_model_schema_version = ( constants.SAVED_MODEL_SCHEMA_VERSION) self._export_dir = export_dir if file_io.file_exists(export_dir): if file_io.list_directory(export_dir): raise AssertionError( "Export directory already exists, and isn't empty. Please choose " "a different export directory, or delete all the contents of the " "specified directory: %s" % export_dir) else: file_io.recursive_create_dir(self._export_dir) # Boolean to track whether variables and assets corresponding to the # SavedModel have been saved. Specifically, the first meta graph to be added # MUST use the add_meta_graph_and_variables() API. Subsequent add operations # on the SavedModel MUST use the add_meta_graph() API which does not save # weights. self._has_saved_variables = False def _save_and_write_assets(self, meta_graph_def, assets_list=None): """Saves asset to the meta graph and writes asset files to disk. Args: meta_graph_def: The meta graph def to which the assets will be added. assets_list: The list where the asset paths are setup. """ # Creates a function that adds assets into the meta graph def. write_fn = functools.partial(_add_asset_to_metagraph, meta_graph_def) asset_filename_map = _maybe_save_assets(write_fn, assets_list) # Return if there are no assets to write. if not asset_filename_map: tf_logging.info("No assets to write.") return # Copy assets from source path to destination path. copy_assets_to_destination_dir(asset_filename_map, self._export_dir) def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map): """Tags the meta graph def and adds it to the SavedModel. Tags the meta graph def with the supplied tags, adds signature defs to it if provided and appends the meta graph def to the SavedModel proto. Args: meta_graph_def: The meta graph def to add to the SavedModel. tags: The set of tags to annotate the meta graph def with. signature_def_map: The map of signature defs to be added to the meta graph def. """ for tag in tags: meta_graph_def.meta_info_def.tags.append(tag) if signature_def_map is not None: for key in signature_def_map: meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key]) proto_meta_graph_def = self._saved_model.meta_graphs.add() proto_meta_graph_def.CopyFrom(meta_graph_def) def _validate_tensor_info(self, tensor_info): """Validates the `TensorInfo` proto. Checks if the `encoding` (`name` or `coo_sparse` or `type_spec`) and `dtype` fields exist and are non-empty. Args: tensor_info: `TensorInfo` protocol buffer to validate. Raises: AssertionError: If the `encoding` or `dtype` fields of the supplied `TensorInfo` proto are not populated. 
""" if tensor_info is None: raise AssertionError( "All TensorInfo protos used in the SignatureDefs must have the name " "and dtype fields set.") if tensor_info.WhichOneof("encoding") is None: # TODO(soergel) validate each of the fields of coo_sparse raise AssertionError( "All TensorInfo protos used in the SignatureDefs must have one of " "the 'encoding' fields (e.g., name or coo_sparse) set: %s" % tensor_info) if tensor_info.WhichOneof("encoding") == "composite_tensor": for component in tensor_info.composite_tensor.components: self._validate_tensor_info(component) elif tensor_info.dtype == types_pb2.DT_INVALID: raise AssertionError( "All TensorInfo protos used in the SignatureDefs must have the dtype " "field set: %s" % tensor_info) def _validate_signature_def_map(self, signature_def_map): """Validates the `SignatureDef` entries in the signature def map. Validation of entries in the signature def map includes ensuring that the `name` and `dtype` fields of the TensorInfo protos of the `inputs` and `outputs` of each `SignatureDef` are populated. Also ensures that reserved SigantureDef keys for the initialization and train ops are not used. Args: signature_def_map: The map of signature defs to be validated. Raises: AssertionError: If a TensorInfo is not valid. KeyError: If a reserved signature key is used in the map. """ for signature_def_key in signature_def_map: signature_def = signature_def_map[signature_def_key] inputs = signature_def.inputs outputs = signature_def.outputs for inputs_key in inputs: self._validate_tensor_info(inputs[inputs_key]) for outputs_key in outputs: self._validate_tensor_info(outputs[outputs_key]) if constants.INIT_OP_SIGNATURE_KEY in signature_def_map: raise KeyError( "SignatureDef map key \"{}\" is reserved for initialization. Please " "use a different key.".format(constants.INIT_OP_SIGNATURE_KEY)) if constants.TRAIN_OP_SIGNATURE_KEY in signature_def_map: raise KeyError( "SignatureDef map key \"{}\" is reserved for the train op. Please " "use a different key.".format(constants.TRAIN_OP_SIGNATURE_KEY)) def _maybe_create_saver(self, saver=None): """Creates a sharded saver if one does not already exist.""" if not saver: # Initialize a saver to generate a sharded output for all saveables in the # current scope. saver = tf_saver.Saver( variables._all_saveable_objects(), # pylint: disable=protected-access sharded=True, write_version=saver_pb2.SaverDef.V2, allow_empty=True) return saver def add_meta_graph(self, tags, signature_def_map=None, assets_list=None, clear_devices=False, init_op=None, train_op=None, saver=None): """Adds the current meta graph to the SavedModel. Creates a Saver in the current scope and uses the Saver to export the meta graph def. Invoking this API requires the `add_meta_graph_and_variables()` API to have been invoked before. Args: tags: The set of tags to annotate the meta graph def with. signature_def_map: The map of signature defs to be added to the meta graph def. assets_list: Assets to be saved with SavedModel. Note that this list should be a subset of the assets saved as part of the first meta graph in the SavedModel. clear_devices: Set to true if the device info on the default graph should be cleared. init_op: Op or group of ops to execute when the graph is loaded. Note that when the init_op is specified it is run after the restore op at load-time. train_op: Op or group of opts that trains the model when run. 
This will not be run automatically when the graph is loaded, instead saved in a SignatureDef accessible through the exported MetaGraph. saver: An instance of tf.compat.v1.train.Saver that will be used to export the metagraph. If None, a sharded Saver that restores all variables will be used. Raises: AssertionError: If the variables for the SavedModel have not been saved yet, or if the graph already contains one or more legacy init ops. """ if not self._has_saved_variables: raise AssertionError( "Graph state including variables and assets has not been saved yet. " "Please invoke `add_meta_graph_and_variables()` first.") # Validate the signature def map to ensure all included TensorInfos are # properly populated. signature_def_map = signature_def_map or {} self._validate_signature_def_map(signature_def_map) # Create a SignatureDef pointing to the graph initialization op, which will # be added to the MetaGraphDef. _add_op_to_signature_def_map(signature_def_map, init_op, constants.INIT_OP_SIGNATURE_KEY) _add_op_to_signature_def_map(signature_def_map, train_op, constants.TRAIN_OP_SIGNATURE_KEY) saver = self._maybe_create_saver(saver) # The graph almost certainly previously contained at least one Saver, and # possibly several (e.g. one for loading a pretrained embedding, and another # for the model weights). Removing the preexisting ones was the # motivation for the clear_extraneous_savers option, but it turns out that # there are edge cases where that option breaks the graph. Until that is # resolved, we just leave the option set to False for now. # TODO(soergel): Reinstate clear_extraneous_savers=True when possible. meta_graph_def = saver.export_meta_graph( clear_devices=clear_devices, strip_default_attrs=True) # Save asset files and write them to disk, if any. self._save_and_write_assets(meta_graph_def, assets_list) # Tag the meta graph def and add it to the SavedModel. self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map) def add_meta_graph_and_variables(self, sess, tags, signature_def_map=None, assets_list=None, clear_devices=False, init_op=None, train_op=None, strip_default_attrs=False, saver=None): # pylint: disable=line-too-long """Adds the current meta graph to the SavedModel and saves variables. Creates a Saver to save the variables from the provided session. Exports the corresponding meta graph def. This function assumes that the variables to be saved have been initialized. For a given `SavedModelBuilder`, this API must be called exactly once and for the first meta graph to save. For subsequent meta graph defs to be added, the `add_meta_graph()` API must be used. Args: sess: The TensorFlow session from which to save the meta graph and variables. tags: The set of tags with which to save the meta graph. signature_def_map: The map of signature def map to add to the meta graph def. assets_list: Assets to be saved with SavedModel. clear_devices: Set to true if the device info on the default graph should be cleared. init_op: Op or group of ops to execute when the graph is loaded. Note that when the init_op is specified it is run after the restore op at load-time. train_op: Op or group of ops that trains the model when run. This will not be run automatically when the graph is loaded, instead saved in a SignatureDef accessible through the exported MetaGraph. strip_default_attrs: Boolean. If `True`, default-valued attributes will be removed from the NodeDefs. 
For a detailed guide, see [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes). saver: An instance of tf.compat.v1.train.Saver that will be used to export the metagraph and save variables. If None, a sharded Saver that restores all variables will be used. """ # pylint: enable=line-too-long if self._has_saved_variables: raise AssertionError("Graph state including variables and assets has " "already been saved. Please invoke " "`add_meta_graph()` instead.") # Validate the signature def map to ensure all included TensorInfos are # properly populated. signature_def_map = signature_def_map or {} self._validate_signature_def_map(signature_def_map) # Create a SignatureDef pointing to the graph initialization op, which will # be added to the MetaGraphDef. _add_op_to_signature_def_map(signature_def_map, init_op, constants.INIT_OP_SIGNATURE_KEY) _add_op_to_signature_def_map(signature_def_map, train_op, constants.TRAIN_OP_SIGNATURE_KEY) saved_model_utils.get_or_create_variables_dir(self._export_dir) variables_path = saved_model_utils.get_variables_path(self._export_dir) saver = self._maybe_create_saver(saver) # Save the variables. Also, disable writing the checkpoint state proto. The # file is not used during SavedModel loading. In addition, since a # SavedModel can be copied or moved, this avoids the checkpoint state to # become outdated. saver.save(sess, variables_path, write_meta_graph=False, write_state=False) # Export the meta graph def. # The graph almost certainly previously contained at least one Saver, and # possibly several (e.g. one for loading a pretrained embedding, and another # for the model weights). Removing the preexisting ones was the # motivation for the clear_extraneous_savers option, but it turns out that # there are edge cases where that option breaks the graph. Until that is # resolved, we just leave the option set to False for now. # TODO(soergel): Reinstate clear_extraneous_savers=True when possible. meta_graph_def = saver.export_meta_graph( clear_devices=clear_devices, strip_default_attrs=strip_default_attrs) # Save asset files and write them to disk, if any. self._save_and_write_assets(meta_graph_def, assets_list) # Tag the meta graph def and add it to the SavedModel. self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map) # Mark this instance of SavedModel as having saved variables, such that # subsequent attempts to save variables will fail. self._has_saved_variables = True def save(self, as_text=False): """Writes a `SavedModel` protocol buffer to disk. The function writes the SavedModel protocol buffer to the export directory in serialized format. Args: as_text: Writes the SavedModel protocol buffer in text format to disk. Protocol buffers in text format are useful for debugging, but parsing fails when it encounters an unknown field and so is not forward compatible. This means changes to TensorFlow may prevent deployment of new text format SavedModels to existing serving binaries. Do not deploy `as_text` SavedModels to production. Returns: The path to which the SavedModel protocol buffer was written. 
""" if not file_io.file_exists(self._export_dir): file_io.recursive_create_dir(self._export_dir) if as_text: path = os.path.join( compat.as_bytes(self._export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)) file_io.write_string_to_file(path, str(self._saved_model)) else: path = os.path.join( compat.as_bytes(self._export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB)) file_io.write_string_to_file(path, self._saved_model.SerializeToString()) tf_logging.info("SavedModel written to: %s", compat.as_text(path)) return path @tf_export(v1=["saved_model.Builder", "saved_model.builder.SavedModelBuilder"]) # pylint: disable=missing-docstring class SavedModelBuilder(_SavedModelBuilder): __doc__ = _SavedModelBuilder.__doc__.replace("assets_list", "assets_collection") def __init__(self, export_dir): super(SavedModelBuilder, self).__init__(export_dir=export_dir) def _add_collections(self, assets_collection, main_op, train_op): """Add asset and op collections to be saved.""" # Save asset files and write them to disk, if any. self._save_and_write_assets(assets_collection) self._maybe_add_main_op(main_op) self._add_train_op(train_op) def _save_and_write_assets(self, assets_collection_to_add=None): """Saves asset to the meta graph and writes asset files to disk. Args: assets_collection_to_add: The collection where the asset paths are setup. """ # Add assets to the collection with key `saved_model.ASSETS_KEY`, in the # graph. asset_filename_map = _maybe_save_assets(_add_asset_to_collection, assets_collection_to_add) # Return if there are no assets to write. if not asset_filename_map: tf_logging.info("No assets to write.") return # Copy assets from source path to destination path. copy_assets_to_destination_dir(asset_filename_map, self._export_dir) def _maybe_add_main_op(self, main_op): """Adds main op to the SavedModel. Args: main_op: Main op to run as part of graph initialization. If None, no main op will be added to the graph. Raises: TypeError: if main op is provided but is not of type `Operation`. ValueError: if the Graph already contains an init op. """ if main_op is None: return if not isinstance(main_op, ops.Operation): raise TypeError("main_op needs to be an Operation: %r" % main_op) # Validate that no other init ops have been added to this graph already. # We check main_op and legacy_init_op for thoroughness and explicitness. for init_op_key in (constants.MAIN_OP_KEY, constants.LEGACY_INIT_OP_KEY): if ops.get_collection(init_op_key): raise ValueError( "Graph already contains one or more main ops under the " "collection {}.".format(init_op_key)) ops.add_to_collection(constants.MAIN_OP_KEY, main_op) def _add_train_op(self, train_op): """Add train op to the SavedModel. Note that this functionality is in development, and liable to be moved elsewhere. Args: train_op: Op or group of ops that are used for training. These are stored as a collection with key TRAIN_OP_KEY, but not executed. Raises: TypeError if Train op is not of type `Operation`. 
""" if train_op is not None: if (not isinstance(train_op, ops.Tensor) and not isinstance(train_op, ops.Operation)): raise TypeError("train_op needs to be a Tensor or Op: %r" % train_op) ops.add_to_collection(constants.TRAIN_OP_KEY, train_op) @deprecated_args(None, "Pass your op to the equivalent parameter main_op instead.", "legacy_init_op") def add_meta_graph(self, tags, signature_def_map=None, assets_collection=None, legacy_init_op=None, clear_devices=False, main_op=None, strip_default_attrs=False, saver=None): if not self._has_saved_variables: raise AssertionError( "Graph state including variables and assets has not been saved yet. " "Please invoke `add_meta_graph_and_variables()` first.") # Validate the signature def map to ensure all included TensorInfos are # properly populated. signature_def_map = signature_def_map or {} self._validate_signature_def_map(signature_def_map) # legacy_init_op is deprecated, and going away in TF 2.0. # Re-mapping to main_op, as treatment is identical regardless. main_op = main_op or legacy_init_op # Add assets and ops self._add_collections(assets_collection, main_op, None) saver = self._maybe_create_saver(saver) # The graph almost certainly previously contained at least one Saver, and # possibly several (e.g. one for loading a pretrained embedding, and another # for the model weights). Removing the preexisting ones was the # motivation for the clear_extraneous_savers option, but it turns out that # there are edge cases where that option breaks the graph. Until that is # resolved, we just leave the option set to False for now. # TODO(soergel): Reinstate clear_extraneous_savers=True when possible. meta_graph_def = saver.export_meta_graph( clear_devices=clear_devices, strip_default_attrs=strip_default_attrs) # Tag the meta graph def and add it to the SavedModel. self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map) @deprecated_args(None, "Pass your op to the equivalent parameter main_op instead.", "legacy_init_op") def add_meta_graph_and_variables(self, sess, tags, signature_def_map=None, assets_collection=None, legacy_init_op=None, clear_devices=False, main_op=None, strip_default_attrs=False, saver=None): if self._has_saved_variables: raise AssertionError("Graph state including variables and assets has " "already been saved. Please invoke " "`add_meta_graph()` instead.") # Validate the signature def map to ensure all included TensorInfos are # properly populated. signature_def_map = signature_def_map or {} self._validate_signature_def_map(signature_def_map) # legacy_init_op is deprecated, and going away in TF 2.0. # Re-mapping to main_op, as treatment is identical regardless. main_op = main_op or legacy_init_op # Add assets and ops self._add_collections(assets_collection, main_op, None) saved_model_utils.get_or_create_variables_dir(self._export_dir) variables_path = saved_model_utils.get_variables_path(self._export_dir) saver = self._maybe_create_saver(saver) # Save the variables. Also, disable writing the checkpoint state proto. The # file is not used during SavedModel loading. In addition, since a # SavedModel can be copied or moved, this avoids the checkpoint state to # become outdated. saver.save(sess, variables_path, write_meta_graph=False, write_state=False) # Export the meta graph def. # The graph almost certainly previously contained at least one Saver, and # possibly several (e.g. one for loading a pretrained embedding, and another # for the model weights). 
Removing the preexisting ones was the # motivation for the clear_extraneous_savers option, but it turns out that # there are edge cases where that option breaks the graph. Until that is # resolved, we just leave the option set to False for now. # TODO(soergel): Reinstate clear_extraneous_savers=True when possible. meta_graph_def = saver.export_meta_graph( clear_devices=clear_devices, strip_default_attrs=strip_default_attrs) # Tag the meta graph def and add it to the SavedModel. self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map) # Mark this instance of SavedModel as having saved variables, such that # subsequent attempts to save variables will fail. self._has_saved_variables = True add_meta_graph.__doc__ = _SavedModelBuilder.add_meta_graph.__doc__.replace( "assets_list", "assets_collection") add_meta_graph_and_variables.__doc__ = \ _SavedModelBuilder.add_meta_graph_and_variables.__doc__.replace( "assets_list", "assets_collection") def _maybe_save_assets(write_fn, assets_to_add=None): """Saves assets to the meta graph. Args: write_fn: A function callback that writes asset into meta graph. assets_to_add: The list where the asset paths are setup. Returns: A dict of asset basenames for saving to the original full path to the asset. Raises: ValueError: Indicating an invalid filepath tensor. """ # Map of target file names to original filenames asset_filename_map = {} if assets_to_add is None: tf_logging.info("No assets to save.") return asset_filename_map # Iterate over the supplied assets, build the `AssetFile` proto and add them # to the meta graph. for asset_tensor in assets_to_add: asset_source_filepath = _asset_path_from_tensor(asset_tensor) if not asset_source_filepath: raise ValueError("Invalid asset filepath tensor %s" % asset_tensor) asset_filename = get_asset_filename_to_add( asset_source_filepath, asset_filename_map) # Call the passed-in function that builds AssetFileDef proto and adds it # to either the collection or asset_file_def field of the meta graph. # Note that this should be done even when the file is a duplicate of an # already-added file, as the tensor reference should still exist. write_fn(asset_filename, asset_tensor) # In the cases where we are adding a duplicate, this will result in the # last of the filepaths being the one used for copying the file to the # SavedModel. Since the files in question are the same, it doesn't matter # either way. asset_filename_map[asset_filename] = asset_source_filepath tf_logging.info("Assets added to graph.") return asset_filename_map def get_asset_filename_to_add(asset_filepath, asset_filename_map): """Get a unique basename to add to the SavedModel if this file is unseen. Assets come from users as full paths, and we save them out to the SavedModel as basenames. In some cases, the basenames collide. Here, we dedupe asset basenames by first checking if the file is the same, and, if different, generate and return an index-suffixed basename that can be used to add the asset to the SavedModel. Args: asset_filepath: the full path to the asset that is being saved asset_filename_map: a dict of filenames used for saving the asset in the SavedModel to full paths from which the filenames were derived. Returns: Uniquified filename string if the file is not a duplicate, or the original filename if the file has already been seen and saved. """ asset_filename = os.path.basename(asset_filepath) if asset_filename not in asset_filename_map: # This is an unseen asset. Safe to add. 
return asset_filename other_asset_filepath = asset_filename_map[asset_filename] if other_asset_filepath == asset_filepath: # This is the same file, stored twice in the list. No need # to make unique. return asset_filename # Else, asset_filename is in the map, and the filepath is different. Dedupe. if not file_io.filecmp(asset_filepath, other_asset_filepath): # Files are different; dedupe filenames. return _get_unique_asset_filename(asset_filename, asset_filename_map) # Files are the same; don't make unique. return asset_filename def _get_unique_asset_filename(asset_filename, asset_filename_map): i = 1 unique_filename = asset_filename while unique_filename in asset_filename_map: unique_filename = compat.as_bytes("_").join( [compat.as_bytes(asset_filename), compat.as_bytes(str(i))]) i += 1 return unique_filename def _asset_path_from_tensor(path_tensor): """Returns the filepath value stored in constant `path_tensor`. Args: path_tensor: Tensor of a file-path. Returns: The string value i.e. path of the tensor, if valid. Raises: TypeError if tensor does not match expected op type, dtype or value. """ if not isinstance(path_tensor, ops.Tensor): raise TypeError("Asset path tensor must be a Tensor.") if path_tensor.op.type != "Const": raise TypeError("Asset path tensor must be of type constant.") if path_tensor.dtype != dtypes.string: raise TypeError("Asset path tensor must be of dtype string.") str_values = path_tensor.op.get_attr("value").string_val if len(str_values) != 1: raise TypeError("Asset path tensor must be a scalar.") return str_values[0] def _add_asset_to_metagraph(meta_graph_def, asset_filename, asset_tensor): """Builds an asset proto and adds it to the meta graph def. Args: meta_graph_def: The meta graph def to which the asset will be added. asset_filename: The filename of the asset to be added. asset_tensor: The asset tensor used to populate the tensor info of the asset proto. """ asset_proto = meta_graph_def.asset_file_def.add() asset_proto.filename = asset_filename asset_proto.tensor_info.name = asset_tensor.name def copy_assets_to_destination_dir(asset_filename_map, destination_dir): """Copy all assets from source path to destination path.""" assets_destination_dir = saved_model_utils.get_or_create_assets_dir( destination_dir) # Copy each asset from source path to destination path. for asset_basename, asset_source_filepath in asset_filename_map.items(): asset_destination_filepath = os.path.join( compat.as_bytes(assets_destination_dir), compat.as_bytes(asset_basename)) # Only copy the asset file to the destination if it does not already # exist. This is to ensure that an asset with the same name defined as # part of multiple graphs is only copied the first time. if not file_io.file_exists(asset_destination_filepath): file_io.copy(asset_source_filepath, asset_destination_filepath) tf_logging.info("Assets written to: %s", compat.as_text(assets_destination_dir)) def _add_asset_to_collection(asset_filename, asset_tensor): """Builds an asset proto and adds it to the asset collection of the graph. Args: asset_filename: The filename of the asset to be added. asset_tensor: The asset tensor used to populate the tensor info of the asset proto. 
""" asset_proto = meta_graph_pb2.AssetFileDef() asset_proto.filename = asset_filename asset_proto.tensor_info.name = asset_tensor.name asset_any_proto = Any() asset_any_proto.Pack(asset_proto) ops.add_to_collection(constants.ASSETS_KEY, asset_any_proto) def _add_op_to_signature_def_map(signature_def_map, op, key): if op is not None: signature_def_map[key] = signature_def_utils.op_signature_def(op, key)
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/builder_impl.py
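The `_SavedModelBuilder` docstring above describes the intended workflow: the first meta graph is added together with the variables, and further meta graphs are added by tag afterwards. Below is a minimal, hedged sketch of that workflow through the public v1 API; the toy graph, tensor names, signature key, and `export_dir` path are all hypothetical and exist only to illustrate the call sequence.

```python
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # no-op under TF 1.x, needed under TF 2.x
export_dir = "/tmp/example_saved_model"  # hypothetical export location

with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
  # A toy inference graph: y = x @ w.
  x = tf.compat.v1.placeholder(tf.float32, shape=[None, 3], name="x")
  w = tf.compat.v1.get_variable("w", shape=[3, 1])
  y = tf.matmul(x, w, name="y")
  sess.run(tf.compat.v1.global_variables_initializer())

  builder = tf.compat.v1.saved_model.Builder(export_dir)
  signature = (
      tf.compat.v1.saved_model.signature_def_utils.predict_signature_def(
          inputs={"x": x}, outputs={"y": y}))
  # The first meta graph must be added together with the variables.
  builder.add_meta_graph_and_variables(
      sess,
      tags=[tf.saved_model.SERVING],
      signature_def_map={"serving_default": signature})

# Additional meta graphs (e.g. for other serving targets) are added by tag
# only; they reuse the variables saved with the first meta graph.
with tf.Graph().as_default():
  # ... rebuild the inference graph for the other configuration here ...
  builder.add_meta_graph(["other-tag"])

builder.save()
```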
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel main op.

Builds a main op that defines the sequence of ops to be run as part of the
SavedModel load/restore operations.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import
from tensorflow.python.saved_model.main_op_impl import main_op
from tensorflow.python.saved_model.main_op_impl import main_op_with_restore
# pylint: enable=unused-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/main_op.py
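`main_op.py` only re-exports `main_op` and `main_op_with_restore` from `main_op_impl`. As a hedged illustration of where such an op is typically used, the sketch below passes the result of `main_op()` to the v1 builder so that table (and local-variable) initializers run as part of the SavedModel load/restore sequence. The lookup table and `export_dir` are hypothetical, and the `tf.compat.v1.saved_model.main_op.main_op` export path is assumed from the v1 compatibility notes above.

```python
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # no-op under TF 1.x, needed under TF 2.x
export_dir = "/tmp/example_saved_model_with_main_op"  # hypothetical path

with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
  # A toy lookup table; its initializer must run when the model is loaded.
  table = tf.lookup.StaticHashTable(
      tf.lookup.KeyValueTensorInitializer(["a", "b"], [0, 1]),
      default_value=-1)
  tokens = tf.compat.v1.placeholder(tf.string, shape=[None], name="tokens")
  ids = table.lookup(tokens, name="ids")  # graph output, not run here

  builder = tf.compat.v1.saved_model.Builder(export_dir)
  builder.add_meta_graph_and_variables(
      sess,
      tags=[tf.saved_model.SERVING],
      # main_op() groups local-variable and table initializers so they are
      # executed as part of the load/restore sequence.
      main_op=tf.compat.v1.saved_model.main_op.main_op())
  builder.save()
```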
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for SavedModel save and restore operations.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.util.tf_export import tf_export

# Subdirectory name containing the asset files.
ASSETS_DIRECTORY = "assets"
tf_export(
    "saved_model.ASSETS_DIRECTORY",
    v1=[
        "saved_model.ASSETS_DIRECTORY",
        "saved_model.constants.ASSETS_DIRECTORY"
    ]).export_constant(__name__, "ASSETS_DIRECTORY")

# Subdirectory name containing unmanaged files from higher-level APIs.
EXTRA_ASSETS_DIRECTORY = "assets.extra"

# CollectionDef key containing SavedModel assets.
ASSETS_KEY = "saved_model_assets"
tf_export(
    "saved_model.ASSETS_KEY",
    v1=["saved_model.ASSETS_KEY",
        "saved_model.constants.ASSETS_KEY"]).export_constant(
            __name__, "ASSETS_KEY")

# CollectionDef key for the legacy init op.
LEGACY_INIT_OP_KEY = "legacy_init_op"
tf_export(
    v1=[
        "saved_model.LEGACY_INIT_OP_KEY",
        "saved_model.constants.LEGACY_INIT_OP_KEY"
    ]).export_constant(__name__, "LEGACY_INIT_OP_KEY")

# CollectionDef key for the SavedModel main op.
MAIN_OP_KEY = "saved_model_main_op"
tf_export(
    v1=["saved_model.MAIN_OP_KEY",
        "saved_model.constants.MAIN_OP_KEY"]).export_constant(
            __name__, "MAIN_OP_KEY")

# CollectionDef key for the SavedModel train op.
# Not exported while export_all_saved_models is experimental.
TRAIN_OP_KEY = "saved_model_train_op"

# Schema version for SavedModel.
SAVED_MODEL_SCHEMA_VERSION = 1
tf_export(
    "saved_model.SAVED_MODEL_SCHEMA_VERSION",
    v1=[
        "saved_model.SAVED_MODEL_SCHEMA_VERSION",
        "saved_model.constants.SAVED_MODEL_SCHEMA_VERSION"
    ]).export_constant(__name__, "SAVED_MODEL_SCHEMA_VERSION")

# File name for SavedModel protocol buffer.
SAVED_MODEL_FILENAME_PB = "saved_model.pb"
tf_export(
    "saved_model.SAVED_MODEL_FILENAME_PB",
    v1=[
        "saved_model.SAVED_MODEL_FILENAME_PB",
        "saved_model.constants.SAVED_MODEL_FILENAME_PB"
    ]).export_constant(__name__, "SAVED_MODEL_FILENAME_PB")

# File name for text version of SavedModel protocol buffer.
SAVED_MODEL_FILENAME_PBTXT = "saved_model.pbtxt"
tf_export(
    "saved_model.SAVED_MODEL_FILENAME_PBTXT",
    v1=[
        "saved_model.SAVED_MODEL_FILENAME_PBTXT",
        "saved_model.constants.SAVED_MODEL_FILENAME_PBTXT"
    ]).export_constant(__name__, "SAVED_MODEL_FILENAME_PBTXT")

# File name for json format of SavedModel.
# Not exported while keras_saved_model is in contrib.
SAVED_MODEL_FILENAME_JSON = "saved_model.json"

# Subdirectory name containing the variables/checkpoint files.
VARIABLES_DIRECTORY = "variables"
tf_export(
    "saved_model.VARIABLES_DIRECTORY",
    v1=[
        "saved_model.VARIABLES_DIRECTORY",
        "saved_model.constants.VARIABLES_DIRECTORY"
    ]).export_constant(__name__, "VARIABLES_DIRECTORY")

# File name used for variables.
VARIABLES_FILENAME = "variables"
tf_export(
    "saved_model.VARIABLES_FILENAME",
    v1=[
        "saved_model.VARIABLES_FILENAME",
        "saved_model.constants.VARIABLES_FILENAME"
    ]).export_constant(__name__, "VARIABLES_FILENAME")

# The initialization and train ops for a MetaGraph are stored in the
# signature def map. The ops are added to the map with the following keys.
INIT_OP_SIGNATURE_KEY = "__saved_model_init_op"
TRAIN_OP_SIGNATURE_KEY = "__saved_model_train_op"
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/constants.py
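The constants above fix the on-disk layout of a SavedModel: `saved_model.pb` or `saved_model.pbtxt` at the top level, plus `variables/` and `assets/` (and optionally `assets.extra/`) subdirectories. The snippet below is a small, hypothetical helper, written only for illustration, that uses these constants to report which of the standard pieces are present in a given export directory.

```python
import os

from tensorflow.python.saved_model import constants


def describe_export_dir(export_dir):
  """Prints which standard SavedModel files/subdirectories exist on disk."""
  expected = [
      constants.SAVED_MODEL_FILENAME_PB,     # "saved_model.pb"
      constants.SAVED_MODEL_FILENAME_PBTXT,  # "saved_model.pbtxt"
      constants.VARIABLES_DIRECTORY,         # "variables"
      constants.ASSETS_DIRECTORY,            # "assets"
      constants.EXTRA_ASSETS_DIRECTORY,      # "assets.extra"
  ]
  for name in expected:
    path = os.path.join(export_dir, name)
    status = "present" if os.path.exists(path) else "missing"
    print("%-20s %s" % (name, status))


# Example (hypothetical path):
# describe_export_dir("/tmp/example_saved_model")
```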
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for trackable object SavedModel loading.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools import os import sys import tempfile import weakref from absl.testing import parameterized from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import backprop from tensorflow.python.eager import def_function from tensorflow.python.eager import test from tensorflow.python.eager import wrap_function from tensorflow.python.feature_column import feature_column_lib from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import function as framework_function from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.framework import versions from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.engine import input_layer from tensorflow.python.keras.engine import sequential from tensorflow.python.keras.engine import training as training_lib from tensorflow.python.keras.layers import convolutional from tensorflow.python.keras.layers import core from tensorflow.python.keras.optimizer_v2 import adam from tensorflow.python.lib.io import file_io from tensorflow.python.module import module from tensorflow.python.ops import array_ops from tensorflow.python.ops import cond_v2 from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.saved_model import load from tensorflow.python.saved_model import save from tensorflow.python.saved_model import tag_constants from tensorflow.python.training import monitored_session from tensorflow.python.training.tracking import tracking from tensorflow.python.training.tracking import util from tensorflow.python.util import tf_inspect def cycle(obj, cycles, signatures=None): to_save = obj # TODO(vbardiovsky): It would be nice if exported protos reached a fixed # point w.r.t. saving/restoring, ideally after 2nd saving. for _ in range(cycles): path = tempfile.mkdtemp(prefix=test.get_temp_dir()) # If available, we'll run the save and restore preferring the GPU. This # just makes sure we aren't throwing errors and have enough # device("CPU") blocks to satisfy the placer. 
with test_util.use_gpu(): save.save(to_save, path, signatures) loaded = load.load(path) to_save = loaded return loaded @parameterized.named_parameters( dict(testcase_name="ReloadOnce", cycles=1), dict(testcase_name="ReloadTwice", cycles=2), dict(testcase_name="ReloadThrice", cycles=3)) class LoadTest(test.TestCase, parameterized.TestCase): def test_structure_import(self, cycles): root = tracking.AutoTrackable() root.dep_one = tracking.AutoTrackable() root.dep_two = tracking.AutoTrackable() root.dep_two.dep = tracking.AutoTrackable() root.dep_three = root.dep_two.dep imported = cycle(root, cycles) self.assertIs(imported.dep_three, imported.dep_two.dep) self.assertIsNot(imported.dep_one, imported.dep_two) def test_variables(self, cycles): root = tracking.AutoTrackable() root.v1 = variables.Variable(1., trainable=True) root.v2 = variables.Variable(2., trainable=False) imported = cycle(root, cycles) self.assertEqual(imported.v1.numpy(), 1.0) self.assertTrue(imported.v1.trainable) self.assertEqual(imported.v2.numpy(), 2.0) self.assertFalse(imported.v2.trainable) def test_variables_name(self, cycles): root = tracking.AutoTrackable() # Test 2 variables with same name: should work as the checkpoint # is based on object name and not on variable name. root.v1 = variables.Variable(1., trainable=True, name="v1") root.v2 = variables.Variable(2., trainable=False, name="v1") imported = cycle(root, cycles) self.assertEqual(imported.v1.numpy(), 1.0) self.assertEqual(imported.v2.numpy(), 2.0) self.assertEqual(imported.v1.name, root.v1.name) self.assertEqual(imported.v2.name, root.v2.name) with variable_scope.variable_scope("foo"): imported = cycle(root, cycles) self.assertTrue(imported.v1.name.startswith("foo/")) self.assertTrue(imported.v2.name.startswith("foo/")) def test_partially_defined_variable_shape(self, cycles): class MakeVariable(module.Module): def __init__(self): self.v = None @def_function.function( input_signature=[tensor_spec.TensorSpec([None], dtypes.int64)]) def make_variable(self, initial_value): if self.v is None: self.v = variables.Variable(initial_value) m = MakeVariable() m.make_variable([1, 2, 3]) m = cycle(m, cycles) m.v.assign([1, 2, 3, 4]) self.assertEqual([None], tensor_shape.as_shape(m.v.shape).as_list()) @test_util.run_in_graph_and_eager_modes def test_capture_variables(self, cycles): root = tracking.AutoTrackable() root.weights = variables.Variable(2.) self.evaluate(root.weights.initializer) root.f = def_function.function( lambda x: root.weights * x, input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) for _ in range(cycles): imported = cycle(root, 1) self.evaluate(imported.weights.initializer) self.assertEqual(4., self.evaluate(imported.f(constant_op.constant(2.)))) self.evaluate(imported.weights.assign(4.0)) self.assertEqual(8., self.evaluate(imported.f(constant_op.constant(2.)))) @test_util.run_in_graph_and_eager_modes def test_capture_constant(self, cycles): root = tracking.AutoTrackable() captured_constant = constant_op.constant(2.) root.f = def_function.function( lambda x: captured_constant * x, input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) imported = cycle(root, cycles) self.assertEqual(4., self.evaluate(imported.f(constant_op.constant(2.)))) def test_control_outputs(self, cycles): exported = tracking.AutoTrackable() exported.v = variables.Variable(1.) 
exported.f = def_function.function( lambda: exported.v.assign(2., name="should_be_control_output")) exported_graph = exported.f.get_concrete_function().graph self.assertIn( exported_graph.get_operation_by_name("should_be_control_output"), exported_graph.control_outputs) imported = cycle(exported, cycles) # Calling get_concrete_function wraps in a second call operation; we want to # inspect the original function body for the control output; digging into # graph.as_graph_def() and its FunctionDefLibrary is another option. imported_concrete, = imported.f.concrete_functions imported_graph = imported_concrete.graph self.assertIn( imported_graph.get_operation_by_name("should_be_control_output"), imported_graph.control_outputs) def _make_asset(self, contents): filename = tempfile.mktemp(prefix=self.get_temp_dir()) with open(filename, "w") as f: f.write(contents) return filename @test_util.run_in_graph_and_eager_modes def test_assets(self, cycles): file1 = self._make_asset("contents 1") file2 = self._make_asset("contents 2") root = tracking.AutoTrackable() root.asset1 = tracking.TrackableAsset(file1) root.asset2 = tracking.TrackableAsset(file2) save_dir = os.path.join(self.get_temp_dir(), "save_dir") save.save(root, save_dir) file_io.delete_file(file1) file_io.delete_file(file2) load_dir = os.path.join(self.get_temp_dir(), "load_dir") file_io.rename(save_dir, load_dir) imported = load.load(load_dir) with open(self.evaluate(imported.asset1.asset_path), "r") as f: self.assertEqual("contents 1", f.read()) with open(self.evaluate(imported.asset2.asset_path), "r") as f: self.assertEqual("contents 2", f.read()) def test_cond_prune(self, cycles): x_in = [] x_out = [] def f(x, y): x_in.append(x) xx = cond_v2.cond_v2( math_ops.less(1, 2), lambda: x + 1, lambda: x + 2, ) x_out.append(xx) return xx, 2 * y f_wrapped = wrap_function.wrap_function( f, [tensor_spec.TensorSpec((), dtypes.float32)] * 2) f_pruned = f_wrapped.prune(x_in[0], [x_out[0]]) class Adder(module.Module): @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)]) def add(self, x): return f_pruned(x) root = Adder() root.add(constant_op.constant(1.)) root = cycle(root, cycles) root.add(constant_op.constant(1.)) def test_capture_assets(self, cycles): root = tracking.AutoTrackable() root.vocab = tracking.TrackableAsset(self._make_asset("contents")) root.f = def_function.function( lambda: root.vocab.asset_path, input_signature=[]) imported = cycle(root, cycles) original_output = root.f().numpy() imported_output = imported.f().numpy() self.assertNotEqual(original_output, imported_output) with open(imported_output, "r") as f: self.assertEqual("contents", f.read()) def test_capture_assets_in_graph(self, cycles): root = tracking.AutoTrackable() root.vocab = tracking.TrackableAsset(self._make_asset("contents")) root.f = def_function.function( lambda: root.vocab.asset_path, input_signature=[]) original_output = root.f().numpy() if cycles > 1: root = cycle(root, cycles - 1) path = tempfile.mkdtemp(prefix=self.get_temp_dir()) save.save(root, path) with ops.Graph().as_default(): imported = load.load(path) imported_tensor = imported.f() with monitored_session.MonitoredSession() as sess: imported_output = sess.run(imported_tensor) self.assertNotEqual(original_output, imported_output) with open(imported_output, "r") as f: self.assertEqual("contents", f.read()) def test_dedup_assets(self, cycles): vocab = self._make_asset("contents") root = tracking.AutoTrackable() root.asset1 = tracking.TrackableAsset(vocab) 
root.asset2 = tracking.TrackableAsset(vocab) imported = cycle(root, cycles) self.assertEqual(imported.asset1.asset_path.numpy(), imported.asset2.asset_path.numpy()) def test_implicit_input_signature(self, cycles): @def_function.function def func(x): return 2 * x root = tracking.AutoTrackable() root.f = func # Add two traces. root.f(constant_op.constant(1.)) root.f(constant_op.constant(1)) imported = cycle(root, cycles) self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy()) self.assertEqual(14, imported.f(constant_op.constant(7)).numpy()) def test_explicit_input_signature(self, cycles): @def_function.function( input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) def func(x): return 2 * x root = tracking.AutoTrackable() root.f = func imported = cycle(root, cycles) self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy()) def test_explicit_save_signature(self, cycles): @def_function.function def func(x): return 2 * x root = tracking.AutoTrackable() root.f = func imported = cycle( root, cycles, { "f": root.f.get_concrete_function( tensor_spec.TensorSpec(None, dtypes.float32)) }) self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy()) def test_nested_functions(self, cycles): f = def_function.function( lambda x: x*2.0, input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) g = def_function.function( lambda x: f(x) + 1.0, input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) root = tracking.AutoTrackable() root.g = g imported = cycle(root, cycles) imported.g(constant_op.constant([1.0])) def test_function_with_default_bool_input(self, cycles): def func(x, training=False): if training: return 2 * x else: return 7 root = tracking.AutoTrackable() root.f = def_function.function(func) self.assertEqual(20, root.f(constant_op.constant(10), True).numpy()) self.assertEqual(7, root.f(constant_op.constant(1)).numpy()) self.assertEqual(2, root.f(constant_op.constant(1), True).numpy()) imported = cycle(root, cycles) self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy()) self.assertEqual(7, imported.f(constant_op.constant(2)).numpy()) def test_function_with_default_none_input(self, cycles): def func(x, dtype=None): if dtype: return array_ops.zeros(shape=x.shape, dtype=dtype) else: return array_ops.zeros(shape=x.shape, dtype=dtypes.float32) root = tracking.AutoTrackable() root.f = def_function.function(func) self.assertAllEqual([0.0, 0.0, 0.0], root.f(constant_op.constant([1, 2, 3])).numpy()) self.assertAllEqual([0.0, 0.0, 0.0], root.f(constant_op.constant([1.0, 2.0, 3.0])).numpy()) self.assertAllEqual([0.0, 0.0, 0.0, 0.0], root.f(constant_op.constant([1, 2, 3, 4])).numpy()) self.assertAllEqual([0, 0, 0], root.f( constant_op.constant([1.0, 2.0, 3.0]), dtype=dtypes.int32).numpy()) concrete_functions = root.f._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access self.assertEqual(4, len(concrete_functions)) imported = cycle(root, cycles) self.assertAllEqual([0.0, 0.0, 0.0], imported.f(constant_op.constant([1, 2, 3]), None).numpy()) self.assertAllEqual([0.0, 0.0, 0.0], imported.f(constant_op.constant([1.0, 2.0, 3.0])).numpy()) self.assertAllEqual([0.0, 0.0, 0.0, 0.0], imported.f(constant_op.constant([1, 2, 3, 4])).numpy()) self.assertAllEqual([0, 0, 0], imported.f( constant_op.constant([1.0, 2.0, 3.0]), dtype=dtypes.int32).numpy()) def test_function_no_return(self, cycles): class TrackableWithOneVariable(tracking.AutoTrackable): def __init__(self, initial_value=0.0): super(TrackableWithOneVariable, 
self).__init__() self.variable = variables.Variable(initial_value) @def_function.function def increase(self, by=1.0): self.variable.assign_add(by) obj = TrackableWithOneVariable(5.0) obj.increase(constant_op.constant(10.0)) self.assertEqual(15.0, obj.variable.numpy()) obj.increase() self.assertEqual(16.0, obj.variable.numpy()) imported = cycle(obj, cycles) imported.increase(constant_op.constant(10.0)) self.assertEqual(26.0, imported.variable.numpy()) imported.increase(constant_op.constant(1.0)) self.assertEqual(27.0, imported.variable.numpy()) def test_structured_inputs(self, cycles): def func(x, training=True): # x is a nested structure, we care about one particular tensor. _, (a, b) = x if training: return 2 * a["a"] + b else: return 7 root = tracking.AutoTrackable() root.f = def_function.function(func) x = constant_op.constant(10) y = constant_op.constant(11) input1 = [6, ({"a": x}, y)] input2 = [7, ({"a": x}, y)] # Not compatible with input1 signature. input3 = [6, ({"a": y}, x)] # Compatible with input1 signature. # Note: by only calling f(input1) before serialization, only inputs with # matching signature will be valid on the loaded model. self.assertEqual(31, root.f(input1).numpy()) imported = cycle(root, cycles) with self.assertRaisesRegexp(ValueError, "Could not find matching function to call"): imported.f(input2) self.assertEqual(31, imported.f(input1).numpy()) self.assertEqual(32, imported.f(input3).numpy()) def test_structured_output(self, cycles): # Use fields with non-alphabetical order named_tuple_type = collections.namedtuple("NamedTupleHello", ["b", "a"]) def func(input1, input2): named_tuple = named_tuple_type(a=input1 + input2, b=input1 * input2) return [named_tuple, input2, {"x": 0.5}] root = tracking.AutoTrackable() root.f = def_function.function(func) result = root.f(constant_op.constant(2), constant_op.constant(3)) self.assertEqual(5, result[0].a.numpy()) self.assertEqual(6, result[0].b.numpy()) self.assertEqual(["b", "a"], list(result[0]._asdict().keys())) self.assertEqual(3, result[1].numpy()) self.assertEqual(0.5, result[2]["x"].numpy()) imported = cycle(root, cycles) result = imported.f(constant_op.constant(2), constant_op.constant(5)) self.assertEqual(7, result[0].a.numpy()) self.assertEqual(10, result[0].b.numpy()) self.assertEqual(["b", "a"], list(result[0]._asdict().keys())) self.assertEqual(5, result[1].numpy()) self.assertEqual(0.5, result[2]["x"].numpy()) def test_optimizer(self, cycles): class _HasOptimizer(module.Module): def __init__(self): super(_HasOptimizer, self).__init__() self.layer = core.Dense(1) self.optimizer = adam.Adam(0.01) @def_function.function def __call__(self, x): return self.layer(x) @def_function.function def train(self, x, y): with backprop.GradientTape() as tape: predicted = self(x) loss = math_ops.reduce_sum(math_ops.abs(y - predicted)) train_vars = self.layer.trainable_variables grads = tape.gradient(loss, train_vars) self.optimizer.apply_gradients(zip(grads, train_vars)) root = _HasOptimizer() train_input = dict(x=constant_op.constant([[1.]]), y=constant_op.constant([[2.]])) root.train(**train_input) imported = cycle(root, cycles) self.assertAllClose(root.optimizer.learning_rate.numpy(), imported.optimizer.learning_rate.numpy()) self.assertAllClose(root(constant_op.constant([[-0.5]])), imported(constant_op.constant([[-0.5]]))) root.train(**train_input) imported.train(**train_input) self.assertAllClose(root(constant_op.constant([[-0.5]])), imported(constant_op.constant([[-0.5]]))) def test_positional_arguments(self, cycles): def 
func(x, training=False, abc=7.1, defg=7.7): del abc if training: return 2 * x if defg == 7: return 6 else: return 7 root = tracking.AutoTrackable() root.f = def_function.function(func) self.assertEqual(20, root.f(constant_op.constant(10), True).numpy()) self.assertEqual(7, root.f(constant_op.constant(1)).numpy()) self.assertEqual(2, root.f(constant_op.constant(1), True).numpy()) self.assertEqual(6, root.f(constant_op.constant(1), defg=7.0).numpy()) imported = cycle(root, cycles) self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy()) self.assertEqual(7, imported.f(constant_op.constant(2)).numpy()) self.assertEqual(6, imported.f(constant_op.constant(1), defg=7.0).numpy()) def test_additional_kwargs(self, cycles): def func(x, training=False, **options): del options if training: return 2 * x else: return 7 root = tracking.AutoTrackable() root.f = def_function.function(func) x = constant_op.constant(10) self.assertEqual(7, root.f(x, learning_rate=0.5, epochs=3).numpy()) imported = cycle(root, cycles) with self.assertRaisesRegexp(ValueError, "Could not find matching function to call.*"): imported.f(x, learning_rate=0.5, epochs=4) self.assertEqual(7, imported.f(x, learning_rate=0.5, epochs=3).numpy()) def test_member_function(self, cycles): class TrackableWithMember(tracking.AutoTrackable): def __init__(self): super(TrackableWithMember, self).__init__() self._some_value = 20 @def_function.function def f(self, x, training=False): if training: return 2 * x else: return 7 + self._some_value root = TrackableWithMember() self.assertEqual(20, root.f(constant_op.constant(10), True).numpy()) self.assertEqual(27, root.f(constant_op.constant(1)).numpy()) self.assertEqual(2, root.f(constant_op.constant(1), True).numpy()) imported = cycle(root, cycles) self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy()) self.assertEqual(27, imported.f(constant_op.constant(2)).numpy()) def test_side_effect_listing(self, cycles): class M(tracking.AutoTrackable): def __init__(self): super(M, self).__init__() self.var = None @def_function.function( input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) def f(self, x): if self.var is None: self.var = variables.Variable(2.) return x * self.var m = M() cycle(m, cycles) self.assertEqual(4.0, m.f(constant_op.constant(2.0)).numpy()) def test_basic_backprop(self, cycles): weight = variables.Variable(1., trainable=True) bias = variables.Variable(0., trainable=True) g = def_function.function( lambda x: x*weight + bias, input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) root = tracking.AutoTrackable() root.weight = weight root.bias = bias root.g = g imported = cycle(root, cycles) with backprop.GradientTape() as t: x = constant_op.constant([3.5]) loss = imported.g(x) grad = t.gradient(loss, [imported.weight, imported.bias]) self.assertAllClose(grad, [3.5, 1.0]) def test_nested_backprop(self, cycles): weight = variables.Variable(1., trainable=True) bias = variables.Variable(0., trainable=True) # Note: this function gets called from other function defs via a # "PartitionedCall" op node. @def_function.function(input_signature=[ tensor_spec.TensorSpec(None, dtypes.float32), tensor_spec.TensorSpec(None, dtypes.float32)]) def mul(x, y): return x * y # Note: this function gets called from other function defs via a # "StatefulPartitionedCall" op node. 
@def_function.function(input_signature=[ tensor_spec.TensorSpec(None, dtypes.float32)]) def f(x): return mul(weight.read_value(), x) @def_function.function(input_signature=[ tensor_spec.TensorSpec(None, dtypes.float32)]) def g(x): return f(x) + bias, @def_function.function(input_signature=[ tensor_spec.TensorSpec(None, dtypes.float32)]) def h(x): return g(x) + bias, root = tracking.AutoTrackable() root.weight = weight root.bias = bias root.g = h imported = cycle(root, cycles) with backprop.GradientTape() as t: x = constant_op.constant([3.5]) loss = imported.g(x) grad = t.gradient(loss, [imported.weight, imported.bias]) self.assertAllClose(grad, [3.5, 2.0]) def test_callable(self, cycles): class M1(tracking.AutoTrackable): @def_function.function( input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) def __call__(self, x): return x root = tracking.AutoTrackable() root.m1 = M1() root.m2 = tracking.AutoTrackable() root.m2.__call__ = def_function.function( input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])( lambda x: x*3.0) imported = cycle(root, cycles) x = constant_op.constant(1.0) self.assertTrue(callable(imported.m1)) self.assertAllEqual(root.m1(x), imported.m1(x)) # Note: `root.m2` was not callable since `__call__` attribute was set # into the instance and not on the class. But after a serialization cycle # that starts to work. self.assertTrue(callable(imported.m2)) self.assertAllEqual(root.m2.__call__(x), imported.m2(x)) # Verify that user objects without `__call__` attribute are not callable. self.assertFalse(callable(imported)) def test_chain_callable(self, cycles): func = def_function.function( input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])( lambda x: x*3.0) root = tracking.AutoTrackable() root.__call__ = tracking.AutoTrackable() root.__call__.__call__ = tracking.AutoTrackable() root.__call__.__call__.__call__ = func imported = cycle(root, cycles) self.assertTrue(callable(imported)) x = constant_op.constant(1.0) self.assertAllEqual(imported(x).numpy(), 3.0) def test_load_in_graph_mode(self, cycles): root = tracking.AutoTrackable() root.v1 = variables.Variable(1., name="v_one", trainable=False) root.v2 = variables.Variable(2., name="v_two", trainable=True) root.f = def_function.function( lambda x: root.v2 * x, input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) if cycles > 1: root = cycle(root, cycles - 1) path = tempfile.mkdtemp(prefix=self.get_temp_dir()) save.save(root, path) with ops.Graph().as_default() as g: imported = load.load(path) var_v1 = imported.v1 self.assertFalse(var_v1.trainable) var_v2 = imported.v2 self.assertTrue(var_v2.trainable) output = imported.f(constant_op.constant(2.)) with monitored_session.MonitoredSession() as sess: self.assertEqual(1.0, sess.run(var_v1)) self.assertEqual(4.0, sess.run(output)) self.assertCountEqual([var_v1, var_v2], g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)) # load() should not add to TRAINABLE_VARIABLES. Higher levels of model # building control retraining or frozen use of imported SavedModels. self.assertCountEqual([], g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)) def test_load_in_func_graph(self, cycles): root = tracking.AutoTrackable() root.v1 = variables.Variable(1.) root.v2 = variables.Variable(2.) 
root.f = def_function.function( lambda x: root.v2 * x, input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) if cycles > 1: root = cycle(root, cycles - 1) path = tempfile.mkdtemp(prefix=self.get_temp_dir()) save.save(root, path) closure = tracking.AutoTrackable() @def_function.function def func(x): if not hasattr(closure, "model"): closure.model = load.load(path) return closure.model.f(x) inputs = constant_op.constant(2.) self.assertEqual(4.0, func(inputs).numpy()) def test_soft_matching(self, cycles): @def_function.function( input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]) def func(x): return 2 * x root = tracking.AutoTrackable() root.f = func self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy()) self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy()) concrete_functions = root.f._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access self.assertEqual(1, len(concrete_functions)) imported = cycle(root, cycles) with self.assertRaisesRegexp(ValueError, "Python inputs incompatible"): # We cannot call the function with a constant of shape (). imported.f(constant_op.constant(2)).numpy() # TODO(vbardiovsky): When classes are revived with input_signatures, we # should also check that the calls below are not generating any more # concrete functions. self.assertAllEqual([2, 4, 6, 8], imported.f(constant_op.constant([1, 2, 3, 4])).numpy()) self.assertAllEqual([2, 4, 6], imported.f(constant_op.constant([1, 2, 3])).numpy()) def test_get_concrete_function(self, cycles): @def_function.function def func(x, training=False): if training: return 2 * x else: return 3 * x func.get_concrete_function( tensor_spec.TensorSpec([None], dtypes.int32), True) func.get_concrete_function(tensor_spec.TensorSpec([None], dtypes.float32)) root = tracking.AutoTrackable() root.f = func imported = cycle(root, cycles) concrete = imported.f.get_concrete_function( training=True, x=tensor_spec.TensorSpec([None], dtypes.int32)) self.assertAllEqual([2, 4, 6, 8], concrete(x=constant_op.constant([1, 2, 3, 4])).numpy()) with self.assertRaisesRegexp(ValueError, "Could not find matching function to call"): imported.f.get_concrete_function( tensor_spec.TensorSpec([None], dtypes.int32)) imported.f.get_concrete_function( tensor_spec.TensorSpec([None], dtypes.int32), True) def test_concrete_function(self, cycles): @def_function.function( input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]) def func(x): return 2 * x root = tracking.AutoTrackable() root.f = func.get_concrete_function() self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy()) self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy()) # TODO(andresp): Fix exporting of loaded concrete functions as signatures. imported = cycle(root, cycles, signatures={}) self.assertAllEqual([2, 4, 6, 8], imported.f(constant_op.constant([1, 2, 3, 4])).numpy()) self.assertAllEqual([2, 4, 6], imported.f(constant_op.constant([1, 2, 3])).numpy()) def test_concrete_function_captures(self, cycles): class Root(module.Module): def __init__(self): self.v = variables.Variable(1.) self.v1 = variables.Variable(1.) @def_function.function( input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) def use_v(self, x): return self.v + self.v1 + 1. 
root = Root() self.assertIn(root.v.handle, root.use_v.get_concrete_function().graph.external_captures) for _ in range(cycles): root = cycle(root, 1, signatures=root.use_v.get_concrete_function()) func_captures = root.use_v.get_concrete_function().graph.external_captures self.assertLen(func_captures, 2) self.assertTrue(any(root.v.handle is t for t in func_captures)) self.assertTrue(any(root.v1.handle is t for t in func_captures)) signature_captures = root.signatures[ "serving_default"].graph.external_captures self.assertLen(signature_captures, 2) self.assertTrue(any(root.v.handle is t for t in signature_captures)) self.assertTrue(any(root.v1.handle is t for t in signature_captures)) def test_concrete_function_arg_names(self, cycles): @def_function.function( input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]) def func(x): return 2 * x root = tracking.AutoTrackable() root.f = func.get_concrete_function() self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy()) # TODO(andresp): Fix exporting of loaded concrete functions as signatures. imported = cycle(root, cycles, signatures={}) self.assertAllEqual([2, 4, 6], imported.f(x=constant_op.constant([1, 2, 3])).numpy()) def test_concrete_function_no_signature(self, cycles): @def_function.function def func(x): return 2 * x root = tracking.AutoTrackable() root.f = func.get_concrete_function(constant_op.constant([1])) self.assertAllEqual([4], root.f(constant_op.constant([2])).numpy()) # TODO(andresp): Fix exporting of loaded concrete functions as signatures. imported = cycle(root, cycles, signatures={}) self.assertAllEqual([6], imported.f(constant_op.constant([3])).numpy()) def test_concrete_function_backprop(self, cycles): @def_function.function( input_signature=[tensor_spec.TensorSpec([None], dtypes.float32)]) def func(x): return x ** 2. root = tracking.AutoTrackable() root.f = func.get_concrete_function() def _compute_gradient(function): with backprop.GradientTape() as tape: inp = constant_op.constant(1.) tape.watch(inp) output = function(inp) return tape.gradient(output, inp) self.assertEqual(2., _compute_gradient(root.f).numpy()) # TODO(andresp): Fix exporting of loaded concrete functions as signatures. imported = cycle(root, cycles, signatures={}) self.assertEqual(2., _compute_gradient(imported.f).numpy()) def test_revived_concrete_function_kwargs(self, cycles): @def_function.function def func(x, y): return x * (y + 1.) root = tracking.AutoTrackable() root.f = func.get_concrete_function( tensor_spec.TensorSpec([], dtypes.float32), tensor_spec.TensorSpec([], dtypes.float32)) self.assertEqual(8., root.f(y=constant_op.constant(3.), x=constant_op.constant(2.)).numpy()) # TODO(andresp): Fix exporting of loaded concrete functions as signatures. imported = cycle(root, cycles, signatures={}) self.assertEqual(8., imported.f(y=constant_op.constant(3.), x=constant_op.constant(2.)).numpy()) def test_revived_concrete_function_tensorspec_kwargs(self, cycles): @def_function.function def func(*args): x, y = args return x * (y + 1.) 
root = tracking.AutoTrackable() root.f = func.get_concrete_function( tensor_spec.TensorSpec([], dtypes.float32, name="x"), tensor_spec.TensorSpec([], dtypes.float32, name="y")) self.assertEqual(8., root.f(y=constant_op.constant(3.), x=constant_op.constant(2.)).numpy()) imported = cycle(root, cycles, signatures={}) self.assertEqual(8., imported.f(y=constant_op.constant(3.), x=constant_op.constant(2.)).numpy()) def test_concrete_function_variable_argument(self, cycles): # TODO(allenl): Fix variables in input signatures. self.skipTest("Need to fix encoding of variables in inputs signatures") capture = variables.Variable(0) @def_function.function def func(v): v.assign_add(1) capture.assign_sub(1) vsave = variables.Variable(1) root = tracking.AutoTrackable() root.f = func.get_concrete_function(vsave) root.capture = capture self.assertEqual(1, vsave.numpy()) root.f(vsave) self.assertEqual(2, vsave.numpy()) self.assertEqual(-1, capture.numpy()) imported = cycle(root, cycles) vload = variables.Variable(1) imported.f(vload) self.assertEqual(2, vload.numpy()) imported.f(v=vload) self.assertEqual(3, vload.numpy()) self.assertEqual(-3, imported.capture.numpy()) self.assertEqual(-1, capture.numpy()) def test_function_and_component(self, cycles): @def_function.function def func(v): return v + 1 root = tracking.AutoTrackable() root.func = func root.concrete_func = func.get_concrete_function( tensor_spec.TensorSpec(None, dtypes.int32)) one = constant_op.constant(1) self.assertEqual(2, root.func(one).numpy()) self.assertEqual(2, root.concrete_func(one).numpy()) imported = cycle(root, cycles) self.assertEqual(2, imported.func(one).numpy()) self.assertEqual(2, imported.concrete_func(one).numpy()) def test_dict(self, cycles): root = tracking.AutoTrackable() root.variables = dict(a=variables.Variable(1.)) root.variables["b"] = variables.Variable(2.) root.variables["c"] = 1 root.funcs = dict( a=def_function.function(lambda: constant_op.constant(100.))) root.funcs["conc"] = root.funcs["a"].get_concrete_function() imported = cycle(root, cycles) self.assertEqual(1., imported.variables["a"].numpy()) self.assertEqual(2., imported.variables["b"].numpy()) self.assertEqual(set(["a", "b"]), set(imported.variables.keys())) self.assertEqual(100., imported.funcs["a"]().numpy()) self.assertEqual(100., imported.funcs["conc"]().numpy()) def test_list(self, cycles): root = tracking.AutoTrackable() root.variables = [variables.Variable(1.)] root.variables.append(1) root.variables.append(variables.Variable(3.)) imported = cycle(root, cycles) self.assertEqual(1., imported.variables[0].numpy()) self.assertEqual(3., imported.variables[2].numpy()) self.assertIs(None, imported.variables[1]) self.assertEqual(3, len(imported.variables)) def test_functions_list(self, cycles): root = tracking.AutoTrackable() v1 = variables.Variable(1.) root.losses = [def_function.function(lambda: math_ops.reduce_sum(v1 ** 2))] root.variables = [v1] @def_function.function def _v2_loss(): if len(root.variables) == 1: v2 = variables.Variable(2.) root.variables.append(v2) return math_ops.reduce_sum(root.variables[1] ** 2) root.losses.append(_v2_loss) self.assertAllClose([1., 4.], [loss() for loss in root.losses]) imported = cycle(root, cycles) self.assertAllClose([1., 4.], [loss() for loss in imported.losses]) imported.variables[0].assign(3.) imported.variables[1].assign(4.) 
self.assertAllClose([9., 16.], [loss() for loss in imported.losses]) def test_captured_constant(self, cycles): const = array_ops.zeros([100]) root = tracking.AutoTrackable() root.f = def_function.function(lambda: const + 1.) root.g = def_function.function(lambda: const + 2.) self.assertAllClose(array_ops.ones([100]), root.f()) self.assertAllClose(2. * array_ops.ones([100]), root.g()) imported = cycle(root, cycles) self.assertAllClose(array_ops.ones([100]), imported.f()) self.assertAllClose(2. * array_ops.ones([100]), imported.g()) # TODO(b/123408994): Use the public get_concrete_function. f_concrete = imported.f._list_all_concrete_functions_for_serialization()[0] g_concrete = imported.g._list_all_concrete_functions_for_serialization()[0] self.assertLen(f_concrete.captured_inputs, 1) self.assertLen(g_concrete.captured_inputs, 1) # We should be using the same captured EagerTensor in both functions, not # duplicating the constant. self.assertIs(f_concrete.captured_inputs[0], g_concrete.captured_inputs[0]) def test_functions_accessed_once(self, cycles): class Exported(tracking.AutoTrackable): def __init__(self): self._counter = 0 @property def make_func(self): @def_function.function def f(): return constant_op.constant(self._counter) f.get_concrete_function() # force a trace self._counter += 1 return f exported = Exported() imported = cycle(exported, cycles) self.assertEqual(0, imported.make_func().numpy()) self.assertEqual(1, exported.make_func().numpy()) def test_overwritten_signatures_error(self, cycles): exported = tracking.AutoTrackable() exported.f = def_function.function(lambda: constant_op.constant(1.)) imported = cycle( exported, cycles, signatures={"key": exported.f.get_concrete_function()}) self.assertEqual(1., imported.signatures["key"]()["output_0"].numpy()) imported.signatures = {"key1": imported.signatures["key"]} with self.assertRaisesRegexp(ValueError, "signatures"): save.save(imported, tempfile.mkdtemp(prefix=self.get_temp_dir())) def test_signature_loading(self, cycles): class Exported(tracking.AutoTrackable): def __init__(self): self.v = variables.Variable(3.) @def_function.function def do(self, x): return self.v * x exported = Exported() imported = cycle( exported, cycles=1, signatures=exported.do.get_concrete_function( tensor_spec.TensorSpec(None, dtypes.float32))) for _ in range(cycles - 1): imported = cycle(imported, cycles=1, signatures=imported.signatures) self.assertEqual(["serving_default"], list(imported.signatures.keys())) imported_function = imported.signatures["serving_default"] two = constant_op.constant(2.) self.assertEqual(6., imported_function(x=two)["output_0"].numpy()) imported.v.assign(4.) 
self.assertEqual(8., imported_function(x=two)["output_0"].numpy()) self.assertEqual(8., imported_function(two)["output_0"].numpy()) with self.assertRaises(TypeError): # The signatures mapping is immutable imported.signatures["random_key"] = 3 def test_multiple_argument_signatures_no_positional(self, cycles): class Exported(tracking.AutoTrackable): @def_function.function def do(self, x, y): return x + y exported = Exported() imported = cycle( exported, cycles=1, signatures=exported.do.get_concrete_function( tensor_spec.TensorSpec(None, dtypes.float32), tensor_spec.TensorSpec(None, dtypes.float32))) for _ in range(cycles - 1): imported = cycle(imported, cycles=1, signatures=imported.signatures) with self.assertRaises(TypeError): imported.signatures["serving_default"]( constant_op.constant(1.), y=constant_op.constant(2.)) self.assertEqual( {"output_0": 3.}, self.evaluate(imported.signatures["serving_default"]( x=constant_op.constant(1.), y=constant_op.constant(2.)))) def _make_model_with_tables(self): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table1_initializer = lookup_ops.KeyValueTensorInitializer(keys, values) table1 = lookup_ops.HashTable(table1_initializer, default_val) table2_file = self._make_asset("test\nfoo\nbrain\n") table2_initializer = lookup_ops.TextFileIdTableInitializer(table2_file) table2 = lookup_ops.HashTable(table2_initializer, default_val) def _make_lookup_function(table): signature = [tensor_spec.TensorSpec(None, dtypes.string)] return def_function.function(input_signature=signature)( lambda x: table.lookup(x)) # pylint: disable=unnecessary-lambda root = tracking.AutoTrackable() root.table1 = table1 root.lookup1 = _make_lookup_function(table1) root.table2 = table2 root.lookup2 = _make_lookup_function(table2) return root def test_table(self, cycles): root = self._make_model_with_tables() imported = cycle(root, cycles, signatures={}) keys = constant_op.constant(["brain", "test", "foo", "surgery"]) self.assertAllEqual([0, -1, -1, 2], imported.lookup1(keys).numpy()) self.assertAllEqual([2, 0, 1, -1], imported.lookup2(keys).numpy()) def test_table_collections_untouched_eager(self, cycles): def _gather_nonempty_collections(): graph = ops.get_default_graph() gathered = {} for collection in graph.collections: collection_contents = graph.get_collection(collection) if collection_contents: gathered[collection] = collection_contents return gathered root = self._make_model_with_tables() # Warm up collections to ignore those that don't expand every iteration, # e.g. the __varscope collection. 
cycle(root, 1) original_collections = _gather_nonempty_collections() cycle(root, cycles) self.assertEqual(original_collections, _gather_nonempty_collections()) def test_table_in_graph(self, cycles): root = self._make_model_with_tables() if cycles > 1: root = cycle(root, cycles - 1) path = tempfile.mkdtemp(prefix=self.get_temp_dir()) save.save(root, path) imported = cycle(root, 1) with ops.Graph().as_default(): imported = load.load(path) keys = constant_op.constant(["brain", "test", "foo", "surgery"]) output1 = imported.lookup1(keys) output2 = imported.lookup2(keys) with monitored_session.MonitoredSession() as sess: self.assertAllEqual([0, -1, -1, 2], sess.run(output1)) self.assertAllEqual([2, 0, 1, -1], sess.run(output2)) def test_perserve_argspec(self, cycles): def f(a, b, c): # pylint: disable=unused-argument return None original_fullargspec = tf_inspect.getfullargspec(f) root = tracking.AutoTrackable() root.f = def_function.function(f) imported = cycle(root, cycles) restored_fullargspec = tf_inspect.getfullargspec(imported.f) self.assertEqual(original_fullargspec, restored_fullargspec) def test_canonicalize_inputs(self, cycles): @def_function.function(autograph=False) def func(a=1, b=2, c=3, training=True): if training: return [a, b, c, training] else: return [c, b, a, training] # TODO(b/123501567): Work-around to trigger generic traces of a function # with extra non tensor args. signature = 3*[tensor_spec.TensorSpec(None, dtypes.float32)] @def_function.function(input_signature=signature) def trigger(a, b, c): func(a, b, c, True) func(a, b, c, False) trigger.get_concrete_function() root = tracking.AutoTrackable() root.f = func root = cycle(root, cycles) self.assertAllEqual(root.f(), [1.0, 2.0, 3.0, True]) self.assertAllEqual(root.f(-1.0, training=False), [3.0, 2.0, -1.0, False]) with self.assertRaisesRegexp(ValueError, "Could not find matching function"): root.f(["hello", 1.0]) def test_prefer_specific_trace(self, cycles): @def_function.function(autograph=False) def func(a): if isinstance(a, int): return a else: return a + 1 self.assertAllEqual(2, func(2).numpy()) self.assertAllEqual(3, func(constant_op.constant(2)).numpy()) root = tracking.AutoTrackable() root.f = func root = cycle(root, cycles) self.assertAllEqual(2, root.f(2).numpy()) self.assertAllEqual(4, root.f(3).numpy()) self.assertAllEqual(3, root.f(constant_op.constant(2)).numpy()) self.assertAllEqual(4, root.f(constant_op.constant(3)).numpy()) def test_partial(self, cycles): def f(x, y): return x + y func = def_function.function( functools.partial(f, x=array_ops.zeros([1]), y=array_ops.ones([1]))) root = tracking.AutoTrackable() root.f = func self.assertAllEqual(root.f(), [1.0]) root = cycle(root, cycles) self.assertAllEqual(root.f(), [1.0]) def test_partial_with_non_tensor_defaults(self, cycles): def f(x, y=3): return x + y func = def_function.function(functools.partial(f, y=5)) root = tracking.AutoTrackable() root.f = func self.assertAllEqual(root.f(1), 6) root = cycle(root, cycles) self.assertAllEqual(root.f(1), 6) def test_partial_with_positional(self, cycles): def f(x, y): return x + y func = def_function.function(functools.partial(f, constant_op.constant(5))) root = tracking.AutoTrackable() root.f = func self.assertAllEqual(root.f(1), 6) root = cycle(root, cycles) self.assertAllEqual(root.f(1), 6) def test_partial_with_positional_captured_tensors(self, cycles): def f(x, y): return x + y tensor = constant_op.constant(5) + constant_op.constant(7) func = def_function.function(functools.partial(f, tensor)) root = 
tracking.AutoTrackable() root.f = func self.assertAllEqual(root.f(1), 13) root = cycle(root, cycles) self.assertAllEqual(root.f(1), 13) def test_partial_keyword_hiding_default(self, cycles): def f(x=3, training=True, y=7): if training: return x + y else: return x + y + 2 func = def_function.function(functools.partial(f, y=6)) root = tracking.AutoTrackable() root.f = func self.assertEqual(root.f().numpy(), 9) self.assertEqual(root.f(training=False).numpy(), 11) root = cycle(root, cycles) self.assertEqual(root.f().numpy(), 9) self.assertEqual(root.f(training=False).numpy(), 11) def test_partial_with_kwargs(self, cycles): def f(a, b, *args, **kwargs): args_sum = sum(args) return a + b + kwargs["some_tensor"] * kwargs["learning_rate"] + args_sum constant_tensor = constant_op.constant(10) func = def_function.function( functools.partial( f, 7, 1, 2, learning_rate=3, some_tensor=constant_tensor)) root = tracking.AutoTrackable() root.f = func self.assertEqual(root.f(constant_op.constant(4)).numpy(), 44) root = cycle(root, cycles) self.assertEqual(root.f(constant_op.constant(5)).numpy(), 45) def test_partial_bind_only_first_argument(self, cycles): if sys.version_info[0] < 3: self.skipTest("Test is only valid in python3. Only then we get some more " "advanced inspection of partials where this is allowed.") def f(x, y): return x + y partial_func = functools.partial(f, x=5) tf_func = def_function.function(partial_func) root = tracking.AutoTrackable() root.f = tf_func self.assertAllEqual(root.f(y=constant_op.constant(7)), 12) root = cycle(root, cycles) self.assertAllEqual(root.f(y=constant_op.constant(9)), 14) def test_partial_with_passed_fn_as_default(self, cycles): def f(x, y): return x(3) + y def my_func(a): return 2 * a func = def_function.function(functools.partial(f, my_func)) root = tracking.AutoTrackable() root.f = func self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9) root = cycle(root, cycles) self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9) def test_partial_with_input_signature(self, cycles): def full_function(a, b, c=3.0): return a, b, c partial = functools.partial(full_function, 1, c=4) self.assertAllEqual((1, 2.0, 4), partial(2.0)) signature = [tensor_spec.TensorSpec([], dtypes.float32)] func = def_function.function(partial, input_signature=signature) root = tracking.AutoTrackable() root.f = func a, b, c = root.f(2.0) self.assertAllEqual([a.numpy(), b.numpy(), c.numpy()], (1, 2.0, 4)) root = cycle(root, cycles) a, b, c = root.f(3.0) self.assertAllEqual([a.numpy(), b.numpy(), c.numpy()], (1, 3.0, 4)) def test_convert_to_input_signature(self, cycles): @def_function.function( input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]) def func(x): return x root = tracking.AutoTrackable() root.f = func root = cycle(root, cycles) self.assertEqual([2], root.f([2]).numpy()) def test_named_tuple(self, cycles): class NamedTupleType(collections.namedtuple("NamedTupleType", ["a", "b"])): pass @def_function.function def f(x): return x.a + x.b f.get_concrete_function( NamedTupleType( a=tensor_spec.TensorSpec(None, dtypes.float32, name="a"), b=tensor_spec.TensorSpec(None, dtypes.float32, name="b"))) obj = tracking.AutoTrackable() obj.__call__ = f if sys.version_info.major == 3 and sys.version_info.minor < 5: # TODO(allenl): figure out why this doesn't work in Python3.4 self.skipTest("Not working in Python 3.4") imported = cycle(obj, cycles) self.assertAllClose(3., imported(NamedTupleType(a=constant_op.constant(1.), b=constant_op.constant(2.)))) def test_extra_args(self, 
cycles): @def_function.function def f(x): return math_ops.add(x["a"], 1.) # Trigger a trace. f({"a": constant_op.constant(2.0)}) obj = tracking.AutoTrackable() obj.__call__ = f imported = cycle(obj, cycles) self.assertEqual(4.0, imported({"a": 3.0}).numpy()) with self.assertRaisesRegexp(ValueError, "Could not find matching function to call"): imported({"a": 2.0, "b": 3.0}) def test_shapes_available(self, cycles): @def_function.function(input_signature=[ tensor_spec.TensorSpec([None, 3], dtypes.int32), tensor_spec.TensorSpec([None, 2], dtypes.int32) ]) def func(x, y): return array_ops.concat([x, y], axis=1) root = tracking.AutoTrackable() root.f = func root = cycle(root, cycles) imported_graph = root.f.get_concrete_function().graph input_x, input_y = imported_graph.inputs self.assertEqual([None, 3], input_x.shape.as_list()) self.assertEqual([None, 2], input_y.shape.as_list()) output, = imported_graph.outputs self.assertEqual([None, 5], output.shape.as_list()) signature = root.signatures["serving_default"] self.assertEqual( [None, 3], signature.inputs[0].shape.as_list()) self.assertEqual( [None, 2], signature.inputs[1].shape.as_list()) self.assertEqual( [None, 5], signature.outputs[0].shape.as_list()) def test_variables_destroyed(self, cycles): v1 = variables.Variable(1.) weak_v1 = weakref.ref(v1) root = util.Checkpoint(v=v1) root = cycle(root, cycles) del v1 self.assertIsNone(weak_v1()) weak_v2 = weakref.ref(root.v) del root self.assertIsNone(weak_v2()) def test_variable_attributes_preserved(self, cycles): v = variables.Variable( 1., trainable=False, synchronization=variables.VariableSynchronization.NONE, aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA) self.assertEqual(variables.VariableSynchronization.NONE, v.synchronization) self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA, v.aggregation) root = tracking.AutoTrackable() root.v = v root = cycle(root, cycles) self.assertEqual(False, root.v.trainable) self.assertEqual(variables.VariableSynchronization.NONE, root.v.synchronization) self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA, root.v.aggregation) def test_captured_dataset(self, cycles): class HasDataset(module.Module): def __init__(self): super(HasDataset, self).__init__() self.dataset = ( dataset_ops.Dataset.range(5) .map(lambda x: x ** 2)) @def_function.function def __call__(self, x): current_sum = array_ops.zeros([], dtype=dtypes.int64) for element in self.dataset: current_sum += x * element return current_sum root = HasDataset() self.assertEqual( 3 * (1 + 4 + 9 + 16), root(constant_op.constant(3, dtype=dtypes.int64)).numpy()) root = cycle(root, cycles) self.assertEqual( 3 * (1 + 4 + 9 + 16), root(constant_op.constant(3, dtype=dtypes.int64)).numpy()) def test_tuple_signature(self, cycles): root = util.Checkpoint() root.f = def_function.function( lambda: (array_ops.ones([]), array_ops.zeros([])), input_signature=()) for _ in range(cycles): root = cycle(root, 1, signatures=root.f) self.assertEqual(({"output_0": 1., "output_1": 0.}), self.evaluate(root.signatures["serving_default"]())) def test_model_with_custom_function_attached(self, cycles): root = util.Checkpoint(model=sequential.Sequential([core.Dense(2)])) @def_function.function def _use_sequential(x): return root.model.call(x) root.model.traced_call = _use_sequential original = root.model.traced_call(array_ops.zeros([1, 1])).numpy() root = cycle(root, cycles) self.assertAllEqual( original, root.model.traced_call(array_ops.zeros([1, 1])).numpy()) def test_version_info(self, cycles): 
root = util.Checkpoint() root = cycle(root, cycles) self.assertEqual(versions.__version__, root.tensorflow_version) self.assertEqual(versions.__git_version__, root.tensorflow_git_version) def test_load_grad_save(self, cycles): root = util.Checkpoint() root.v = variables.Variable(2.) root.f = def_function.function(lambda x: root.v * x) root.g = def_function.function(root.f) for _ in range(cycles): with backprop.GradientTape() as tape: inp = constant_op.constant(2.) tape.watch(inp) output = root.g(inp) self.assertAllClose(4., output) self.assertAllClose(2., tape.gradient(output, inp)) root = cycle(root, 1) def test_destroy_resource(self, cycles): def get_handle(): return gen_resource_variable_ops.var_handle_op( shape=tensor_shape.as_shape([]), dtype=dtypes.float32, shared_name="my_var_name", name="my_var", container="my_container") class MyResourceDeleter(tracking.CapturableResourceDeleter): def destroy_resource(self): handle = get_handle() gen_resource_variable_ops.destroy_resource_op( handle, ignore_lookup_error=True) class MyResource(tracking.TrackableResource): def __init__(self): # Set the resource deleter, so when the resource object goes out of # scope it will be deleted automatically. super(MyResource, self).__init__(deleter=MyResourceDeleter()) def _create_resource(self): return get_handle() def _initialize(self): gen_resource_variable_ops.assign_variable_op( self.resource_handle, 1.0, name="assign") class MyModel(tracking.AutoTrackable): def __init__(self): super(MyModel, self).__init__() self.resource = MyResource() @def_function.function(input_signature=[]) def increase(self): handle = self.resource.resource_handle gen_resource_variable_ops.assign_add_variable_op( handle, 10.0, name="assign_add") return gen_resource_variable_ops.read_variable_op( handle, dtypes.float32) root = MyModel() imported = cycle(root, cycles) self.assertEqual(11, imported.increase().numpy()) # Create the resource. handle = imported.resource.resource_handle # Delete the imported SaveModel. Since we explicitly set the deleter, it # should destroy the resource automatically. del imported # Try to destroy the resource again, should fail. with self.assertRaisesRegexp(errors.NotFoundError, r"Resource .* does not exist."): gen_resource_variable_ops.destroy_resource_op( handle, ignore_lookup_error=False) def test_function_called_as_operation(self, cycles): @framework_function.Defun(dtypes.float32) def inner(x): return x + 1. 
@def_function.function( input_signature=[tensor_spec.TensorSpec([], dtypes.float32)]) def outer(x): return inner(x) root = module.Module() root.f = outer imported = cycle(root, cycles) self.assertAllClose(2., imported.f(constant_op.constant(1.))) def test_ragged(self, cycles): @def_function.function(input_signature=[ ragged_tensor.RaggedTensorSpec(shape=[None, None], dtype=dtypes.int32) ]) def f(x): return x + 1 obj = tracking.AutoTrackable() obj.f = f imported1 = cycle(obj, cycles, signatures={}) rt = ragged_factory_ops.constant([[1, 2], [3]]) self.assertAllEqual(imported1.f(rt), [[2, 3], [4]]) imported2 = cycle(obj, cycles) rt = ragged_factory_ops.constant([[1, 2], [3]]) self.assertAllEqual(imported2.f(rt), [[2, 3], [4]]) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @parameterized.named_parameters( dict(testcase_name="ReloadOnce", cycles=1), dict(testcase_name="ReloadTwice", cycles=2), dict(testcase_name="ReloadThrice", cycles=3)) class KerasLoadTest(test.TestCase, parameterized.TestCase): def test_dense_features_layer(self, cycles): columns = [ feature_column_lib.numeric_column("x"), feature_column_lib.numeric_column("y") ] layer = feature_column_lib.DenseFeatures(columns) model = sequential.Sequential([layer]) model_input = {"x": constant_op.constant([[1.]]), "y": constant_op.constant([[2.]])} self.assertAllClose([[1., 2.]], model.predict(model_input, steps=1)) loaded = cycle(model, cycles) output, = loaded._default_save_signature(model_input).values() self.assertAllClose([[1., 2.]], output) signature_output, = loaded.signatures["serving_default"]( **model_input).values() self.assertAllClose([[1., 2.]], signature_output) def test_dense_features_layer_fit(self, cycles): columns = [feature_column_lib.numeric_column("x")] model = sequential.Sequential( [feature_column_lib.DenseFeatures(columns), core.Dense(1)]) model_input = {"x": constant_op.constant([[1.]])} model.compile(optimizer="adam", loss="mse", run_eagerly=True, experimental_run_tf_function=True) model.fit(model_input, constant_op.constant([[3.]])) loaded = cycle(model, cycles) loaded._default_save_signature(model_input) loaded.signatures["serving_default"](**model_input) def test_multi_output_layer(self, cycles): inp = input_layer.Input(name="inp", shape=(None,), dtype=dtypes.float32) class _MultiOutput(base_layer.Layer): def call(self, x): return x + 1., x + 2. 
out = _MultiOutput(name="out")(inp) model = training_lib.Model(inp, out) loaded = cycle(model, cycles) self.assertAllClose( dict(out=2., out_1=3.), loaded.signatures["serving_default"](constant_op.constant(1.))) def test_functional_model_with_conv(self, cycles): x = input_layer.Input(name="x", shape=(None, None, 3), dtype=dtypes.float32) conved = convolutional.Conv2D(filters=3, kernel_size=3, dilation_rate=2)(x) model = training_lib.Model([x], conved) model_input = array_ops.ones((1, 10, 10, 3)) initial_output = model.predict([model_input]) model = cycle(model, cycles) self.assertAllClose( [initial_output], list(model.signatures["serving_default"](model_input).values())) class SingleCycleTests(test.TestCase, parameterized.TestCase): def test_load_with_tags(self): root = tracking.AutoTrackable() path = tempfile.mkdtemp(prefix=self.get_temp_dir()) save.save(root, path) with self.assertRaises(ValueError): load.load(path, tags=[tag_constants.EVAL]) load.load(path, tags=[tag_constants.SERVING]) load.load(path, tags=tag_constants.SERVING) load.load(path, tags=set([tag_constants.SERVING])) def test_docstring_examples(self): path = tempfile.mkdtemp(prefix=self.get_temp_dir()) exported = util.Checkpoint(v=variables.Variable(3.)) exported.f = def_function.function( lambda x: exported.v * x, input_signature=[ tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)]) save.save(exported, path) imported = load.load(path) self.assertEqual(3., imported.v.numpy()) self.assertEqual(6., imported.f(x=constant_op.constant(2.)).numpy()) save.save(exported, path, exported.f.get_concrete_function()) imported = load.load(path) f = imported.signatures["serving_default"] self.assertAllEqual( [[-3.]], f(x=constant_op.constant([[-1.]]))["output_0"].numpy()) def test_object_with_extra_dependencies(self): class Extra(tracking.AutoTrackable): def _list_extra_dependencies_for_serialization(self, cache): if self not in cache: cache[self] = {"a": variables.Variable(5.)} return cache[self] root = Extra() path = tempfile.mkdtemp(prefix=self.get_temp_dir()) save.save(root, path) imported = load.load(path) self.assertEqual(5, self.evaluate(imported.a)) root.a = variables.Variable(3.) with self.assertRaisesRegexp( ValueError, "object has an attribute named a, which is reserved."): save.save(root, path) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/load_test.py
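# --- Illustrative sketch, not part of the dataset entry above ---
# The tests in load_test.py exercise many variations of one core round trip:
# attach variables and tf.functions to a trackable object, save it, reload it,
# and call the reloaded functions. A minimal standalone version, assuming the
# same TF 1.15-era internal modules the test file imports (the public TF 2.x
# equivalents are tf.saved_model.save and tf.saved_model.load):
import tempfile

from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.training.tracking import tracking

ops.enable_eager_execution()  # these APIs are eager-first; required under TF 1.x

root = tracking.AutoTrackable()
root.v = variables.Variable(2.)
root.f = def_function.function(
    lambda x: root.v * x,
    input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])

path = tempfile.mkdtemp()
save.save(root, path)

imported = load.load(path)
print(imported.v.numpy())                             # 2.0
print(imported.f(constant_op.constant(3.)).numpy())   # 6.0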
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Loader implementation for SavedModel with hermetic, language-neutral exports. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from google.protobuf import message from google.protobuf import text_format from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.core.protobuf import saved_model_pb2 from tensorflow.python.framework import ops from tensorflow.python.lib.io import file_io from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging from tensorflow.python.saved_model import constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import utils_impl as saved_model_utils from tensorflow.python.training import saver as tf_saver from tensorflow.python.util import compat from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export def parse_saved_model(export_dir): """Reads the savedmodel.pb or savedmodel.pbtxt file containing `SavedModel`. Args: export_dir: Directory containing the SavedModel file. Returns: A `SavedModel` protocol buffer. Raises: IOError: If the file does not exist, or cannot be successfully parsed. """ # Build the path to the SavedModel in pbtxt format. path_to_pbtxt = os.path.join( compat.as_bytes(export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)) # Build the path to the SavedModel in pb format. path_to_pb = os.path.join( compat.as_bytes(export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB)) # Parse the SavedModel protocol buffer. saved_model = saved_model_pb2.SavedModel() if file_io.file_exists(path_to_pb): try: file_content = file_io.FileIO(path_to_pb, "rb").read() saved_model.ParseFromString(file_content) return saved_model except message.DecodeError as e: raise IOError("Cannot parse file %s: %s." % (path_to_pb, str(e))) elif file_io.file_exists(path_to_pbtxt): try: file_content = file_io.FileIO(path_to_pbtxt, "rb").read() text_format.Merge(file_content.decode("utf-8"), saved_model) return saved_model except text_format.ParseError as e: raise IOError("Cannot parse file %s: %s." % (path_to_pbtxt, str(e))) else: raise IOError("SavedModel file does not exist at: %s/{%s|%s}" % (export_dir, constants.SAVED_MODEL_FILENAME_PBTXT, constants.SAVED_MODEL_FILENAME_PB)) # TODO(b/120594573): Make this symbol also available as private, so that # tensorflow_transform and tensorflow_estimator do not break. _parse_saved_model = parse_saved_model def get_asset_tensors(export_dir, meta_graph_def_to_load, import_scope=None): """Gets the asset tensors, if defined in the meta graph def to load. Args: export_dir: Directory where the SavedModel is located. meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded. 
import_scope: Optional `string` -- if specified, prepend this followed by '/' to all returned asset tensor names. Returns: A dictionary of asset tensors, keyed by the name of the asset tensor. The value in the map corresponds to the absolute path of the asset file. """ # Collection-def that may contain the assets key. collection_def = meta_graph_def_to_load.collection_def asset_tensor_dict = {} asset_protos = [] if meta_graph_def_to_load.asset_file_def: asset_protos = meta_graph_def_to_load.asset_file_def elif constants.ASSETS_KEY in collection_def: assets_any_proto = collection_def[constants.ASSETS_KEY].any_list.value for asset_any_proto in assets_any_proto: asset_proto = meta_graph_pb2.AssetFileDef() asset_any_proto.Unpack(asset_proto) asset_protos.append(asset_proto) # Location of the assets for SavedModel. assets_directory = os.path.join( compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY)) # Process each asset and add it to the asset tensor dictionary. for asset_proto in asset_protos: tensor_name = asset_proto.tensor_info.name if import_scope: tensor_name = "%s/%s" % (import_scope, tensor_name) asset_tensor_dict[tensor_name] = os.path.join( compat.as_bytes(assets_directory), compat.as_bytes(asset_proto.filename)) return asset_tensor_dict def _get_main_op_tensor( meta_graph_def_to_load, init_op_key=constants.MAIN_OP_KEY): """Gets the main op tensor, if one exists. Args: meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded. init_op_key: name of collection to check; should be one of MAIN_OP_KEY or the deprecated LEGACY_INIT_OP_KEY Returns: The main op tensor, if it exists and `None` otherwise. Raises: RuntimeError: If the collection def corresponding to the main op key has other than exactly one tensor. """ # TODO(kathywu): Rename this method to _get_op_from_collection when # dependency from SavedModelEstimator is removed. collection_def = meta_graph_def_to_load.collection_def init_op = None if init_op_key in collection_def: init_op_list = collection_def[init_op_key].node_list.value if len(init_op_list) != 1: raise RuntimeError("Expected exactly one SavedModel init op. 
" "Found: {}".format(init_op_list)) init_op = ops.get_collection(init_op_key)[0] return init_op def _get_op_from_collection(meta_graph_def, op_key): return _get_main_op_tensor(meta_graph_def, op_key) def _get_op_from_signature_def(meta_graph_def, op_signature_key, import_scope): """Retrieve op stored in the imported meta graph's signature def.""" if op_signature_key in meta_graph_def.signature_def: return signature_def_utils.load_op_from_signature_def( meta_graph_def.signature_def[op_signature_key], op_signature_key, import_scope) else: return None def get_init_op(meta_graph_def, import_scope=None): return (_get_op_from_signature_def( meta_graph_def, constants.INIT_OP_SIGNATURE_KEY, import_scope) or _get_op_from_collection(meta_graph_def, constants.MAIN_OP_KEY) or _get_op_from_collection(meta_graph_def, constants.LEGACY_INIT_OP_KEY)) def get_train_op(meta_graph_def, import_scope=None): train_op = _get_op_from_signature_def( meta_graph_def, constants.TRAIN_OP_SIGNATURE_KEY, import_scope) if train_op is None: train_op = _get_op_from_collection(meta_graph_def, constants.TRAIN_OP_KEY) return train_op @tf_export(v1=[ "saved_model.contains_saved_model", "saved_model.maybe_saved_model_directory", "saved_model.loader.maybe_saved_model_directory" ]) @deprecation.deprecated_endpoints( "saved_model.loader.maybe_saved_model_directory") def maybe_saved_model_directory(export_dir): """Checks whether the provided export directory could contain a SavedModel. Note that the method does not load any data by itself. If the method returns `false`, the export directory definitely does not contain a SavedModel. If the method returns `true`, the export directory may contain a SavedModel but provides no guarantee that it can be loaded. Args: export_dir: Absolute string path to possible export location. For example, '/my/foo/model'. Returns: True if the export directory contains SavedModel files, False otherwise. """ txt_path = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PBTXT) pb_path = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB) return file_io.file_exists(txt_path) or file_io.file_exists(pb_path) @tf_export("saved_model.contains_saved_model", v1=[]) def contains_saved_model(export_dir): """Checks whether the provided export directory could contain a SavedModel. Note that the method does not load any data by itself. If the method returns `false`, the export directory definitely does not contain a SavedModel. If the method returns `true`, the export directory may contain a SavedModel but provides no guarantee that it can be loaded. Args: export_dir: Absolute string path to possible export location. For example, '/my/foo/model'. Returns: True if the export directory contains SavedModel files, False otherwise. """ return maybe_saved_model_directory(export_dir) @tf_export(v1=["saved_model.load", "saved_model.loader.load"]) @deprecation.deprecated( None, "This function will only be available through the v1 compatibility " "library as tf.compat.v1.saved_model.loader.load or " "tf.compat.v1.saved_model.load. There will be a new function for importing " "SavedModels in Tensorflow 2.0.") def load(sess, tags, export_dir, import_scope=None, **saver_kwargs): """Loads the model from a SavedModel as specified by tags. Args: sess: The TensorFlow session to restore the variables. tags: Set of string tags to identify the required MetaGraphDef. These should correspond to the tags used when saving the variables using the SavedModel `save()` API. 
export_dir: Directory in which the SavedModel protocol buffer and variables to be loaded are located. import_scope: Optional `string` -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static `MetaGraphDef` protocol buffer that is returned. **saver_kwargs: Optional keyword arguments passed through to Saver. Returns: The `MetaGraphDef` protocol buffer loaded in the provided session. This can be used to further extract signature-defs, collection-defs, etc. Raises: RuntimeError: MetaGraphDef associated with the tags cannot be found. """ loader = SavedModelLoader(export_dir) return loader.load(sess, tags, import_scope, **saver_kwargs) class SavedModelLoader(object): """Load graphs and restore variable values from a `SavedModel`.""" def __init__(self, export_dir): """Creates a `SavedModelLoader`. Args: export_dir: Directory in which the SavedModel protocol buffer and variables to be loaded are located. """ self._export_dir = export_dir self._variables_path = saved_model_utils.get_variables_path(export_dir) self._saved_model = parse_saved_model(export_dir) @property def export_dir(self): """Directory containing the SavedModel.""" return self._export_dir @property def variables_path(self): """Path to variable checkpoint files.""" return self._variables_path @property def saved_model(self): """SavedModel object parsed from the export directory.""" return self._saved_model def get_meta_graph_def_from_tags(self, tags): """Return MetaGraphDef with the exact specified tags. Args: tags: A list or set of string tags that identify the MetaGraphDef. Returns: MetaGraphDef with the same tags. Raises: RuntimeError: if no metagraphs were found with the associated tags. """ found_match = False available_tags = [] for meta_graph_def in self._saved_model.meta_graphs: available_tags.append(set(meta_graph_def.meta_info_def.tags)) if set(meta_graph_def.meta_info_def.tags) == set(tags): meta_graph_def_to_load = meta_graph_def found_match = True break if not found_match: raise RuntimeError( "MetaGraphDef associated with tags " + str(tags).strip("[]") + " could not be found in SavedModel. To inspect available tag-sets in" " the SavedModel, please use the SavedModel CLI: `saved_model_cli`" "\navailable_tags: " + str(available_tags)) return meta_graph_def_to_load def load_graph(self, graph, tags, import_scope=None, **saver_kwargs): """Load ops and nodes from SavedModel MetaGraph into graph. Args: graph: tf.Graph object. tags: a set of string tags identifying a MetaGraphDef. import_scope: Optional `string` -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static `MetaGraphDef` protocol buffer that is returned. **saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph. Returns: A tuple of * Saver defined by the MetaGraph, which can be used to restore the variable values. * List of `Operation`/`Tensor` objects returned from `tf.import_graph_def` (may be `None`). """ meta_graph_def = self.get_meta_graph_def_from_tags(tags) with graph.as_default(): return tf_saver._import_meta_graph_with_return_elements( # pylint: disable=protected-access meta_graph_def, import_scope=import_scope, **saver_kwargs) def restore_variables(self, sess, saver, import_scope=None): """Restore SavedModel variable values into the session. 
Args: sess: tf.compat.v1.Session to restore variable values. saver: a tf.compat.v1.train.Saver object. Can be None if there are no variables in graph. This may be the saver returned by the load_graph() function, or a default `tf.compat.v1.train.Saver()`. import_scope: Optional `string` -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static `MetaGraphDef` protocol buffer that is returned. Raises: ValueError: if no saver was passed to the saver argument, and there are variables in the graph. """ with sess.graph.as_default(): if (saver is None and not variables._all_saveable_objects(scope=import_scope)): # pylint: disable=protected-access tf_logging.info("The specified SavedModel has no variables; no " "checkpoints were restored.") elif isinstance(saver, tf_saver.Saver): saver.restore(sess, self._variables_path) else: raise ValueError( "No tf.train.Saver object was passed to the function " "SavedModelLoader.restore_variables. Since there are variables in " "the graph, a saver is required.") def run_init_ops(self, sess, tags, import_scope=None): """Run initialization ops defined in the `MetaGraphDef`. Args: sess: tf.compat.v1.Session to restore variable values. tags: a set of string tags identifying a MetaGraphDef. import_scope: Optional `string` -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static `MetaGraphDef` protocol buffer that is returned. """ meta_graph_def = self.get_meta_graph_def_from_tags(tags) with sess.graph.as_default(): # Get asset tensors, if any. asset_tensors_dictionary = get_asset_tensors( self._export_dir, meta_graph_def, import_scope=import_scope) init_op = get_init_op(meta_graph_def, import_scope) if init_op is not None: sess.run(fetches=[init_op], feed_dict=asset_tensors_dictionary) def load(self, sess, tags, import_scope=None, **saver_kwargs): """Load the MetaGraphDef graph and restore variable values into the session. Args: sess: tf.compat.v1.Session to restore variable values. tags: a set of string tags identifying a MetaGraphDef. import_scope: Optional `string` -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static `MetaGraphDef` protocol buffer that is returned. **saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph. Returns: `MetagraphDef` proto of the graph that was loaded. """ with sess.graph.as_default(): saver, _ = self.load_graph(sess.graph, tags, import_scope, **saver_kwargs) self.restore_variables(sess, saver, import_scope) self.run_init_ops(sess, tags, import_scope) return self.get_meta_graph_def_from_tags(tags)
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/loader_impl.py
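# --- Illustrative sketch, not part of the dataset entry above ---
# loader_impl.py implements the v1, session-based SavedModel loader. A small
# graph-mode example (assuming TF 1.x defaults): export a trivial SavedModel
# with SavedModelBuilder, then load it back, once with the one-shot load() and
# once with SavedModelLoader so the load_graph / restore_variables /
# run_init_ops phases are visible.
import os
import tempfile

from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import tag_constants

# SavedModelBuilder requires a not-yet-existing export directory.
export_dir = os.path.join(tempfile.mkdtemp(), "exported")

# Export: a single variable under the "serve" tag.
with ops.Graph().as_default():
  v = variables.Variable(3., name="v")
  with session_lib.Session() as sess:
    sess.run(variables.global_variables_initializer())
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    builder.add_meta_graph_and_variables(sess, [tag_constants.SERVING])
    builder.save()

# One-shot load into a fresh graph and session.
with ops.Graph().as_default():
  with session_lib.Session() as sess:
    loader_impl.load(sess, [tag_constants.SERVING], export_dir)
    restored = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]
    print(sess.run(restored))  # 3.0

# Phased load via SavedModelLoader.
loader = loader_impl.SavedModelLoader(export_dir)
with ops.Graph().as_default() as g:
  saver, _ = loader.load_graph(g, [tag_constants.SERVING])
  with session_lib.Session(graph=g) as sess:
    loader.restore_variables(sess, saver)
    loader.run_init_ops(sess, [tag_constants.SERVING])
    print(sess.run(g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]))  # 3.0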
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SignatureDef utility functions implementation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import utils_impl as utils from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export @tf_export( v1=[ 'saved_model.build_signature_def', 'saved_model.signature_def_utils.build_signature_def' ]) @deprecation.deprecated_endpoints( 'saved_model.signature_def_utils.build_signature_def') def build_signature_def(inputs=None, outputs=None, method_name=None): """Utility function to build a SignatureDef protocol buffer. Args: inputs: Inputs of the SignatureDef defined as a proto map of string to tensor info. outputs: Outputs of the SignatureDef defined as a proto map of string to tensor info. method_name: Method name of the SignatureDef as a string. Returns: A SignatureDef protocol buffer constructed based on the supplied arguments. """ signature_def = meta_graph_pb2.SignatureDef() if inputs is not None: for item in inputs: signature_def.inputs[item].CopyFrom(inputs[item]) if outputs is not None: for item in outputs: signature_def.outputs[item].CopyFrom(outputs[item]) if method_name is not None: signature_def.method_name = method_name return signature_def @tf_export( v1=[ 'saved_model.regression_signature_def', 'saved_model.signature_def_utils.regression_signature_def' ]) @deprecation.deprecated_endpoints( 'saved_model.signature_def_utils.regression_signature_def') def regression_signature_def(examples, predictions): """Creates regression signature from given examples and predictions. This function produces signatures intended for use with the TensorFlow Serving Regress API (tensorflow_serving/apis/prediction_service.proto), and so constrains the input and output types to those allowed by TensorFlow Serving. Args: examples: A string `Tensor`, expected to accept serialized tf.Examples. predictions: A float `Tensor`. Returns: A regression-flavored signature_def. Raises: ValueError: If examples is `None`. 
""" if examples is None: raise ValueError('Regression examples cannot be None.') if not isinstance(examples, ops.Tensor): raise ValueError('Regression examples must be a string Tensor.') if predictions is None: raise ValueError('Regression predictions cannot be None.') input_tensor_info = utils.build_tensor_info(examples) if input_tensor_info.dtype != types_pb2.DT_STRING: raise ValueError('Regression examples must be a string Tensor.') signature_inputs = {signature_constants.REGRESS_INPUTS: input_tensor_info} output_tensor_info = utils.build_tensor_info(predictions) if output_tensor_info.dtype != types_pb2.DT_FLOAT: raise ValueError('Regression output must be a float Tensor.') signature_outputs = {signature_constants.REGRESS_OUTPUTS: output_tensor_info} signature_def = build_signature_def( signature_inputs, signature_outputs, signature_constants.REGRESS_METHOD_NAME) return signature_def @tf_export( v1=[ 'saved_model.classification_signature_def', 'saved_model.signature_def_utils.classification_signature_def' ]) @deprecation.deprecated_endpoints( 'saved_model.signature_def_utils.classification_signature_def') def classification_signature_def(examples, classes, scores): """Creates classification signature from given examples and predictions. This function produces signatures intended for use with the TensorFlow Serving Classify API (tensorflow_serving/apis/prediction_service.proto), and so constrains the input and output types to those allowed by TensorFlow Serving. Args: examples: A string `Tensor`, expected to accept serialized tf.Examples. classes: A string `Tensor`. Note that the ClassificationResponse message requires that class labels are strings, not integers or anything else. scores: a float `Tensor`. Returns: A classification-flavored signature_def. Raises: ValueError: If examples is `None`. """ if examples is None: raise ValueError('Classification examples cannot be None.') if not isinstance(examples, ops.Tensor): raise ValueError('Classification examples must be a string Tensor.') if classes is None and scores is None: raise ValueError('Classification classes and scores cannot both be None.') input_tensor_info = utils.build_tensor_info(examples) if input_tensor_info.dtype != types_pb2.DT_STRING: raise ValueError('Classification examples must be a string Tensor.') signature_inputs = {signature_constants.CLASSIFY_INPUTS: input_tensor_info} signature_outputs = {} if classes is not None: classes_tensor_info = utils.build_tensor_info(classes) if classes_tensor_info.dtype != types_pb2.DT_STRING: raise ValueError('Classification classes must be a string Tensor.') signature_outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES] = ( classes_tensor_info) if scores is not None: scores_tensor_info = utils.build_tensor_info(scores) if scores_tensor_info.dtype != types_pb2.DT_FLOAT: raise ValueError('Classification scores must be a float Tensor.') signature_outputs[signature_constants.CLASSIFY_OUTPUT_SCORES] = ( scores_tensor_info) signature_def = build_signature_def( signature_inputs, signature_outputs, signature_constants.CLASSIFY_METHOD_NAME) return signature_def @tf_export( v1=[ 'saved_model.predict_signature_def', 'saved_model.signature_def_utils.predict_signature_def' ]) @deprecation.deprecated_endpoints( 'saved_model.signature_def_utils.predict_signature_def') def predict_signature_def(inputs, outputs): """Creates prediction signature from given inputs and outputs. 
This function produces signatures intended for use with the TensorFlow Serving Predict API (tensorflow_serving/apis/prediction_service.proto). This API imposes no constraints on the input and output types. Args: inputs: dict of string to `Tensor`. outputs: dict of string to `Tensor`. Returns: A prediction-flavored signature_def. Raises: ValueError: If inputs or outputs is `None`. """ if inputs is None or not inputs: raise ValueError('Prediction inputs cannot be None or empty.') if outputs is None or not outputs: raise ValueError('Prediction outputs cannot be None or empty.') signature_inputs = {key: utils.build_tensor_info(tensor) for key, tensor in inputs.items()} signature_outputs = {key: utils.build_tensor_info(tensor) for key, tensor in outputs.items()} signature_def = build_signature_def( signature_inputs, signature_outputs, signature_constants.PREDICT_METHOD_NAME) return signature_def def supervised_train_signature_def( inputs, loss, predictions=None, metrics=None): return _supervised_signature_def( signature_constants.SUPERVISED_TRAIN_METHOD_NAME, inputs, loss=loss, predictions=predictions, metrics=metrics) def supervised_eval_signature_def( inputs, loss, predictions=None, metrics=None): return _supervised_signature_def( signature_constants.SUPERVISED_EVAL_METHOD_NAME, inputs, loss=loss, predictions=predictions, metrics=metrics) def _supervised_signature_def( method_name, inputs, loss=None, predictions=None, metrics=None): """Creates a signature for training and eval data. This function produces signatures that describe the inputs and outputs of a supervised process, such as training or evaluation, that results in loss, metrics, and the like. Note that this function only requires inputs to be not None. Args: method_name: Method name of the SignatureDef as a string. inputs: dict of string to `Tensor`. loss: dict of string to `Tensor` representing computed loss. predictions: dict of string to `Tensor` representing the output predictions. metrics: dict of string to `Tensor` representing metric ops. Returns: A train- or eval-flavored signature_def. Raises: ValueError: If inputs or outputs is `None`. 
""" if inputs is None or not inputs: raise ValueError('{} inputs cannot be None or empty.'.format(method_name)) signature_inputs = {key: utils.build_tensor_info(tensor) for key, tensor in inputs.items()} signature_outputs = {} for output_set in (loss, predictions, metrics): if output_set is not None: sig_out = {key: utils.build_tensor_info(tensor) for key, tensor in output_set.items()} signature_outputs.update(sig_out) signature_def = build_signature_def( signature_inputs, signature_outputs, method_name) return signature_def @tf_export( v1=[ 'saved_model.is_valid_signature', 'saved_model.signature_def_utils.is_valid_signature' ]) @deprecation.deprecated_endpoints( 'saved_model.signature_def_utils.is_valid_signature') def is_valid_signature(signature_def): """Determine whether a SignatureDef can be served by TensorFlow Serving.""" if signature_def is None: return False return (_is_valid_classification_signature(signature_def) or _is_valid_regression_signature(signature_def) or _is_valid_predict_signature(signature_def)) def _is_valid_predict_signature(signature_def): """Determine whether the argument is a servable 'predict' SignatureDef.""" if signature_def.method_name != signature_constants.PREDICT_METHOD_NAME: return False if not signature_def.inputs.keys(): return False if not signature_def.outputs.keys(): return False return True def _is_valid_regression_signature(signature_def): """Determine whether the argument is a servable 'regress' SignatureDef.""" if signature_def.method_name != signature_constants.REGRESS_METHOD_NAME: return False if (set(signature_def.inputs.keys()) != set([signature_constants.REGRESS_INPUTS])): return False if (signature_def.inputs[signature_constants.REGRESS_INPUTS].dtype != types_pb2.DT_STRING): return False if (set(signature_def.outputs.keys()) != set([signature_constants.REGRESS_OUTPUTS])): return False if (signature_def.outputs[signature_constants.REGRESS_OUTPUTS].dtype != types_pb2.DT_FLOAT): return False return True def _is_valid_classification_signature(signature_def): """Determine whether the argument is a servable 'classify' SignatureDef.""" if signature_def.method_name != signature_constants.CLASSIFY_METHOD_NAME: return False if (set(signature_def.inputs.keys()) != set([signature_constants.CLASSIFY_INPUTS])): return False if (signature_def.inputs[signature_constants.CLASSIFY_INPUTS].dtype != types_pb2.DT_STRING): return False allowed_outputs = set([signature_constants.CLASSIFY_OUTPUT_CLASSES, signature_constants.CLASSIFY_OUTPUT_SCORES]) if not signature_def.outputs.keys(): return False if set(signature_def.outputs.keys()) - allowed_outputs: return False if (signature_constants.CLASSIFY_OUTPUT_CLASSES in signature_def.outputs and signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES].dtype != types_pb2.DT_STRING): return False if (signature_constants.CLASSIFY_OUTPUT_SCORES in signature_def.outputs and signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_SCORES].dtype != types_pb2.DT_FLOAT): return False return True def op_signature_def(op, key): """Creates a signature def with the output pointing to an op. Note that op isn't strictly enforced to be an Op object, and may be a Tensor. It is recommended to use the build_signature_def() function for Tensors. Args: op: An Op (or possibly Tensor). key: Key to graph element in the SignatureDef outputs. Returns: A SignatureDef with a single output pointing to the op. """ # Use build_tensor_info_from_op, which creates a TensorInfo from the element's # name. 
return build_signature_def(outputs={key: utils.build_tensor_info_from_op(op)}) def load_op_from_signature_def(signature_def, key, import_scope=None): """Load an Op from a SignatureDef created by op_signature_def(). Args: signature_def: a SignatureDef proto key: string key to op in the SignatureDef outputs. import_scope: Scope used to import the op Returns: Op (or possibly Tensor) in the graph with the same name as saved in the SignatureDef. Raises: NotFoundError: If the op could not be found in the graph. """ tensor_info = signature_def.outputs[key] try: # The init and train ops are not strictly enforced to be operations, so # retrieve any graph element (can be either op or tensor). return utils.get_element_from_tensor_info( tensor_info, import_scope=import_scope) except KeyError: raise errors.NotFoundError( None, None, 'The {0} could not be found in the graph. Please make sure the ' 'SavedModel was created by the internal _SavedModelBuilder. If you ' 'are using the public API, please make sure the SignatureDef in the ' 'SavedModel does not contain the key "{0}".'.format(key))
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/signature_def_utils_impl.py
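# --- Editor's example (not part of the dataset row above) ---
# A minimal graph-mode sketch of the API implemented in
# signature_def_utils_impl.py: build a prediction-flavored SignatureDef from
# two tensors and confirm it is servable. The tensor names "x"/"y" and the
# toy graph are illustrative assumptions, not taken from the original file.
import tensorflow.compat.v1 as tf
from tensorflow.python.saved_model import signature_def_utils

with tf.Graph().as_default():
  x = tf.placeholder(tf.float32, shape=[None, 2], name="x")
  y = tf.identity(2.0 * x, name="y")
  predict_sig = signature_def_utils.predict_signature_def(
      inputs={"x": x}, outputs={"y": y})
  # The resulting proto passes the servability checks defined in the same file.
  assert signature_def_utils.is_valid_signature(predict_sig)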
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for trackable object SavedModel save.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys from tensorflow.python.client import session as session_lib from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import backprop from tensorflow.python.eager import def_function from tensorflow.python.eager import function from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.framework import versions from tensorflow.python.keras.engine import sequential from tensorflow.python.keras.layers import core from tensorflow.python.keras.optimizer_v2 import adam from tensorflow.python.lib.io import file_io from tensorflow.python.module import module from tensorflow.python.ops import array_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import loader_impl from tensorflow.python.saved_model import save from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import tag_constants from tensorflow.python.training.tracking import tracking from tensorflow.python.training.tracking import util from tensorflow.python.util import compat class _ModelWithOptimizer(util.Checkpoint): def __init__(self): self.dense = core.Dense(1) self.optimizer = adam.Adam(0.01) @def_function.function( input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32), tensor_spec.TensorSpec([None], dtypes.float32))) def call(self, x, y): with backprop.GradientTape() as tape: loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.) 
trainable_variables = self.dense.trainable_variables gradients = tape.gradient(loss, trainable_variables) self.optimizer.apply_gradients(zip(gradients, trainable_variables)) return {"loss": loss} def _import_and_infer( save_dir, inputs, signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY): """Import a SavedModel into a TF 1.x-style graph and run `signature_key`.""" graph = ops.Graph() with graph.as_default(), session_lib.Session() as session: model = loader.load(session, [tag_constants.SERVING], save_dir) signature = model.signature_def[signature_key] assert set(inputs.keys()) == set(signature.inputs.keys()) feed_dict = {} for arg_name in inputs.keys(): feed_dict[graph.get_tensor_by_name(signature.inputs[arg_name].name)] = ( inputs[arg_name]) output_dict = {} for output_name, output_tensor_info in signature.outputs.items(): output_dict[output_name] = graph.get_tensor_by_name( output_tensor_info.name) return session.run(output_dict, feed_dict=feed_dict) class SaveTest(test.TestCase): def test_method_save_signature(self): root = tracking.AutoTrackable() root.f = def_function.function( lambda x: 2. * x, input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) root.f(constant_op.constant(1.)) save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(root, save_dir, root.f) self.assertEqual( {"output_0": 2.}, _import_and_infer(save_dir, {"x": 1.})) def test_method_save_concrete(self): root = tracking.AutoTrackable() root.f = def_function.function( lambda z: {"out": 2. * z}) root.f(constant_op.constant(1.)) save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save( root, save_dir, {"non_default_key": root.f.get_concrete_function( tensor_spec.TensorSpec(None, dtypes.float32))}) self.assertEqual( {"out": 2.}, _import_and_infer( save_dir, {"z": 1.}, signature_key="non_default_key")) def test_unbuilt_model_does_not_prevent_saving(self): root = util.Checkpoint(model=sequential.Sequential([core.Dense(2)])) save.save(root, os.path.join(self.get_temp_dir(), "saved_model")) def test_captured_symbolic_tensor_exception(self): root = module.Module() symbolic_tensor = [] @def_function.function def captured_intermediate(x): symbolic_tensor.append(math_ops.add(x, x, name="a_tensor")) return symbolic_tensor[-1] * 2 captured_intermediate(constant_op.constant(1.)) root.f = def_function.function(lambda: symbolic_tensor[-1], input_signature=[]) with self.assertRaisesRegexp(ValueError, "a_tensor"): save.save(root, os.path.join(self.get_temp_dir(), "saved_model"), signatures=root.f) def test_unsaveable_func_graph(self): root = module.Module() @def_function.function(input_signature=[]) def nested_f(): ops.get_default_graph().mark_as_unsaveable("ERROR MSG") return 1 @def_function.function(input_signature=[]) def f(): return nested_f() root.f = f with self.assertRaisesRegexp(ValueError, "ERROR MSG"): save.save(root, os.path.join(self.get_temp_dir(), "saved_model")) def test_version_information_included(self): root = tracking.AutoTrackable() save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(root, save_dir) saved_model_proto = loader_impl.parse_saved_model(save_dir) self.assertEqual( versions.__version__, saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_version) self.assertEqual( versions.__git_version__, saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_git_version) def test_non_concrete_error(self): root = tracking.AutoTrackable() root.f = def_function.function(lambda x: 2. 
* x) root.f(constant_op.constant(1.)) save_dir = os.path.join(self.get_temp_dir(), "saved_model") with self.assertRaisesRegexp( ValueError, "Expected a TensorFlow function"): save.save(root, save_dir, root.f) def test_captures_unreachable_variable(self): root = tracking.AutoTrackable() unreachable_variable = variables.Variable([5.0, 2.0]) root.reachable_variable = variables.Variable([1.0, 3.0]) @def_function.function def increase_variable(x): return 2 * unreachable_variable * x + root.reachable_variable root.f = increase_variable self.assertAllEqual([101.0, 83.0], root.f(constant_op.constant([10.0, 20.0])).numpy()) save_dir = os.path.join(self.get_temp_dir(), "saved_model") with self.assertRaisesRegexp(KeyError, "not reachable from root"): save.save(root, save_dir) def test_nested_inputs(self): root = tracking.AutoTrackable() root.f = def_function.function( lambda x: 2. * x[0], input_signature=([tensor_spec.TensorSpec(None, dtypes.float32), tensor_spec.TensorSpec(None, dtypes.float32)],)) root.f([constant_op.constant(1.), constant_op.constant(1.)]) def test_nested_outputs(self): root = tracking.AutoTrackable() root.f = def_function.function(lambda x: (2. * x, (3. * x, 4. * x))) root.f(constant_op.constant(1.)) to_save = root.f.get_concrete_function(constant_op.constant(1.)) save_dir = os.path.join(self.get_temp_dir(), "saved_model") with self.assertRaisesRegexp( ValueError, "non-flat outputs"): save.save(root, save_dir, to_save) def test_nested_dict_outputs(self): root = util.Checkpoint( f=def_function.function( lambda x: {"a": 2. * x, "b": (3. * x, 4. * x)})) root.f(constant_op.constant(1.)) to_save = root.f.get_concrete_function(constant_op.constant(1.)) save_dir = os.path.join(self.get_temp_dir(), "saved_model") with self.assertRaisesRegexp( ValueError, "dictionary containing non-Tensor value"): save.save(root, save_dir, to_save) def test_variable(self): root = tracking.AutoTrackable() root.v1 = variables.Variable(3.) root.v2 = variables.Variable(2.) root.f = def_function.function( lambda x: root.v1 * root.v2 * x) root.f(constant_op.constant(1.)) to_save = root.f.get_concrete_function(constant_op.constant(1.)) save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(root, save_dir, to_save) self.assertAllEqual({"output_0": 12.}, _import_and_infer(save_dir, {"x": 2.})) def test_optimizer(self): x = constant_op.constant([[3., 4.]]) y = constant_op.constant([2.]) model = _ModelWithOptimizer() first_loss = model.call(x, y) save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(model, save_dir, model.call) second_loss = model.call(x, y) self.assertNotEqual(first_loss, second_loss) self.assertAllClose( second_loss, _import_and_infer(save_dir, {"x": [[3., 4.]], "y": [2.]})) def test_single_method_default_signature(self): model = _ModelWithOptimizer() x = constant_op.constant([[3., 4.]]) y = constant_op.constant([2.]) model.call(x, y) save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(model, save_dir) self.assertIn("loss", _import_and_infer(save_dir, {"x": [[3., 4.]], "y": [2.]})) def test_single_function_default_signature(self): model = tracking.AutoTrackable() model.f = def_function.function(lambda: 3., input_signature=()) model.f() save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(model, save_dir) self.assertAllClose({"output_0": 3.}, _import_and_infer(save_dir, {})) def test_single_function_no_signature(self): model = tracking.AutoTrackable() model.f = def_function.function(lambda: 3.) 
save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(model, save_dir) def test_find_default_save_function(self): class ObjWithDefaultSignature(util.Checkpoint): @def_function.function(input_signature=[tensor_spec.TensorSpec( shape=None, dtype=dtypes.float32)]) def _default_save_signature(self, x): return x + x + 1 obj = ObjWithDefaultSignature() save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(obj, save_dir) self.assertAllClose( {"output_0": 7.}, _import_and_infer(save_dir, {"x": 3.})) def test_docstring(self): class Adder(module.Module): @def_function.function(input_signature=[tensor_spec.TensorSpec( shape=None, dtype=dtypes.float32)]) def add(self, x): return x + x + 1. to_save = Adder() to_save.add(constant_op.constant(1.)) save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(to_save, save_dir) self.assertAllClose({"output_0": 7.}, _import_and_infer(save_dir, {"x": 3.})) def test_datastructures(self): class HasDatastructures(util.Checkpoint): def __init__(self): self.a = [1.] self.a.append(variables.Variable(2.)) self.b = {"a": variables.Variable(3.)} @def_function.function(input_signature=[tensor_spec.TensorSpec( shape=None, dtype=dtypes.float32)]) def add(self, x): return x + math_ops.add_n(self.a) + self.b["a"] to_save = HasDatastructures() to_save.add(constant_op.constant(1.)) save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(to_save, save_dir) self.assertAllClose({"output_0": 10.}, _import_and_infer(save_dir, {"x": 4.})) def test_default_attr_stripping(self): class Complex(util.Checkpoint): @def_function.function(input_signature=[]) def __call__(self): return math_ops.complex( constant_op.constant(1.), constant_op.constant(2.), name="complex") to_save = Complex() to_save() save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(to_save, save_dir) graph = ops.Graph() with graph.as_default(), self.session(graph) as session: loader.load(session, [tag_constants.SERVING], save_dir) func, = [f for name, f in graph._functions.items() if "call" in name] complex_node, = [ node for node in func.definition.node_def if node.op == "Complex"] self.assertNotIn("T", complex_node.attr) self.assertNotIn("Tout", complex_node.attr) def test_signature_attribute_reserved(self): root = util.Checkpoint(signatures=variables.Variable(1.)) save_dir = os.path.join(self.get_temp_dir(), "saved_model") with self.assertRaisesRegexp(ValueError, "del obj.signatures"): save.save(root, save_dir) del root.signatures save.save(root, save_dir) def test_function_with_captured_dataset(self): if test_util.is_gpu_available(): self.skipTest("Currently broken when a GPU is available.") class HasDataset(module.Module): def __init__(self): super(HasDataset, self).__init__() self.dataset = ( dataset_ops.Dataset.range(5) .map(lambda x: x ** 2)) @def_function.function def __call__(self, x): current_sum = array_ops.zeros([], dtype=dtypes.int64) for element in self.dataset: current_sum += x * element return current_sum root = HasDataset() save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save( root, save_dir, signatures=root.__call__.get_concrete_function( tensor_spec.TensorSpec(None, dtypes.int64))) self.assertAllClose({"output_0": 3 * (1 + 4 + 9 + 16)}, _import_and_infer(save_dir, {"x": 3})) class AssetTests(test.TestCase): def setUp(self): super(AssetTests, self).setUp() self._vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt") with open(self._vocab_path, "w") as f: f.write("alpha\nbeta\ngamma\n") def 
test_asset_path_returned(self): root = tracking.AutoTrackable() root.path = tracking.TrackableAsset(self._vocab_path) save_dir = os.path.join(self.get_temp_dir(), "saved_model") root.get_asset = def_function.function(lambda: root.path.asset_path) save.save(root, save_dir, signatures=root.get_asset.get_concrete_function()) second_dir = os.path.join(self.get_temp_dir(), "second_dir") file_io.rename(save_dir, second_dir) imported_path = _import_and_infer(second_dir, {})["output_0"] self.assertIn(compat.as_str_any(second_dir), compat.as_str_any(imported_path)) def test_table(self): initializer = lookup_ops.TextFileInitializer( self._vocab_path, key_dtype=dtypes.string, key_index=lookup_ops.TextFileIndex.WHOLE_LINE, value_dtype=dtypes.int64, value_index=lookup_ops.TextFileIndex.LINE_NUMBER) root = util.Checkpoint(table=lookup_ops.HashTable( initializer, default_value=-1)) root.table_user = def_function.function( root.table.lookup, input_signature=[tensor_spec.TensorSpec(None, dtypes.string)]) self.assertEqual( 2, self.evaluate(root.table_user(constant_op.constant("gamma")))) save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(root, save_dir) file_io.delete_file(self._vocab_path) self.assertAllClose( {"output_0": [2, 0]}, _import_and_infer(save_dir, {"keys": ["gamma", "alpha"]})) second_dir = os.path.join(self.get_temp_dir(), "second_dir") # Asset paths should track the location the SavedModel is loaded from. file_io.rename(save_dir, second_dir) self.assertAllClose( {"output_0": [2, 1]}, _import_and_infer(second_dir, {"keys": ["gamma", "beta"]})) def test_unused_asset(self): root = tracking.AutoTrackable() root.f = def_function.function( lambda x: 2. * x, input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) root.asset = tracking.TrackableAsset(self._vocab_path) export_dir = os.path.join(self.get_temp_dir(), "save_dir") save.save(root, export_dir) self.assertAllClose( {"output_0": [0.2]}, _import_and_infer(export_dir, {"x": [0.1]})) def test_sensible_function_building_exception(self): root = util.Checkpoint(v=variables.Variable(2.)) root.f = def_function.function( lambda x: 2. * root.v, input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) export_dir = os.path.join(self.get_temp_dir(), "save_dir") @def_function.function def _calls_save(): save.save(root, export_dir) with self.assertRaisesRegexp(AssertionError, "tf.function"): _calls_save() class _ModelWithOptimizerUsingDefun(util.Checkpoint): def __init__(self): self.dense = core.Dense(1) self.optimizer = adam.Adam(0.01) # Using defun due to control flow v2 cycles, b/121159261. def_function uses # conds to gate variable initialization and so triggers cond reference cycles, # but the thing being wrapped here does not use cond itself. @function.defun( input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32), tensor_spec.TensorSpec([None], dtypes.float32)), ) def call(self, x, y): with backprop.GradientTape() as tape: loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.) 
trainable_variables = self.dense.trainable_variables gradients = tape.gradient(loss, trainable_variables) self.optimizer.apply_gradients(zip(gradients, trainable_variables)) return {"loss": loss} class MemoryTests(test.TestCase): def setUp(self): self._model = _ModelWithOptimizerUsingDefun() @test_util.assert_no_garbage_created def test_no_reference_cycles(self): x = constant_op.constant([[3., 4.]]) y = constant_op.constant([2.]) self._model.call(x, y) if sys.version_info[0] < 3: # TODO(allenl): debug reference cycles in Python 2.x self.skipTest("This test only works in Python 3+. Reference cycles are " "created in older Python versions.") save_dir = os.path.join(self.get_temp_dir(), "saved_model") save.save(self._model, save_dir, self._model.call) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/save_test.py
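# --- Editor's example (not part of the test file above) ---
# A hedged sketch of the save round trip that SaveTest exercises: a trackable
# module with a tf.function signature is written out with tf.saved_model.save.
# Assumes eager execution (the TF 2.x default; under 1.15 call
# tf.compat.v1.enable_eager_execution() first). The export path is illustrative.
import os
import tempfile
import tensorflow as tf

root = tf.Module()
root.f = tf.function(
    lambda x: {"doubled": 2. * x},
    input_signature=[tf.TensorSpec(None, tf.float32)])

export_dir = os.path.join(tempfile.mkdtemp(), "saved_model")
tf.saved_model.save(root, export_dir, signatures=root.f)
# The default serving signature now maps "x" -> "doubled", analogous to the
# "output_0" mapping checked by _import_and_infer in the tests above.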
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SavedModel builder. Builds a SavedModel that can be saved to storage, is language neutral, and enables systems to produce, consume, or transform TensorFlow Models. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.saved_model.builder_impl import _SavedModelBuilder from tensorflow.python.saved_model.builder_impl import SavedModelBuilder # pylint: enable=unused-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/builder.py
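# --- Editor's example (not part of the module above) ---
# A minimal sketch of the public v1 builder re-exported by builder.py,
# assuming TF 1.x graph execution; the export path "/tmp/builder_demo" (which
# must not already exist) and the single-variable graph are illustrative.
import tensorflow.compat.v1 as tf

export_dir = "/tmp/builder_demo"
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
with tf.Session(graph=tf.Graph()) as sess:
  v = tf.Variable(42.0, name="v")
  sess.run(tf.global_variables_initializer())
  # Attach the graph and its checkpointed weights under the "serve" tag.
  builder.add_meta_graph_and_variables(
      sess, [tf.saved_model.tag_constants.SERVING])
builder.save()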
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Handles types registrations for tf.saved_model.load.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import versions_pb2 from tensorflow.core.protobuf import saved_object_graph_pb2 class VersionedTypeRegistration(object): """Holds information about one version of a revived type.""" def __init__(self, object_factory, version, min_producer_version, min_consumer_version, bad_consumers=None, setter=setattr): """Identify a revived type version. Args: object_factory: A callable which takes a SavedUserObject proto and returns a trackable object. Dependencies are added later via `setter`. version: An integer, the producer version of this wrapper type. When making incompatible changes to a wrapper, add a new `VersionedTypeRegistration` with an incremented `version`. The most recent version will be saved, and all registrations with a matching identifier will be searched for the highest compatible version to use when loading. min_producer_version: The minimum producer version number required to use this `VersionedTypeRegistration` when loading a proto. min_consumer_version: `VersionedTypeRegistration`s with a version number less than `min_consumer_version` will not be used to load a proto saved with this object. `min_consumer_version` should be set to the lowest version number which can successfully load protos saved by this object. If no matching registration is available on load, the object will be revived with a generic trackable type. `min_consumer_version` and `bad_consumers` are a blunt tool, and using them will generally break forward compatibility: previous versions of TensorFlow will revive newly saved objects as opaque trackable objects rather than wrapped objects. When updating wrappers, prefer saving new information but preserving compatibility with previous wrapper versions. They are, however, useful for ensuring that previously-released buggy wrapper versions degrade gracefully rather than throwing exceptions when presented with newly-saved SavedModels. bad_consumers: A list of consumer versions which are incompatible (in addition to any version less than `min_consumer_version`). setter: A callable with the same signature as `setattr` to use when adding dependencies to generated objects. """ self.setter = setter self.identifier = None # Set after registration self._object_factory = object_factory self.version = version self._min_consumer_version = min_consumer_version self._min_producer_version = min_producer_version if bad_consumers is None: bad_consumers = [] self._bad_consumers = bad_consumers def to_proto(self): """Create a SavedUserObject proto.""" # For now wrappers just use dependencies to save their state, so the # SavedUserObject doesn't depend on the object being saved. 
# TODO(allenl): Add a wrapper which uses its own proto. return saved_object_graph_pb2.SavedUserObject( identifier=self.identifier, version=versions_pb2.VersionDef( producer=self.version, min_consumer=self._min_consumer_version, bad_consumers=self._bad_consumers)) def from_proto(self, proto): """Recreate a trackable object from a SavedUserObject proto.""" return self._object_factory(proto) def should_load(self, proto): """Checks if this object should load the SavedUserObject `proto`.""" if proto.identifier != self.identifier: return False if self.version < proto.version.min_consumer: return False if proto.version.producer < self._min_producer_version: return False for bad_version in proto.version.bad_consumers: if self.version == bad_version: return False return True # string identifier -> (predicate, [VersionedTypeRegistration]) _REVIVED_TYPE_REGISTRY = {} _TYPE_IDENTIFIERS = [] def register_revived_type(identifier, predicate, versions): """Register a type for revived objects. Args: identifier: A unique string identifying this class of objects. predicate: A Boolean predicate for this registration. Takes a trackable object as an argument. If True, `type_registration` may be used to save and restore the object. versions: A list of `VersionedTypeRegistration` objects. """ # Keep registrations in order of version. We always use the highest matching # version (respecting the min consumer version and bad consumers). versions.sort(key=lambda reg: reg.version, reverse=True) if not versions: raise AssertionError("Need at least one version of a registered type.") version_numbers = set() for registration in versions: # Copy over the identifier for use in generating protos registration.identifier = identifier if registration.version in version_numbers: raise AssertionError( "Got multiple registrations with version {} for type {}".format( registration.version, identifier)) version_numbers.add(registration.version) if identifier in _REVIVED_TYPE_REGISTRY: raise AssertionError( "Duplicate registrations for type {}".format(identifier)) _REVIVED_TYPE_REGISTRY[identifier] = (predicate, versions) _TYPE_IDENTIFIERS.append(identifier) def serialize(obj): """Create a SavedUserObject from a trackable object.""" for identifier in _TYPE_IDENTIFIERS: predicate, versions = _REVIVED_TYPE_REGISTRY[identifier] if predicate(obj): # Always uses the most recent version to serialize. return versions[0].to_proto() return None def deserialize(proto): """Create a trackable object from a SavedUserObject proto. Args: proto: A SavedUserObject to deserialize. Returns: A tuple of (trackable, assignment_fn) where assignment_fn has the same signature as setattr and should be used to add dependencies to `trackable` when they are available. """ _, type_registrations = _REVIVED_TYPE_REGISTRY.get( proto.identifier, (None, None)) if type_registrations is not None: for type_registration in type_registrations: if type_registration.should_load(proto): return (type_registration.from_proto(proto), type_registration.setter) return None def registered_identifiers(): return _REVIVED_TYPE_REGISTRY.keys()
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/revived_types.py
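# --- Editor's example (not part of the module above) ---
# A hedged sketch of how a wrapper type could be registered with the internal
# registry defined in revived_types.py so that tf.saved_model.load can revive
# it. The identifier "_editor_toy_wrapper" and the ToyWrapper class are
# illustrative assumptions, not an existing TensorFlow registration.
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import tracking


class ToyWrapper(tracking.AutoTrackable):
  """Placeholder trackable type used only for this sketch."""


revived_types.register_revived_type(
    "_editor_toy_wrapper",
    lambda obj: isinstance(obj, ToyWrapper),
    versions=[
        revived_types.VersionedTypeRegistration(
            object_factory=lambda proto: ToyWrapper(),
            version=1,
            min_producer_version=1,
            min_consumer_version=1)
    ])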
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SavedModel utility functions implementation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.core.protobuf import struct_pb2 from tensorflow.python.eager import context from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.lib.io import file_io from tensorflow.python.saved_model import constants from tensorflow.python.saved_model import nested_structure_coder from tensorflow.python.util import compat from tensorflow.python.util import deprecation from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export # TensorInfo helpers. @tf_export(v1=["saved_model.build_tensor_info", "saved_model.utils.build_tensor_info"]) @deprecation.deprecated( None, "This function will only be available through the v1 compatibility " "library as tf.compat.v1.saved_model.utils.build_tensor_info or " "tf.compat.v1.saved_model.build_tensor_info.") def build_tensor_info(tensor): """Utility function to build TensorInfo proto from a Tensor. Args: tensor: Tensor or SparseTensor whose name, dtype and shape are used to build the TensorInfo. For SparseTensors, the names of the three constituent Tensors are used. Returns: A TensorInfo protocol buffer constructed based on the supplied argument. Raises: RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError("build_tensor_info is not supported in Eager mode.") return build_tensor_info_internal(tensor) def build_tensor_info_internal(tensor): """Utility function to build TensorInfo proto from a Tensor.""" if (isinstance(tensor, composite_tensor.CompositeTensor) and not isinstance(tensor, sparse_tensor.SparseTensor)): return _build_composite_tensor_info_internal(tensor) tensor_info = meta_graph_pb2.TensorInfo( dtype=dtypes.as_dtype(tensor.dtype).as_datatype_enum, tensor_shape=tensor.get_shape().as_proto()) if isinstance(tensor, sparse_tensor.SparseTensor): tensor_info.coo_sparse.values_tensor_name = tensor.values.name tensor_info.coo_sparse.indices_tensor_name = tensor.indices.name tensor_info.coo_sparse.dense_shape_tensor_name = tensor.dense_shape.name else: tensor_info.name = tensor.name return tensor_info def _build_composite_tensor_info_internal(tensor): """Utility function to build TensorInfo proto from a CompositeTensor.""" spec = tensor._type_spec # pylint: disable=protected-access tensor_info = meta_graph_pb2.TensorInfo() struct_coder = nested_structure_coder.StructureCoder() spec_proto = struct_coder.encode_structure(spec) tensor_info.composite_tensor.type_spec.CopyFrom(spec_proto.type_spec_value) for component in nest.flatten(tensor, expand_composites=True): tensor_info.composite_tensor.components.add().CopyFrom( build_tensor_info_internal(component)) return tensor_info def build_tensor_info_from_op(op): """Utility function to build TensorInfo proto from an Op. Note that this function should be used with caution. It is strictly restricted to TensorFlow internal use-cases only. Please make sure you do need it before using it. This utility function overloads the TensorInfo proto by setting the name to the Op's name, dtype to DT_INVALID and tensor_shape as None. One typical usage is for the Op of the call site for the defunned function: ```python @function.defun def some_vairable_initialiation_fn(value_a, value_b): a = value_a b = value_b value_a = constant_op.constant(1, name="a") value_b = constant_op.constant(2, name="b") op_info = utils.build_op_info( some_vairable_initialiation_fn(value_a, value_b)) ``` Args: op: An Op whose name is used to build the TensorInfo. The name that points to the Op could be fetched at run time in the Loader session. Returns: A TensorInfo protocol buffer constructed based on the supplied argument. """ return meta_graph_pb2.TensorInfo( dtype=types_pb2.DT_INVALID, tensor_shape=tensor_shape.unknown_shape().as_proto(), name=op.name) @tf_export(v1=["saved_model.get_tensor_from_tensor_info", "saved_model.utils.get_tensor_from_tensor_info"]) @deprecation.deprecated( None, "This function will only be available through the v1 compatibility " "library as tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info or " "tf.compat.v1.saved_model.get_tensor_from_tensor_info.") def get_tensor_from_tensor_info(tensor_info, graph=None, import_scope=None): """Returns the Tensor or CompositeTensor described by a TensorInfo proto. Args: tensor_info: A TensorInfo proto describing a Tensor or SparseTensor or CompositeTensor. graph: The tf.Graph in which tensors are looked up. If None, the current default graph is used. import_scope: If not None, names in `tensor_info` are prefixed with this string before lookup. Returns: The Tensor or SparseTensor or CompositeTensor in `graph` described by `tensor_info`. Raises: KeyError: If `tensor_info` does not correspond to a tensor in `graph`. ValueError: If `tensor_info` is malformed. 
""" graph = graph or ops.get_default_graph() def _get_tensor(name): return graph.get_tensor_by_name( ops.prepend_name_scope(name, import_scope=import_scope)) encoding = tensor_info.WhichOneof("encoding") if encoding == "name": return _get_tensor(tensor_info.name) elif encoding == "coo_sparse": return sparse_tensor.SparseTensor( _get_tensor(tensor_info.coo_sparse.indices_tensor_name), _get_tensor(tensor_info.coo_sparse.values_tensor_name), _get_tensor(tensor_info.coo_sparse.dense_shape_tensor_name)) elif encoding == "composite_tensor": struct_coder = nested_structure_coder.StructureCoder() spec_proto = struct_pb2.StructuredValue( type_spec_value=tensor_info.composite_tensor.type_spec) spec = struct_coder.decode_proto(spec_proto) components = [_get_tensor(component.name) for component in tensor_info.composite_tensor.components] return spec.from_components(components) else: raise ValueError("Invalid TensorInfo.encoding: %s" % encoding) def get_element_from_tensor_info(tensor_info, graph=None, import_scope=None): """Returns the element in the graph described by a TensorInfo proto. Args: tensor_info: A TensorInfo proto describing an Op or Tensor by name. graph: The tf.Graph in which tensors are looked up. If None, the current default graph is used. import_scope: If not None, names in `tensor_info` are prefixed with this string before lookup. Returns: Op or tensor in `graph` described by `tensor_info`. Raises: KeyError: If `tensor_info` does not correspond to an op or tensor in `graph` """ graph = graph or ops.get_default_graph() return graph.as_graph_element( ops.prepend_name_scope(tensor_info.name, import_scope=import_scope)) # Path helpers. def get_or_create_variables_dir(export_dir): """Return variables sub-directory, or create one if it doesn't exist.""" variables_dir = get_variables_dir(export_dir) if not file_io.file_exists(variables_dir): file_io.recursive_create_dir(variables_dir) return variables_dir def get_variables_dir(export_dir): """Return variables sub-directory in the SavedModel.""" return os.path.join( compat.as_text(export_dir), compat.as_text(constants.VARIABLES_DIRECTORY)) def get_variables_path(export_dir): """Return the variables path, used as the prefix for checkpoint files.""" return os.path.join( compat.as_text(get_variables_dir(export_dir)), compat.as_text(constants.VARIABLES_FILENAME)) def get_or_create_assets_dir(export_dir): """Return assets sub-directory, or create one if it doesn't exist.""" assets_destination_dir = get_assets_dir(export_dir) if not file_io.file_exists(assets_destination_dir): file_io.recursive_create_dir(assets_destination_dir) return assets_destination_dir def get_assets_dir(export_dir): """Return path to asset directory in the SavedModel.""" return os.path.join( compat.as_text(export_dir), compat.as_text(constants.ASSETS_DIRECTORY))
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/utils_impl.py
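# --- Editor's example (not part of the module above) ---
# A minimal graph-mode sketch of the TensorInfo helpers implemented in
# utils_impl.py: encode a tensor as a TensorInfo proto and resolve it back
# from the same graph. The placeholder name "x" is an illustrative assumption.
import tensorflow.compat.v1 as tf

with tf.Graph().as_default() as g:
  x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
  info = tf.saved_model.utils.build_tensor_info(x)  # captures name, dtype, shape
  resolved = tf.saved_model.utils.get_tensor_from_tensor_info(info, graph=g)
  assert resolved is x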
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SavedModel utility functions. Utility functions to assist with setup and construction of the SavedModel proto. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.saved_model.utils_impl import build_tensor_info from tensorflow.python.saved_model.utils_impl import build_tensor_info_from_op from tensorflow.python.saved_model.utils_impl import get_tensor_from_tensor_info # pylint: enable=unused-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/utils.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Signature constants for SavedModel save and restore operations. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.util.tf_export import tf_export # Key in the signature def map for `default` serving signatures. The default # signature is used in inference requests where a specific signature was not # specified. DEFAULT_SERVING_SIGNATURE_DEF_KEY = "serving_default" tf_export( "saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY", v1=[ "saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY", "saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY" ], ).export_constant(__name__, "DEFAULT_SERVING_SIGNATURE_DEF_KEY") ################################################################################ # Classification API constants. # Classification inputs. CLASSIFY_INPUTS = "inputs" tf_export( "saved_model.CLASSIFY_INPUTS", v1=[ "saved_model.CLASSIFY_INPUTS", "saved_model.signature_constants.CLASSIFY_INPUTS" ]).export_constant(__name__, "CLASSIFY_INPUTS") # Classification method name used in a SignatureDef. CLASSIFY_METHOD_NAME = "tensorflow/serving/classify" tf_export( "saved_model.CLASSIFY_METHOD_NAME", v1=[ "saved_model.CLASSIFY_METHOD_NAME", "saved_model.signature_constants.CLASSIFY_METHOD_NAME" ]).export_constant(__name__, "CLASSIFY_METHOD_NAME") # Classification classes output. CLASSIFY_OUTPUT_CLASSES = "classes" tf_export( "saved_model.CLASSIFY_OUTPUT_CLASSES", v1=[ "saved_model.CLASSIFY_OUTPUT_CLASSES", "saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES" ]).export_constant(__name__, "CLASSIFY_OUTPUT_CLASSES") # Classification scores output. CLASSIFY_OUTPUT_SCORES = "scores" tf_export( "saved_model.CLASSIFY_OUTPUT_SCORES", v1=[ "saved_model.CLASSIFY_OUTPUT_SCORES", "saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES" ]).export_constant(__name__, "CLASSIFY_OUTPUT_SCORES") ################################################################################ # Prediction API constants. # Predict inputs. PREDICT_INPUTS = "inputs" tf_export( "saved_model.PREDICT_INPUTS", v1=[ "saved_model.PREDICT_INPUTS", "saved_model.signature_constants.PREDICT_INPUTS" ]).export_constant(__name__, "PREDICT_INPUTS") # Prediction method name used in a SignatureDef. PREDICT_METHOD_NAME = "tensorflow/serving/predict" tf_export( "saved_model.PREDICT_METHOD_NAME", v1=[ "saved_model.PREDICT_METHOD_NAME", "saved_model.signature_constants.PREDICT_METHOD_NAME" ]).export_constant(__name__, "PREDICT_METHOD_NAME") # Predict outputs. PREDICT_OUTPUTS = "outputs" tf_export( "saved_model.PREDICT_OUTPUTS", v1=[ "saved_model.PREDICT_OUTPUTS", "saved_model.signature_constants.PREDICT_OUTPUTS" ]).export_constant(__name__, "PREDICT_OUTPUTS") ################################################################################ # Regression API constants. # Regression inputs. 
REGRESS_INPUTS = "inputs" tf_export( "saved_model.REGRESS_INPUTS", v1=[ "saved_model.REGRESS_INPUTS", "saved_model.signature_constants.REGRESS_INPUTS" ]).export_constant(__name__, "REGRESS_INPUTS") # Regression method name used in a SignatureDef. REGRESS_METHOD_NAME = "tensorflow/serving/regress" tf_export( "saved_model.REGRESS_METHOD_NAME", v1=[ "saved_model.REGRESS_METHOD_NAME", "saved_model.signature_constants.REGRESS_METHOD_NAME" ]).export_constant(__name__, "REGRESS_METHOD_NAME") # Regression outputs. REGRESS_OUTPUTS = "outputs" tf_export( "saved_model.REGRESS_OUTPUTS", v1=[ "saved_model.REGRESS_OUTPUTS", "saved_model.signature_constants.REGRESS_OUTPUTS" ]).export_constant(__name__, "REGRESS_OUTPUTS") ################################################################################ # Train/Eval API constants. # Not exported while export_all_saved_models is experimental. DEFAULT_TRAIN_SIGNATURE_DEF_KEY = "train" DEFAULT_EVAL_SIGNATURE_DEF_KEY = "eval" SUPERVISED_TRAIN_METHOD_NAME = "tensorflow/supervised/training" SUPERVISED_EVAL_METHOD_NAME = "tensorflow/supervised/eval"
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/signature_constants.py
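# --- Editor's example (not part of the module above) ---
# A hedged sketch of how these constants are typically consumed: pick the
# default serving signature out of a loaded v1 SavedModel. The export path
# "/tmp/exported_model" is an illustrative assumption and must already contain
# a SavedModel exported with the "serve" tag.
import tensorflow.compat.v1 as tf
from tensorflow.python.saved_model import signature_constants

with tf.Session(graph=tf.Graph()) as sess:
  meta_graph_def = tf.saved_model.loader.load(
      sess, [tf.saved_model.tag_constants.SERVING], "/tmp/exported_model")
  serving_sig = meta_graph_def.signature_def[
      signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
  # Input/output TensorInfo protos keyed by the names chosen at export time.
  print(serving_sig.inputs)
  print(serving_sig.outputs)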
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Loader functionality for SavedModel with hermetic, language-neutral exports. Load and restore capability for a SavedModel, which may include multiple meta graph defs. Each SavedModel is associated with a single checkpoint. Each meta graph def is saved with one or more tags, which are used to identify the exact meta graph def to load. The `load` operation requires the session in which to restore the graph definition and variables, the tags used to identify the meta graph def to load and the location of the SavedModel. Upon a load, the subset of variables and assets supplied as part of the specific meta graph def, will be restored into the supplied session. The values of the variables though will correspond to the saved values from the first meta graph added to the SavedModel using `add_meta_graph_and_variables(...)` in `builder.py`. Typical usage: ```python ... builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_dir) with tf.compat.v1.Session(graph=tf.Graph()) as sess: ... builder.add_meta_graph_and_variables(sess, ["foo-tag"], signature_def_map=foo_signatures, assets_collection=foo_assets) ... with tf.compat.v1.Session(graph=tf.Graph()) as sess: ... builder.add_meta_graph(["bar-tag", "baz-tag"], assets_collection=bar_baz_assets) ... builder.save() ... with tf.compat.v1.Session(graph=tf.Graph()) as sess: tf.compat.v1.saved_model.loader.load(sess, ["foo-tag"], export_dir) ... ``` """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.saved_model.loader_impl import load from tensorflow.python.saved_model.loader_impl import maybe_saved_model_directory # pylint: enable=unused-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/loader.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SavedModel.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_ops from tensorflow.python.framework import test_util from tensorflow.python.lib.io import file_io from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import test from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import constants from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import loader_impl from tensorflow.python.saved_model import main_op from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import tag_constants from tensorflow.python.saved_model import utils from tensorflow.python.training import saver_test_utils from tensorflow.python.training import training from tensorflow.python.util import compat SAVED_MODEL_PATH = ("cc/saved_model/testdata/half_plus_two/00000123") def tearDownModule(): file_io.delete_recursively(test.get_temp_dir()) class SavedModelTestBase(test.TestCase): def _get_export_dir(self, label): return os.path.join(test.get_temp_dir(), label) def _init_and_validate_variable(self, sess, variable_name, variable_value): v = variables.VariableV1(variable_value, name=variable_name) self.evaluate(variables.global_variables_initializer()) self.assertEqual(variable_value, self.evaluate(v)) def _build_asset_collection(self, asset_file_name, asset_file_contents, asset_file_tensor_name, asset_subdir=""): parent_dir = os.path.join( compat.as_bytes(test.get_temp_dir()), compat.as_bytes(asset_subdir)) file_io.recursive_create_dir(parent_dir) asset_filepath = os.path.join( compat.as_bytes(parent_dir), compat.as_bytes(asset_file_name)) file_io.write_string_to_file(asset_filepath, asset_file_contents) asset_file_tensor = constant_op.constant( asset_filepath, name=asset_file_tensor_name) ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset_file_tensor) asset_collection = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS) return asset_collection class SavedModelTest(SavedModelTestBase): def _validate_assets(self, export_dir, asset_file_def, expected_asset_file_name, 
expected_asset_file_contents, expected_asset_tensor_name, asset_id=0): assets_path = os.path.join( compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY), compat.as_bytes(expected_asset_file_name)) actual_asset_contents = file_io.read_file_to_string(assets_path) self.assertEqual(expected_asset_file_contents, compat.as_text(actual_asset_contents)) self.assertEqual(expected_asset_file_name, asset_file_def[asset_id].filename) self.assertEqual(expected_asset_tensor_name, asset_file_def[asset_id].tensor_info.name) def _validate_inputs_tensor_info_fail(self, builder, tensor_info): with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) foo_signature = signature_def_utils.build_signature_def({ "foo_inputs": tensor_info }, dict(), "foo") self.assertRaises( AssertionError, builder.add_meta_graph_and_variables, sess, ["foo"], signature_def_map={"foo_key": foo_signature}) def _validate_inputs_tensor_info_accept(self, builder, tensor_info): with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) foo_signature = signature_def_utils.build_signature_def({ "foo_inputs": tensor_info }, dict(), "foo") builder.add_meta_graph_and_variables( sess, ["foo"], signature_def_map={"foo_key": foo_signature}) def _validate_outputs_tensor_info_fail(self, builder, tensor_info): with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) foo_signature = signature_def_utils.build_signature_def( dict(), {"foo_outputs": tensor_info}, "foo") self.assertRaises( AssertionError, builder.add_meta_graph_and_variables, sess, ["foo"], signature_def_map={"foo_key": foo_signature}) def _validate_outputs_tensor_info_accept(self, builder, tensor_info): with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) foo_signature = signature_def_utils.build_signature_def( dict(), {"foo_outputs": tensor_info}, "foo") builder.add_meta_graph_and_variables( sess, ["foo"], signature_def_map={"foo_key": foo_signature}) def _validate_sig_def_keys(self, builder, valid_tensor_info, invalid_key): with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) foo_signature = signature_def_utils.build_signature_def( dict(), {"foo_key": valid_tensor_info}, "foo") self.assertRaises( KeyError, builder.add_meta_graph_and_variables, sess, ["foo"], signature_def_map={invalid_key: foo_signature}) def testMaybeSavedModelDir(self): base_path = test.test_src_dir_path("/python/saved_model") self.assertFalse(loader.maybe_saved_model_directory(base_path)) base_path = test.test_src_dir_path(SAVED_MODEL_PATH) self.assertTrue(loader.maybe_saved_model_directory(base_path)) base_path = "complete_garbage" self.assertFalse(loader.maybe_saved_model_directory(base_path)) def testBadSavedModelFileFormat(self): export_dir = self._get_export_dir("test_bad_saved_model_file_format") # Attempt to load a SavedModel from an export directory that does not exist. with self.session(graph=ops.Graph()) as sess: with self.assertRaisesRegexp(IOError, "SavedModel file does not exist at: %s" % export_dir): loader.load(sess, ["foo"], export_dir) os.makedirs(export_dir) # Write an invalid binary proto to saved_model.pb. 
path_to_pb = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB) with open(path_to_pb, "w") as f: f.write("invalid content") with self.session(graph=ops.Graph()) as sess: with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" % constants.SAVED_MODEL_FILENAME_PB): loader.load(sess, ["foo"], export_dir) # Cleanup the directory and start again. file_io.delete_recursively(export_dir) os.makedirs(export_dir) # Write an invalid text proto to saved_model.pbtxt path_to_pbtxt = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PBTXT) with open(path_to_pbtxt, "w") as f: f.write("invalid content") with self.session(graph=ops.Graph()) as sess: with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" % constants.SAVED_MODEL_FILENAME_PBTXT): loader.load(sess, ["foo"], export_dir) @test_util.run_deprecated_v1 def testVerifySessionGraphUsage(self): export_dir = self._get_export_dir("test_verify_session_graph_usage") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING]) # Save the SavedModel to disk. builder.save() # Build a session and supply it to the load operation. sess = session.Session(graph=ops.Graph()) loader.load(sess, [tag_constants.TRAINING], export_dir) # Check the variable within the scope of the session and its graph. with sess: self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) @test_util.run_deprecated_v1 def testSequence(self): export_dir = self._get_export_dir("test_sequence") builder = saved_model_builder._SavedModelBuilder(export_dir) # Expect an assertion error since add_meta_graph_and_variables() should be # invoked before any add_meta_graph() calls. with self.session(graph=ops.Graph()) as sess: self.assertRaises(AssertionError, builder.add_meta_graph, ["foo"]) # Expect an assertion error for multiple calls of # add_meta_graph_and_variables() since weights should be saved exactly once. with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) builder.add_meta_graph_and_variables(sess, ["bar"]) self.assertRaises(AssertionError, builder.add_meta_graph_and_variables, sess, ["baz"]) @test_util.run_deprecated_v1 def testTags(self): export_dir = self._get_export_dir("test_tags") builder = saved_model_builder._SavedModelBuilder(export_dir) # Graph with a single variable. SavedModel invoked to: # - add with weights. # - a single tag (from predefined constants). with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING]) # Graph that updates the single variable. SavedModel invoked to: # - simply add the model (weights are not updated). # - a single tag (from predefined constants). with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 43) builder.add_meta_graph([tag_constants.SERVING]) # Graph that updates the single variable. SavedModel invoked to: # - simply add the model (weights are not updated). # - multiple tags (from predefined constants). with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 45) builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU]) # Graph that updates the single variable. SavedModel invoked to: # - simply add the model (weights are not updated). # - multiple tags (from predefined constants for serving on TPU). 
with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 45) builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU]) # Graph that updates the single variable. SavedModel is invoked: # - to add the model (weights are not updated). # - multiple custom tags. with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 44) builder.add_meta_graph(["foo", "bar"]) # Save the SavedModel to disk. builder.save() # Restore the graph with a single predefined tag whose variables were saved. with self.session(graph=ops.Graph()) as sess: loader.load(sess, [tag_constants.TRAINING], export_dir) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) # Restore the graph with a single predefined tag whose variables were not # saved. with self.session(graph=ops.Graph()) as sess: loader.load(sess, [tag_constants.SERVING], export_dir) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) # Restore the graph with multiple predefined tags whose variables were not # saved. with self.session(graph=ops.Graph()) as sess: loader.load(sess, [tag_constants.SERVING, tag_constants.GPU], export_dir) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) # Restore the graph with multiple predefined tags (for serving on TPU) # whose variables were not saved. with self.session(graph=ops.Graph()) as sess: loader.load(sess, [tag_constants.SERVING, tag_constants.TPU], export_dir) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) # Restore the graph with multiple tags. Provide duplicate tags to test set # semantics. with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["foo", "bar", "foo"], export_dir) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) # Try restoring a graph with a non-existent tag. This should yield a runtime # error. with self.session(graph=ops.Graph()) as sess: self.assertRaises(RuntimeError, loader.load, sess, ["INVALID"], export_dir) # Try restoring a graph where a subset of the tags match. Since tag matching # for meta graph defs follows "all" semantics, this should yield a runtime # error. with self.session(graph=ops.Graph()) as sess: self.assertRaises(RuntimeError, loader.load, sess, ["foo", "baz"], export_dir) @test_util.run_v1_only("b/120545219") def testVariables(self): export_dir = self._get_export_dir("test_variables") builder = saved_model_builder._SavedModelBuilder(export_dir) # Graph with two variables. SavedModel invoked to: # - add with weights. with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v1", 1) self._init_and_validate_variable(sess, "v2", 2) builder.add_meta_graph_and_variables(sess, ["foo"]) # Graph with a single variable (subset of the variables from the previous # graph whose weights were saved). SavedModel invoked to: # - simply add the model (weights are not updated). with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v2", 3) builder.add_meta_graph(["bar"]) # Graph with a single variable (disjoint set of variables from the previous # graph whose weights were saved). SavedModel invoked to: # - simply add the model (weights are not updated). with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v3", 4) builder.add_meta_graph(["baz"]) # Save the SavedModel to disk. builder.save() # Restore the graph with tag "foo", whose variables were saved. 
with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["foo"], export_dir) collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertEqual(len(collection_vars), 2) self.assertEqual(1, collection_vars[0].eval()) self.assertEqual(2, collection_vars[1].eval()) # Restore the graph with tag "bar", whose variables were not saved. Only the # subset of the variables added to the graph will be restored with the # checkpointed value. with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["bar"], export_dir) collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertEqual(len(collection_vars), 1) self.assertEqual(2, collection_vars[0].eval()) # Try restoring the graph with tag "baz", whose variables were not saved. # Since this graph has a disjoint set of variables from the set that was # saved, this should raise an error. with self.session(graph=ops.Graph()) as sess: self.assertRaises(errors.NotFoundError, loader.load, sess, ["baz"], export_dir) @test_util.run_deprecated_v1 def testGraphWithoutVariables(self): export_dir = self._get_export_dir("test_graph_has_variables") builder = saved_model_builder._SavedModelBuilder(export_dir) # Graph with no variables. with self.session(graph=ops.Graph()) as sess: constant_5_name = constant_op.constant(5.0).name builder.add_meta_graph_and_variables(sess, ["foo"]) # Second graph with no variables with self.session(graph=ops.Graph()) as sess: constant_6_name = constant_op.constant(6.0).name builder.add_meta_graph(["bar"]) # Save the SavedModel to disk. builder.save() # Restore the graph with tag "foo". with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["foo"], export_dir) # Read the constant a from the graph. a = ops.get_default_graph().get_tensor_by_name(constant_5_name) b = constant_op.constant(6.0) c = a * b self.assertEqual(30.0, self.evaluate(c)) # Restore the graph with tag "bar". with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["bar"], export_dir) # Read the constant a from the graph. a = ops.get_default_graph().get_tensor_by_name(constant_6_name) b = constant_op.constant(5.0) c = a * b self.assertEqual(30.0, self.evaluate(c)) @test_util.run_deprecated_v1 def testNoOverwrite(self): export_dir = self._get_export_dir("test_no_overwrite") builder = saved_model_builder._SavedModelBuilder(export_dir) # Graph with a single variable. SavedModel invoked to: # - add with weights. with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) builder.add_meta_graph_and_variables(sess, ["foo"]) # Save the SavedModel to disk in text format. builder.save(as_text=True) # Restore the graph with tag "foo", whose variables were saved. with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["foo"], export_dir) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) # An attempt to create another builder with the same export directory should # result in an assertion error. self.assertRaises(AssertionError, saved_model_builder._SavedModelBuilder, export_dir) @test_util.run_deprecated_v1 def testSaveAsText(self): export_dir = self._get_export_dir("test_astext") builder = saved_model_builder._SavedModelBuilder(export_dir) # Graph with a single variable. SavedModel invoked to: # - add with weights. with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) builder.add_meta_graph_and_variables(sess, ["foo"]) # Graph with the same single variable. 
SavedModel invoked to: # - simply add the model (weights are not updated). with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 43) builder.add_meta_graph(["bar"]) # Save the SavedModel to disk in text format. builder.save(as_text=True) # Restore the graph with tag "foo", whose variables were saved. with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["foo"], export_dir) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) # Restore the graph with tag "bar", whose variables were not saved. with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["bar"], export_dir) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) @test_util.run_v1_only("b/120545219") def testCollections(self): export_dir = self._get_export_dir("test_collections") builder = saved_model_builder._SavedModelBuilder(export_dir) # Graph with a single variable added to a collection. SavedModel invoked to: # - add with weights. with self.session(graph=ops.Graph()) as sess: v = variables.VariableV1(42, name="v") ops.add_to_collection("foo_vars", v) self.evaluate(variables.global_variables_initializer()) self.assertEqual(42, self.evaluate(v)) builder.add_meta_graph_and_variables(sess, ["foo"]) # Graph with the same single variable added to a different collection. # SavedModel invoked to: # - simply add the model (weights are not updated). with self.session(graph=ops.Graph()) as sess: v = variables.VariableV1(43, name="v") ops.add_to_collection("bar_vars", v) self.evaluate(variables.global_variables_initializer()) self.assertEqual(43, self.evaluate(v)) builder.add_meta_graph(["bar"]) # Save the SavedModel to disk. builder.save() # Restore the graph with tag "foo", whose variables were saved. The # collection 'foo_vars' should contain a single element. The collection # 'bar_vars' should not be found. with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["foo"], export_dir) collection_foo_vars = ops.get_collection("foo_vars") self.assertEqual(len(collection_foo_vars), 1) self.assertEqual(42, collection_foo_vars[0].eval()) self.assertEqual(len(ops.get_collection("bar_vars")), 0) # Restore the graph with tag "bar", whose variables were not saved. The # collection-def exported as part of the meta graph def is updated to # reflect the new collection. The value of the variable in the # collection-def corresponds to the saved value (from the previous graph # with tag "foo"). with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["bar"], export_dir) collection_bar_vars = ops.get_collection("bar_vars") self.assertEqual(len(collection_bar_vars), 1) self.assertEqual(42, collection_bar_vars[0].eval()) self.assertEqual(len(ops.get_collection("foo_vars")), 0) @test_util.run_deprecated_v1 def testSignatureDefs(self): export_dir = self._get_export_dir("test_signature_defs") builder = saved_model_builder._SavedModelBuilder(export_dir) # Graph with a single variable and a single entry in the signature def map. # SavedModel is invoked to add with weights. with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) # Build and populate an empty SignatureDef for testing. foo_signature = signature_def_utils.build_signature_def(dict(), dict(), "foo") builder.add_meta_graph_and_variables( sess, ["foo"], signature_def_map={"foo_key": foo_signature}) # Graph with the same single variable and multiple entries in the signature # def map. No weights are saved by SavedModel. 
with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 43) # Build and populate a different SignatureDef for testing. bar_signature = signature_def_utils.build_signature_def(dict(), dict(), "bar") # Also, build a different SignatureDef corresponding to "foo_key" defined # in the previous graph. foo_new_signature = signature_def_utils.build_signature_def(dict(), dict(), "foo_new") builder.add_meta_graph( ["bar"], signature_def_map={ "bar_key": bar_signature, "foo_key": foo_new_signature }) # Save the SavedModel to disk. builder.save() # Restore the graph with tag "foo". The single entry in the SignatureDef map # corresponding to "foo_key" should exist. with self.session(graph=ops.Graph()) as sess: foo_graph = loader.load(sess, ["foo"], export_dir) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) foo_signature = foo_graph.signature_def self.assertEqual(len(foo_signature), 1) self.assertEqual("foo", foo_signature["foo_key"].method_name) # Restore the graph with tag "bar". The SignatureDef map should have two # entries. One corresponding to "bar_key" and another corresponding to the # new value of "foo_key". with self.session(graph=ops.Graph()) as sess: bar_graph = loader.load(sess, ["bar"], export_dir) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) bar_signature = bar_graph.signature_def self.assertEqual(len(bar_signature), 2) self.assertEqual("bar", bar_signature["bar_key"].method_name) self.assertEqual("foo_new", bar_signature["foo_key"].method_name) def testSignatureDefValidationFails(self): export_dir = self._get_export_dir("test_signature_def_validation_fail") builder = saved_model_builder._SavedModelBuilder(export_dir) tensor_without_encoding = meta_graph_pb2.TensorInfo() tensor_without_encoding.dtype = types_pb2.DT_FLOAT self._validate_inputs_tensor_info_fail(builder, tensor_without_encoding) self._validate_outputs_tensor_info_fail(builder, tensor_without_encoding) tensor_without_dtype = meta_graph_pb2.TensorInfo() tensor_without_dtype.name = "x" self._validate_inputs_tensor_info_fail(builder, tensor_without_dtype) self._validate_outputs_tensor_info_fail(builder, tensor_without_dtype) tensor_empty = meta_graph_pb2.TensorInfo() self._validate_inputs_tensor_info_fail(builder, tensor_empty) self._validate_outputs_tensor_info_fail(builder, tensor_empty) valid_tensor_info = meta_graph_pb2.TensorInfo() valid_tensor_info.name = "foo" valid_tensor_info.dtype = types_pb2.DT_FLOAT self._validate_sig_def_keys(builder, valid_tensor_info, constants.INIT_OP_SIGNATURE_KEY) self._validate_sig_def_keys(builder, valid_tensor_info, constants.TRAIN_OP_SIGNATURE_KEY) @test_util.run_deprecated_v1 def testSignatureDefValidationSucceedsWithName(self): tensor_with_name = meta_graph_pb2.TensorInfo() tensor_with_name.name = "foo" tensor_with_name.dtype = types_pb2.DT_FLOAT export_dir = self._get_export_dir("test_signature_def_validation_name_1") builder = saved_model_builder._SavedModelBuilder(export_dir) self._validate_inputs_tensor_info_accept(builder, tensor_with_name) export_dir = self._get_export_dir("test_signature_def_validation_name_2") builder = saved_model_builder._SavedModelBuilder(export_dir) self._validate_outputs_tensor_info_accept(builder, tensor_with_name) @test_util.run_deprecated_v1 def testSignatureDefValidationSucceedsWithCoo(self): tensor_with_coo = meta_graph_pb2.TensorInfo() # TODO(soergel) test validation of each of the fields of coo_sparse 
tensor_with_coo.coo_sparse.values_tensor_name = "foo" tensor_with_coo.dtype = types_pb2.DT_FLOAT export_dir = self._get_export_dir("test_signature_def_validation_coo_1") builder = saved_model_builder._SavedModelBuilder(export_dir) self._validate_inputs_tensor_info_accept(builder, tensor_with_coo) export_dir = self._get_export_dir("test_signature_def_validation_coo_2") builder = saved_model_builder._SavedModelBuilder(export_dir) self._validate_outputs_tensor_info_accept(builder, tensor_with_coo) @test_util.run_deprecated_v1 def testSignatureDefValidationSucceedsWithRagged(self): ragged_tensor = ragged_factory_ops.constant([[1, 2], [3]]) tensor_with_ragged = utils.build_tensor_info(ragged_tensor) export_dir = self._get_export_dir("test_signature_def_validation_ragged_1") builder = saved_model_builder._SavedModelBuilder(export_dir) self._validate_inputs_tensor_info_accept(builder, tensor_with_ragged) export_dir = self._get_export_dir("test_signature_def_validation_ragged_2") builder = saved_model_builder._SavedModelBuilder(export_dir) self._validate_outputs_tensor_info_accept(builder, tensor_with_ragged) @test_util.run_deprecated_v1 def testAssets(self): export_dir = self._get_export_dir("test_assets") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) # Build an asset collection. ignored_filepath = os.path.join( compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt")) file_io.write_string_to_file(ignored_filepath, "will be ignored") asset_list = self._build_asset_collection("hello42.txt", "foo bar baz", "asset_file_tensor") builder.add_meta_graph_and_variables( sess, ["foo"], assets_list=asset_list) # Save the SavedModel to disk. builder.save() with self.session(graph=ops.Graph()) as sess: foo_graph = loader.load(sess, ["foo"], export_dir) self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt", "foo bar baz", "asset_file_tensor:0") ignored_asset_path = os.path.join( compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY), compat.as_bytes("ignored.txt")) self.assertFalse(file_io.file_exists(ignored_asset_path)) @test_util.run_deprecated_v1 def testAssetsNameCollisionDiffFile(self): export_dir = self._get_export_dir("test_assets_name_collision_diff_file") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) asset_list = self._build_asset_collection( "hello42.txt", "foo bar bak", "asset_file_tensor", asset_subdir="1") asset_list = self._build_asset_collection( "hello42.txt", "foo bar baz", "asset_file_tensor_1", asset_subdir="2") builder.add_meta_graph_and_variables( sess, ["foo"], assets_list=asset_list) # Save the SavedModel to disk. 
builder.save() with self.session(graph=ops.Graph()) as sess: foo_graph = loader.load(sess, ["foo"], export_dir) self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt", "foo bar bak", "asset_file_tensor:0") self._validate_assets( export_dir, foo_graph.asset_file_def, "hello42.txt_1", "foo bar baz", "asset_file_tensor_1:0", asset_id=1) @test_util.run_deprecated_v1 def testAssetsNameCollisionSameFilepath(self): export_dir = self._get_export_dir("test_assets_name_collision_same_path") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) asset_list = self._build_asset_collection("hello42.txt", "foo bar baz", "asset_file_tensor") asset_list = self._build_asset_collection("hello42.txt", "foo bar baz", "asset_file_tensor_1") builder.add_meta_graph_and_variables( sess, ["foo"], assets_list=asset_list) # Save the SavedModel to disk. builder.save() with self.session(graph=ops.Graph()) as sess: foo_graph = loader.load(sess, ["foo"], export_dir) self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt", "foo bar baz", "asset_file_tensor:0") # The second tensor should be recorded, but the same. self._validate_assets( export_dir, foo_graph.asset_file_def, "hello42.txt", "foo bar baz", "asset_file_tensor_1:0", asset_id=1) ignored_asset_path = os.path.join( compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY), compat.as_bytes("hello42.txt_1")) self.assertFalse(file_io.file_exists(ignored_asset_path)) @test_util.run_deprecated_v1 def testAssetsNameCollisionSameFile(self): export_dir = self._get_export_dir("test_assets_name_collision_same_file") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) asset_list = self._build_asset_collection( "hello42.txt", "foo bar baz", "asset_file_tensor", asset_subdir="1") asset_list = self._build_asset_collection( "hello42.txt", "foo bar baz", "asset_file_tensor_1", asset_subdir="2") builder.add_meta_graph_and_variables( sess, ["foo"], assets_list=asset_list) # Save the SavedModel to disk. builder.save() with self.session(graph=ops.Graph()) as sess: foo_graph = loader.load(sess, ["foo"], export_dir) self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt", "foo bar baz", "asset_file_tensor:0") # The second tensor should be recorded, but the same. self._validate_assets( export_dir, foo_graph.asset_file_def, "hello42.txt", "foo bar baz", "asset_file_tensor_1:0", asset_id=1) ignored_asset_path = os.path.join( compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY), compat.as_bytes("hello42.txt_1")) self.assertFalse(file_io.file_exists(ignored_asset_path)) @test_util.run_deprecated_v1 def testAssetsNameCollisionManyFiles(self): export_dir = self._get_export_dir("test_assets_name_collision_many_files") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) for i in range(5): idx = str(i) asset_list = self._build_asset_collection( "hello42.txt", "foo bar baz " + idx, "asset_file_tensor_" + idx, asset_subdir=idx) builder.add_meta_graph_and_variables( sess, ["foo"], assets_list=asset_list) # Save the SavedModel to disk. 
builder.save() with self.session(graph=ops.Graph()) as sess: foo_graph = loader.load(sess, ["foo"], export_dir) for i in range(1, 5): idx = str(i) self._validate_assets( export_dir, foo_graph.asset_file_def, "hello42.txt_" + idx, "foo bar baz " + idx, "asset_file_tensor_{}:0".format(idx), asset_id=i) self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt", "foo bar baz 0", "asset_file_tensor_0:0") @test_util.run_v1_only("b/120545219") def testCustomInitOp(self): export_dir = self._get_export_dir("test_main_op") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: # Add `v1` and `v2` variables to the graph. v1 = variables.VariableV1(1, name="v1") ops.add_to_collection("v", v1) v2 = variables.VariableV1(2, name="v2") ops.add_to_collection("v", v2) # Initialize another variable `v3` to 42. v3 = variables.VariableV1(42, name="v3") ops.add_to_collection("v", v3) # Set up an assignment op to be run as part of the main_op. with ops.control_dependencies([main_op.main_op()]): add_v1_v2 = math_ops.add(v1._ref(), v2._ref()) custom_init_op = control_flow_ops.group(state_ops.assign(v3, add_v1_v2)) self.evaluate(custom_init_op) builder.add_meta_graph_and_variables( sess, ["foo"], init_op=custom_init_op) # Save the SavedModel to disk. builder.save() with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["foo"], export_dir) self.assertEqual(1, ops.get_collection("v")[0].eval()) self.assertEqual(2, ops.get_collection("v")[1].eval()) # Evaluates to the sum of the first two variables and assigned as part of # the main_op, following a restore. self.assertEqual(3, ops.get_collection("v")[2].eval()) @test_util.run_v1_only("b/120545219") def testTrainOp(self): export_dir = self._get_export_dir("test_train_op") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: # Add `v1` and `v2` variables to the graph. v1 = variables.VariableV1(1, name="v1") ops.add_to_collection("v", v1) v2 = variables.VariableV1(2, name="v2") ops.add_to_collection("v", v2) self.evaluate(variables.global_variables_initializer()) train_op = state_ops.assign_add(v1, v2) self.evaluate(train_op) builder.add_meta_graph_and_variables(sess, ["foo"], train_op=train_op) # Save the SavedModel to disk. builder.save() with self.session(graph=ops.Graph()) as sess: meta_graph_def = loader.load(sess, ["foo"], export_dir) self.assertEqual(3, ops.get_collection("v")[0].eval()) self.assertEqual(2, ops.get_collection("v")[1].eval()) self.assertIsInstance( loader_impl.get_train_op(meta_graph_def), ops.Tensor) @test_util.run_v1_only("b/120545219") def testTrainOpGroup(self): export_dir = self._get_export_dir("test_train_op_group") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: # Add `v1` and `v2` variables to the graph. v1 = variables.VariableV1(1, name="v1") ops.add_to_collection("v", v1) v2 = variables.VariableV1(2, name="v2") ops.add_to_collection("v", v2) self.evaluate(variables.global_variables_initializer()) train_op = control_flow_ops.group() self.evaluate(train_op) builder.add_meta_graph_and_variables(sess, ["foo"], train_op=train_op) # Save the SavedModel to disk. 
builder.save() with self.session(graph=ops.Graph()) as sess: meta_graph_def = loader.load(sess, ["foo"], export_dir) self.assertEqual(1, ops.get_collection("v")[0].eval()) self.assertEqual(2, ops.get_collection("v")[1].eval()) self.assertIsInstance( loader_impl.get_train_op(meta_graph_def), ops.Operation) @test_util.run_v1_only("b/120545219") def testTrainOpAfterVariables(self): export_dir = self._get_export_dir("test_train_op_after_variables") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: # Add `v1` and `v2` variables to the graph. v1 = variables.VariableV1(1, name="v1") ops.add_to_collection("v", v1) v2 = variables.VariableV1(2, name="v2") ops.add_to_collection("v", v2) self.evaluate(variables.global_variables_initializer()) builder.add_meta_graph_and_variables(sess, ["pre_foo"]) train_op = state_ops.assign_add(v1, v2) self.evaluate(train_op) builder.add_meta_graph(["foo"], train_op=train_op) # Save the SavedModel to disk. builder.save() with self.session(graph=ops.Graph()) as sess: meta_graph_def = loader.load(sess, ["foo"], export_dir) self.assertIsInstance( loader_impl.get_train_op(meta_graph_def), ops.Tensor) with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["pre_foo"], export_dir) self.assertFalse(ops.get_collection(constants.TRAIN_OP_KEY)) @test_util.run_deprecated_v1 def testMultipleAssets(self): export_dir = self._get_export_dir("test_multiple_assets") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) # Build an asset collection specific to `foo` graph. asset_list = self._build_asset_collection("foo.txt", "content_foo", "asset_file_tensor") # Add the asset collection as part of the graph with tag "foo". builder.add_meta_graph_and_variables( sess, ["foo"], assets_list=asset_list) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) # Build an asset collection specific to `bar` graph. asset_list = self._build_asset_collection("bar.txt", "content_bar", "asset_file_tensor") # Add the asset collection as part of the graph with tag "bar". builder.add_meta_graph(["bar"], assets_list=asset_list) # Save the SavedModel to disk. builder.save() # Check assets restored for graph with tag "foo". with self.session(graph=ops.Graph()) as sess: foo_graph = loader.load(sess, ["foo"], export_dir) self._validate_assets(export_dir, foo_graph.asset_file_def, "foo.txt", "content_foo", "asset_file_tensor:0") # Check assets restored for graph with tag "bar". with self.session(graph=ops.Graph()) as sess: bar_graph = loader.load(sess, ["bar"], export_dir) self._validate_assets(export_dir, bar_graph.asset_file_def, "bar.txt", "content_bar", "asset_file_tensor:0") @test_util.run_deprecated_v1 def testDuplicateAssets(self): export_dir = self._get_export_dir("test_duplicate_assets") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) # Build an asset collection with `foo.txt` that has `foo` specific # content. asset_list = self._build_asset_collection("foo.txt", "content_foo", "asset_file_tensor") # Add the asset collection as part of the graph with tag "foo". 
builder.add_meta_graph_and_variables( sess, ["foo"], assets_list=asset_list) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) # Build an asset collection with `foo.txt` that has `bar` specific # content. asset_list = self._build_asset_collection("foo.txt", "content_bar", "asset_file_tensor") # Add the asset collection as part of the graph with tag "bar". builder.add_meta_graph(["bar"], assets_list=asset_list) # Save the SavedModel to disk. builder.save() # Check assets restored for graph with tag "foo". with self.session(graph=ops.Graph()) as sess: foo_graph = loader.load(sess, ["foo"], export_dir) self._validate_assets(export_dir, foo_graph.asset_file_def, "foo.txt", "content_foo", "asset_file_tensor:0") # Check assets restored for graph with tag "bar". with self.session(graph=ops.Graph()) as sess: bar_graph = loader.load(sess, ["bar"], export_dir) # Validate the assets for `bar` graph. `foo.txt` should contain the # original contents corresponding to `foo` graph since an asset with the # same name across multiple graphs is only stored the first time self._validate_assets(export_dir, bar_graph.asset_file_def, "foo.txt", "content_foo", "asset_file_tensor:0") @test_util.run_v1_only("b/120545219") def testOp(self): export_dir = self._get_export_dir("test_op") builder = saved_model_builder._SavedModelBuilder(export_dir) with session.Session( graph=ops.Graph(), config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v1 = variables.VariableV1(1, name="v1") with sess.graph.device("/cpu:1"): v2 = variables.VariableV1(2, name="v2") # v3 is an unsaved variable derived from v1 and v2. It is used to # exercise the ability to run an init op when restoring a graph. v3 = variables.VariableV1(1, name="v3", trainable=False, collections=[]) assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2)) init_op = control_flow_ops.group(assign_v3, name="init_op") ops.add_to_collection("v", v1) ops.add_to_collection("v", v2) ops.add_to_collection("v", v3) ops.add_to_collection("init_op", init_op) self.evaluate(variables.global_variables_initializer()) self.assertEqual(1, ops.get_collection("v")[0].eval()) self.assertEqual(2, ops.get_collection("v")[1].eval()) builder.add_meta_graph_and_variables(sess, ["foo"]) # Save the SavedModel to disk. builder.save() with session.Session( graph=ops.Graph(), config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess: loader.load(sess, ["foo"], export_dir) # Validate variables, run the init op and verify result. self.assertEqual(1, ops.get_collection("v")[0].eval()) self.assertEqual(2, ops.get_collection("v")[1].eval()) ops.get_collection("init_op")[0].run() self.assertEqual(3, ops.get_collection("v")[2].eval()) def testCustomSaveable(self): export_dir = self._get_export_dir("custom_saveable") builder = saved_model_builder._SavedModelBuilder(export_dir) with session.Session( graph=ops.Graph(), config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess: # CheckpointedOp is a key-value table that can be saved across sessions. # The table register itself in SAVEABLE_OBJECTS collection. v1 = saver_test_utils.CheckpointedOp(name="v1") self.evaluate(variables.global_variables_initializer()) v1.insert("k1", 3.0).run() # Once the table is restored, we can access it through this reference. ops.add_to_collection("table_ref", v1.table_ref) builder.add_meta_graph_and_variables(sess, ["foo"]) # Save the SavedModel to disk. 
builder.save() with session.Session( graph=ops.Graph(), config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess: loader.load(sess, ["foo"], export_dir) # Instantiate a wrapper object from the checkpointed reference. v1 = saver_test_utils.CheckpointedOp( name="v1", table_ref=ops.get_collection("table_ref")[0]) self.assertEqual(b"k1", v1.keys().eval()) self.assertEqual(3.0, v1.values().eval()) @test_util.run_deprecated_v1 def testCustomSaver(self): export_dir = self._get_export_dir("test_custom_saver") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: variables.VariableV1(1, name="v1") self.evaluate(variables.global_variables_initializer()) custom_saver = training.Saver(name="my_saver") builder.add_meta_graph_and_variables(sess, ["tag"], saver=custom_saver) # Save the SavedModel to disk. builder.save() with ops.Graph().as_default() as graph: with self.session(graph=graph) as sess: saved_graph = loader.load(sess, ["tag"], export_dir) graph_ops = [x.name for x in graph.get_operations()] self.assertTrue("my_saver/restore_all" in graph_ops) self.assertFalse("save/restore_all" in graph_ops) self.assertEqual( saved_graph.saver_def.restore_op_name, "my_saver/restore_all") @test_util.run_deprecated_v1 def testNoCustomSaver(self): export_dir = self._get_export_dir("test_no_custom_saver") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: variables.VariableV1(1, name="v1") self.evaluate(variables.global_variables_initializer()) training.Saver(name="my_saver") builder.add_meta_graph_and_variables(sess, ["tag"]) # Save the SavedModel to disk. builder.save() with ops.Graph().as_default() as graph: with self.session(graph=graph) as sess: saved_graph = loader.load(sess, ["tag"], export_dir) graph_ops = [x.name for x in graph.get_operations()] self.assertTrue("my_saver/restore_all" in graph_ops) self.assertTrue("save/restore_all" in graph_ops) self.assertEqual( saved_graph.saver_def.restore_op_name, "save/restore_all") @test_util.run_deprecated_v1 def testMultipleCustomSavers(self): export_dir = self._get_export_dir("test_multiple_custom_savers") builder = saved_model_builder._SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: variables.VariableV1(1, name="v1") self.evaluate(variables.global_variables_initializer()) builder.add_meta_graph_and_variables(sess, ["tag_0"]) saver_1 = training.Saver() builder.add_meta_graph(["tag_1"], saver=saver_1) saver_2 = training.Saver() builder.add_meta_graph(["tag_2"], saver=saver_2) # Save the SavedModel to disk. builder.save() def _validate_custom_saver(tag_name, saver_name): with ops.Graph().as_default() as graph: with self.session(graph=graph) as sess: saved_graph = loader.load(sess, [tag_name], export_dir) self.assertEqual( saved_graph.saver_def.restore_op_name, saver_name) _validate_custom_saver("tag_0", "save/restore_all") _validate_custom_saver("tag_1", "save_1/restore_all") _validate_custom_saver("tag_2", "save_2/restore_all") @test_util.run_deprecated_v1 def testImportScope(self): export_dir = self._get_export_dir("test_scoped_assets") builder = saved_model_builder._SavedModelBuilder(export_dir) # Build a SavedModel with a variable, an asset, and a constant tensor. 
with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) asset_list = self._build_asset_collection("foo.txt", "content_foo", "asset_file_tensor") constant_op.constant("constant value", name="constant_tensor_name") builder.add_meta_graph_and_variables( sess, ["tag_name"], assets_list=asset_list) # Save the asset file path for later comparison. asset_file_path = asset_list[0].eval() # Save the SavedModel to disk. builder.save() with self.session(graph=ops.Graph()) as sess: # Restore the SavedModel under an import_scope in a new graph/session. graph_proto = loader.load( sess, ["tag_name"], export_dir, import_scope="scope_name") # The loaded variable tensor should be scoped, but its contents should be # unchanged. self.assertEqual( "scope_name/v:0", ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].name) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) # The loaded asset tensor should be scoped, but the asset file path and # contents should be unchanged. asset_list = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS) self.assertEqual(1, len(asset_list)) self.assertEqual(asset_file_path, asset_list[0].eval()) self.assertEqual("scope_name/asset_file_tensor:0", asset_list[0].name) # The static asset data inside graph_proto.collection_def should not be # scoped. self._validate_assets(export_dir, graph_proto.asset_file_def, "foo.txt", "content_foo", "asset_file_tensor:0") # The constant tensor should be scoped, but its contents should be # unchanged. self.assertEqual( compat.as_bytes("constant value"), ops.get_default_graph().get_tensor_by_name( "scope_name/constant_tensor_name:0").eval()) @test_util.run_deprecated_v1 def testClearDevices(self): export_dir = self._get_export_dir("test_clear_devices") builder = saved_model_builder._SavedModelBuilder(export_dir) # Specify a device and save a variable. ops.reset_default_graph() with session.Session( target="", config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): self._init_and_validate_variable(sess, "v", 42) builder.add_meta_graph_and_variables( sess, [tag_constants.TRAINING], clear_devices=True) # Save the SavedModel to disk. builder.save() # Restore the graph with a single predefined tag whose variables were saved # without any device information. with self.session(graph=ops.Graph()) as sess: loader.load(sess, [tag_constants.TRAINING], export_dir) self.assertEqual( 42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) # Tests the behavior of loading SavedModels that having missing attrs or attrs # with incorrect types. def testInconsistentConsumerDefaultAttrs(self): export_dir = self._get_export_dir( "test_strip_default_attrs_no_consumer_defaults") builder = saved_model_builder._SavedModelBuilder(export_dir) # Add a graph with a single variable and a test op with a defaultless # float32 attr, "test_attr". with session.Session(graph=ops.Graph()) as sess: variables.VariableV1(1.0, dtype=dtypes.float64, name="var") test_ops.test_attr(T=dtypes.float32, name="test_attr") self.evaluate(variables.global_variables_initializer()) builder.add_meta_graph_and_variables(sess, ["foo"]) # Save the SavedModel to disk in text format. builder.save(as_text=True) # Rewrite the SavedModel to remove the T attr from "test_attr". 
saved_model_file = os.path.join( export_dir, constants.SAVED_MODEL_FILENAME_PBTXT) with open(saved_model_file) as f: original_saved_model = f.read() no_attr_saved_model = original_saved_model.replace(""" attr { key: "T" value { type: DT_FLOAT } }""", "") with open(saved_model_file, "w") as f: f.write(no_attr_saved_model) # Loading the SavedModel via the loader must fail because the SavedModel # does not have any attr values for the "TestAttr" node, and there is no # default specified in the TestAttr OpDef. sess = session.Session(graph=ops.Graph()) with self.assertRaisesRegexp( ValueError, "NodeDef missing attr 'T' from Op<name=TestAttr"): loader.load(sess, ["foo"], export_dir) # Rewrite the SavedModel to change the type of the T attr in "test_attr" bad_type_saved_model = original_saved_model.replace(""" attr { key: "T" value { type: DT_FLOAT } }""", """ attr { key: "T" value { type: DT_DOUBLE } }""") with open(saved_model_file, "w") as f: f.write(bad_type_saved_model) # Loading the SavedModel via the loader must fail because there is no # OpKernel registered to handle T = double. sess = session.Session(graph=ops.Graph()) with self.assertRaisesRegexp( errors.InvalidArgumentError, "No OpKernel was registered to support Op 'TestAttr' used by node " "test_attr \\(defined at .*\\) with these attrs: \\[.*\\]\n" "Registered devices:.*\n" "Registered kernels:.*" ): loader.load(sess, ["foo"], export_dir) class SavedModelV1Test(SavedModelTestBase): def _validate_asset_collection(self, export_dir, graph_collection_def, expected_asset_file_name, expected_asset_file_contents, expected_asset_tensor_name, asset_id=0): assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value asset = meta_graph_pb2.AssetFileDef() assets_any[asset_id].Unpack(asset) assets_path = os.path.join( compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY), compat.as_bytes(expected_asset_file_name)) actual_asset_contents = file_io.read_file_to_string(assets_path) self.assertEqual(expected_asset_file_contents, compat.as_text(actual_asset_contents)) self.assertEqual(expected_asset_file_name, asset.filename) self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name) @test_util.run_deprecated_v1 def testWritingAssetsToCollection(self): export_dir = self._get_export_dir("test_writing_assets_to_collection") builder = saved_model_builder.SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: self._init_and_validate_variable(sess, "v", 42) # Build an asset list. ignored_filepath = os.path.join( compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt")) file_io.write_string_to_file(ignored_filepath, "will be ignored") asset_collection = self._build_asset_collection( "hello42.txt", "foo bar baz", "asset_file_tensor") builder.add_meta_graph_and_variables( sess, ["foo"], assets_collection=asset_collection) # Save the SavedModel to disk. 
builder.save() with self.session(graph=ops.Graph()) as sess: foo_graph = loader.load(sess, ["foo"], export_dir) self._validate_asset_collection(export_dir, foo_graph.collection_def, "hello42.txt", "foo bar baz", "asset_file_tensor:0") ignored_asset_path = os.path.join( compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY), compat.as_bytes("ignored.txt")) self.assertFalse(file_io.file_exists(ignored_asset_path)) @test_util.run_deprecated_v1 def testLegacyInitOpWithNonEmptyCollection(self): export_dir = self._get_export_dir( "test_legacy_init_op_with_non_empty_collection") self._testInitOpsWithNonEmptyCollection(export_dir, constants.LEGACY_INIT_OP_KEY) @test_util.run_deprecated_v1 def testMainOpWithNonEmptyCollection(self): export_dir = self._get_export_dir("test_main_op_with_non_empty_collection") self._testInitOpsWithNonEmptyCollection(export_dir, constants.MAIN_OP_KEY) def _testInitOpsWithNonEmptyCollection(self, export_dir, key): builder = saved_model_builder.SavedModelBuilder(export_dir) g = ops.Graph() with self.session(graph=g) as sess: # Initialize variable `v1` to 1. v1 = variables.VariableV1(1, name="v1") ops.add_to_collection("v", v1) # Initialize another variable `v2` to 42. v2 = variables.VariableV1(42, name="v2", trainable=False, collections=[]) ops.add_to_collection("v", v2) # Set up an assignment op to be run as part of the init op. assign_v2 = state_ops.assign(v2, v1) init_op = control_flow_ops.group(assign_v2, name="init_op") self.evaluate(variables.global_variables_initializer()) ops.add_to_collection(key, control_flow_ops.no_op()) # ValueError should be raised since the LEGACY_INIT_OP_KEY collection # is not empty and we don't support multiple init ops. with self.assertRaisesRegexp(ValueError, "Graph already contains"): builder.add_meta_graph_and_variables( sess, ["foo"], legacy_init_op=init_op) # We shouldn't be able to add as MAIN_OP, either. with self.assertRaisesRegexp(ValueError, "Graph already contains"): builder.add_meta_graph_and_variables(sess, ["foo"], main_op=init_op) def testStripDefaultAttrs(self): export_dir = self._get_export_dir("test_strip_default_attrs") builder = saved_model_builder.SavedModelBuilder(export_dir) # Add a graph with two float32 variables and a Complex Op composing them # with strip_default_attrs enabled. with session.Session(graph=ops.Graph()) as sess: real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real") imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag") math_ops.complex(real_num, imag_num, name="complex") self.evaluate(variables.global_variables_initializer()) builder.add_meta_graph_and_variables( sess, ["foo"], strip_default_attrs=True) # Add a graph with the same float32 variables and a Complex Op composing # them with strip_default_attrs disabled. with session.Session(graph=ops.Graph()) as sess: real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real") imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag") math_ops.complex(real_num, imag_num, name="complex") self.evaluate(variables.global_variables_initializer()) builder.add_meta_graph(["bar"], strip_default_attrs=False) # Save the SavedModel to disk in text format. builder.save(as_text=True) # Loading graph "foo" via the loader must restore the defaults for the # "Complex" node based on the "Complex" OpDef in the Op registry. 
sess = session.Session(graph=ops.Graph()) meta_graph_def = loader.load(sess, ["foo"], export_dir) complex_node = test_util.get_node_def_from_graph("complex", meta_graph_def.graph_def) self.assertIn("T", complex_node.attr) self.assertIn("Tout", complex_node.attr) # Load graph "foo" from disk as-is to verify default attrs are stripped. saved_model_pb = loader_impl.parse_saved_model(export_dir) self.assertIsNotNone(saved_model_pb) meta_graph_foo_def = None meta_graph_bar_def = None for meta_graph_def in saved_model_pb.meta_graphs: if set(meta_graph_def.meta_info_def.tags) == set(["foo"]): meta_graph_foo_def = meta_graph_def elif set(meta_graph_def.meta_info_def.tags) == set(["bar"]): meta_graph_bar_def = meta_graph_def self.assertIsNotNone(meta_graph_foo_def) self.assertIsNotNone(meta_graph_bar_def) # "Complex" Op has 2 attributes with defaults: # o "T" : float32. (input type) # o "Tout" : complex64. (output type) # "Complex" Op in graph "foo" shouldn't have attributes "T" and "Tout". # Graph "foo" was saved with strip_default_attrs set to True. node_def = test_util.get_node_def_from_graph("complex", meta_graph_foo_def.graph_def) self.assertNotIn("T", node_def.attr) self.assertNotIn("Tout", node_def.attr) # "Complex" Op in graph "bar" must have attributes "T" and "Tout". # Graph "bar" was saved with strip_default_attrs set to False. node_def = test_util.get_node_def_from_graph("complex", meta_graph_bar_def.graph_def) self.assertIn("T", node_def.attr) self.assertIn("Tout", node_def.attr) @test_util.run_v1_only("b/120545219") def testLegacyInitOp(self): export_dir = self._get_export_dir("test_legacy_init_op") builder = saved_model_builder.SavedModelBuilder(export_dir) with self.session(graph=ops.Graph()) as sess: # Add `v1` and `v2` variables to the graph. v1 = variables.VariableV1(1, name="v1") ops.add_to_collection("v", v1) v2 = variables.VariableV1(2, name="v2") ops.add_to_collection("v", v2) # Initialize another variable `v3` to 42. v3 = variables.VariableV1(42, name="v3", trainable=False, collections=[]) ops.add_to_collection("v", v3) # Set up an assignment op to be run as part of the init_op. assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2)) legacy_init_op = control_flow_ops.group(assign_v3, name="legacy_init_op") self.evaluate(variables.global_variables_initializer()) builder.add_meta_graph_and_variables( sess, ["foo"], legacy_init_op=legacy_init_op) # Save the SavedModel to disk. builder.save() with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["foo"], export_dir) self.assertEqual(1, ops.get_collection("v")[0].eval()) self.assertEqual(2, ops.get_collection("v")[1].eval()) # Evaluates to the sum of the first two variables and assigned as part of # the legacy_init_op, following a restore. self.assertEqual(3, ops.get_collection("v")[2].eval()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/saved_model_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for importing a TF v1-style SavedModel when executing eagerly.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil from tensorflow.core.framework import variable_pb2 from tensorflow.python.client import session as session_lib from tensorflow.python.eager import backprop from tensorflow.python.eager import lift_to_graph from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import function as framework_function from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.framework import versions from tensorflow.python.lib.io import file_io from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.saved_model import builder_impl from tensorflow.python.saved_model import load from tensorflow.python.saved_model import save from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import simple_save from tensorflow.python.saved_model import tag_constants from tensorflow.python.saved_model import utils_impl class LoadTest(test.TestCase): def _v1_single_metagraph_saved_model(self, use_resource): export_graph = ops.Graph() with export_graph.as_default(): start = array_ops.placeholder( shape=None, dtype=dtypes.float32, name="start") if use_resource: distractor = variables.RefVariable(-1., name="distractor") v = resource_variable_ops.ResourceVariable(3., name="v") else: # "distractor" gets saved in the checkpoint and so used in the restore # function, but not in the pruned function for the signature. This tests # node naming: it needs to be consistent (and ideally always the same as # the node in the original GraphDef) for the resource manager to find # the right variable. 
distractor = variables.RefVariable(-1., name="distractor") v = variables.RefVariable(3., name="v") local_variable = variables.VariableV1( 1., collections=[ops.GraphKeys.LOCAL_VARIABLES], trainable=False, use_resource=True) output = array_ops.identity(start * v * local_variable, name="output") with session_lib.Session() as session: session.run([v.initializer, distractor.initializer, local_variable.initializer]) path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) simple_save.simple_save( session, path, inputs={"start": start}, outputs={"output": output}, legacy_init_op=local_variable.initializer) return path @test_util.run_in_graph_and_eager_modes def test_resource_variable_import(self): imported = load.load(self._v1_single_metagraph_saved_model( use_resource=True)) self.evaluate(variables.global_variables_initializer()) self.evaluate(variables.local_variables_initializer()) fn = imported.signatures["serving_default"] self.assertEqual({"output": 6.}, self.evaluate(fn(constant_op.constant(2.)))) self.assertAllEqual([3., 1.], self.evaluate(imported.variables)) self.evaluate(imported.variables[0].assign(4.)) self.assertEqual({"output": 8.}, self.evaluate(fn(start=constant_op.constant(2.)))) self.evaluate(imported.variables[1].assign(2.)) self.assertEqual({"output": 24.}, self.evaluate(fn(start=constant_op.constant(3.)))) self.assertTrue(imported.variables[0].trainable) self.assertFalse(imported.variables[1].trainable) with backprop.GradientTape() as tape: output = fn(start=constant_op.constant(4.)) self.assertEqual(imported.variables[:1], list(tape.watched_variables())) self.assertEqual( 8., self.evaluate(tape.gradient(output, imported.variables[0]))) @test_util.run_in_graph_and_eager_modes def test_ref_variable_import(self): saved = self._v1_single_metagraph_saved_model(use_resource=False) imported = load.load(saved) fn = imported.signatures["serving_default"] self.evaluate(lookup_ops.tables_initializer()) self.assertEqual( 6., self.evaluate(fn(start=constant_op.constant(2.))["output"])) def _v1_output_shape_saved_model(self): export_graph = ops.Graph() with export_graph.as_default(): start = array_ops.placeholder( shape=[None], dtype=dtypes.float32, name="start") output = array_ops.identity(start, name="output") output.set_shape([1]) # Ok to use [1] because shape is only informational with session_lib.Session() as session: path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) builder = builder_impl.SavedModelBuilder(path) builder.add_meta_graph_and_variables( session, tags=[tag_constants.SERVING], signature_def_map={ "serving_default": signature_def_utils.build_signature_def( {"start": utils_impl.build_tensor_info(start)}, {"output": utils_impl.build_tensor_info(output)}) }) builder.save() return path def test_restore_output_shapes(self): saved = self._v1_output_shape_saved_model() imported = load.load(saved) fn = imported.signatures["serving_default"] self.assertEqual(tensor_shape.TensorShape([1]), fn.outputs[0].shape) def _v1_multi_metagraph_saved_model(self): export_graph = ops.Graph() with export_graph.as_default(): start = array_ops.placeholder( shape=[None], dtype=dtypes.float32, name="start") v = resource_variable_ops.ResourceVariable(21.) 
first_output = array_ops.identity(start * v, name="first_output") second_output = array_ops.identity(v, name="second_output") with session_lib.Session() as session: session.run(v.initializer) path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) builder = builder_impl.SavedModelBuilder(path) builder.add_meta_graph_and_variables( session, tags=["first"], signature_def_map={ "first_key": signature_def_utils.build_signature_def( {"first_start": utils_impl.build_tensor_info(start)}, {"first_output": utils_impl.build_tensor_info( first_output)})}) builder.add_meta_graph( tags=["second"], signature_def_map={ "second_key": signature_def_utils.build_signature_def( {"second_start": utils_impl.build_tensor_info(start)}, {"second_output": utils_impl.build_tensor_info( second_output)})}) builder.save() return path def test_multi_meta_graph_loading(self): with self.assertRaisesRegexp(ValueError, "2 MetaGraphs"): load.load(self._v1_multi_metagraph_saved_model()) first_imported = load.load(self._v1_multi_metagraph_saved_model(), tags=["first"]) self.assertEqual({"first_output": 42.}, self.evaluate(first_imported.signatures["first_key"]( first_start=constant_op.constant(2.)))) second_imported = load.load(self._v1_multi_metagraph_saved_model(), tags=set(["second"])) with self.assertRaisesRegexp(TypeError, "second_start"): second_imported.signatures["second_key"](x=constant_op.constant(2.)) with self.assertRaisesRegexp(TypeError, "second_start"): second_imported.signatures["second_key"]( second_start=constant_op.constant(2.), x=constant_op.constant(2.)) self.assertEqual({"second_output": 21.}, self.evaluate(second_imported.signatures["second_key"]( second_start=constant_op.constant(2.)))) def _v1_asset_saved_model(self, clear_shared_name): export_graph = ops.Graph() vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt") with open(vocab_path, "w") as f: f.write("alpha\nbeta\ngamma\n") with export_graph.as_default(): initializer = lookup_ops.TextFileInitializer( vocab_path, key_dtype=dtypes.string, key_index=lookup_ops.TextFileIndex.WHOLE_LINE, value_dtype=dtypes.int64, value_index=lookup_ops.TextFileIndex.LINE_NUMBER) table = lookup_ops.HashTable( initializer, default_value=-1) start = array_ops.placeholder( shape=None, dtype=dtypes.string, name="in") output = table.lookup(start, name="out") if clear_shared_name: export_graph.get_operation_by_name("hash_table")._clear_attr( "shared_name") with session_lib.Session() as session: session.run([table.initializer]) path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) simple_save.simple_save( session, path, inputs={"start": start}, outputs={"output": output}, legacy_init_op=table.initializer) file_io.delete_file(vocab_path) return path @test_util.run_in_graph_and_eager_modes def test_asset_loading(self): first_path = self._v1_asset_saved_model(clear_shared_name=False) imported = load.load(first_path) self.evaluate(lookup_ops.tables_initializer()) fn = imported.signatures["serving_default"] self.assertAllClose({"output": [2, 0]}, fn(start=constant_op.constant(["gamma", "alpha"]))) second_path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) save.save(imported, second_path, signatures=imported.signatures) shutil.rmtree(first_path) del ops.get_collection_ref(ops.GraphKeys.TABLE_INITIALIZERS)[:] second_import = load.load(second_path) self.evaluate(lookup_ops.tables_initializer()) fn = second_import.signatures["serving_default"] self.assertAllClose({"output": [2, 0]}, fn(start=constant_op.constant(["gamma", 
"alpha"]))) third_path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) save.save(second_import, third_path, signatures=second_import.signatures) shutil.rmtree(second_path) del ops.get_collection_ref(ops.GraphKeys.TABLE_INITIALIZERS)[:] third_import = load.load(third_path) self.evaluate(lookup_ops.tables_initializer()) fn = third_import.signatures["serving_default"] self.assertAllClose({"output": [2, 0]}, fn(start=constant_op.constant(["gamma", "alpha"]))) @test_util.run_in_graph_and_eager_modes def test_node_name_sharing(self): fourth_path = self._v1_asset_saved_model(clear_shared_name=True) fourth_import = load.load(fourth_path) self.evaluate(lookup_ops.tables_initializer()) fn = fourth_import.signatures["serving_default"] self.assertAllClose({"output": [2, 0]}, fn(start=constant_op.constant(["gamma", "alpha"]))) def _v1_cond_saved_model(self): export_graph = ops.Graph() with export_graph.as_default(): branch_selector = array_ops.placeholder( name="branch_selector", shape=[], dtype=dtypes.bool) output = control_flow_ops.cond( branch_selector, lambda: array_ops.ones([]), lambda: array_ops.zeros([])) with session_lib.Session() as session: path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) simple_save.simple_save( session, path, inputs={"branch_selector": branch_selector}, outputs={"output": output}) return path def test_cond(self): first_path = self._v1_cond_saved_model() imported = load.load(first_path) function = imported.signatures["serving_default"] self.assertAllClose({"output": 1.}, function(constant_op.constant(True))) self.assertAllClose({"output": 0.}, function(constant_op.constant(False))) def _v1_while_saved_model(self): export_graph = ops.Graph() with export_graph.as_default(): loop_iterations = array_ops.placeholder( name="loop_iterations", shape=[], dtype=dtypes.int32) _, output = control_flow_ops.while_loop( lambda index, accum: index <= loop_iterations, lambda index, accum: (index + 1, accum + index), [constant_op.constant(0), constant_op.constant(0)]) with session_lib.Session() as session: path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) simple_save.simple_save( session, path, inputs={"loop_iterations": loop_iterations}, outputs={"output": output}) return path def test_while(self): first_path = self._v1_while_saved_model() imported = load.load(first_path) function = imported.signatures["serving_default"] self.assertAllClose({"output": 10}, function(constant_op.constant(4))) self.assertAllClose({"output": 15}, function(constant_op.constant(5))) def _v1_nested_while_saved_model(self): export_graph = ops.Graph() with export_graph.as_default(): def _inner_while(loop_iterations): _, output = control_flow_ops.while_loop( lambda index, accum: index <= loop_iterations, lambda index, accum: (index + 1, accum + index), [constant_op.constant(0), constant_op.constant(0)]) return output loop_iterations = array_ops.placeholder( name="loop_iterations", shape=[], dtype=dtypes.int32) _, output = control_flow_ops.while_loop( lambda index, accum: index <= loop_iterations, lambda index, accum: (index + 1, accum + _inner_while(index)), [constant_op.constant(0), constant_op.constant(0)]) with session_lib.Session() as session: path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) simple_save.simple_save( session, path, inputs={"loop_iterations": loop_iterations}, outputs={"output": output}) return path def test_nested_while(self): first_path = self._v1_nested_while_saved_model() imported = load.load(first_path) 
function = imported.signatures["serving_default"] self.assertAllClose({"output": 20}, function(constant_op.constant(4))) self.assertAllClose({"output": 35}, function(constant_op.constant(5))) def _no_signatures_model(self): export_graph = ops.Graph() with export_graph.as_default(): inp = array_ops.placeholder(name="x", shape=[], dtype=dtypes.float32) array_ops.identity(inp + 1., name="out") with session_lib.Session() as session: path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) b = builder_impl.SavedModelBuilder(path) b.add_meta_graph_and_variables( session, tags=[tag_constants.SERVING], signature_def_map={}, assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)) b.save() return path def test_no_signature(self): path = self._no_signatures_model() imported = load.load(path) self.assertEqual([], list(imported.signatures.keys())) def _signature_with_no_inputs(self): export_graph = ops.Graph() with export_graph.as_default(): array_ops.placeholder(name="x", shape=[], dtype=dtypes.float32) output = random_ops.random_normal([2]) with session_lib.Session() as session: path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) b = builder_impl.SavedModelBuilder(path) b.add_meta_graph_and_variables( session, tags=[tag_constants.SERVING], signature_def_map={ "key": signature_def_utils.build_signature_def( {}, dict(value=utils_impl.build_tensor_info(output)))}) b.save() return path def test_signature_with_no_inputs(self): path = self._signature_with_no_inputs() imported = load.load(path) self.assertEqual([2], imported.signatures["key"]()["value"].shape) def test_version_info(self): path = self._signature_with_no_inputs() imported = load.load(path) self.assertEqual(versions.__version__, imported.tensorflow_version) self.assertEqual(versions.__git_version__, imported.tensorflow_git_version) def _unfed_placeholder_signature(self): export_graph = ops.Graph() with export_graph.as_default(): x = array_ops.placeholder(name="x", shape=[], dtype=dtypes.float32) output = x * random_ops.random_normal([2]) with session_lib.Session() as session: path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) b = builder_impl.SavedModelBuilder(path) b.add_meta_graph_and_variables( session, tags=[tag_constants.SERVING], signature_def_map={ "key": signature_def_utils.build_signature_def( {}, dict(value=utils_impl.build_tensor_info(output)))}) b.save() return path def test_unfed_placeholder_exception(self): path = self._unfed_placeholder_signature() with self.assertRaisesRegexp( lift_to_graph.UnliftableError, "signature needs an input for each placeholder.*\n\nUnable to lift"): load.load(path) def test_custom_pruning(self): path = self._no_signatures_model() root = load.load(path) fn = root.prune("x:0", "out:0") self.assertEqual(2., self.evaluate(fn(x=array_ops.ones([])))) root.graph.as_graph_element("x:0") def _no_trainable_variable_attribute(self, trainable): """A SavedModel where the VariableDef has no 'trainable' (it's false).""" class _MissingFieldsVariable(resource_variable_ops.ResourceVariable): def to_proto(self, export_scope=None): full_proto = super(_MissingFieldsVariable, self).to_proto(export_scope) return variable_pb2.VariableDef( variable_name=full_proto.variable_name, initial_value_name=full_proto.initial_value_name, initializer_name=full_proto.snapshot_name, save_slice_info_def=full_proto.save_slice_info_def, is_resource=full_proto.is_resource) export_graph = ops.Graph() with export_graph.as_default(): v = _MissingFieldsVariable(3., 
trainable=trainable) with session_lib.Session() as session: session.run([v.initializer]) path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) b = builder_impl.SavedModelBuilder(path) b.add_meta_graph_and_variables( session, tags=[tag_constants.SERVING], signature_def_map={}) b.save() return path def test_trainable_not_set_in_proto(self): """If a VariableDef has no 'trainable', we fall back to collections.""" real_tf_version = versions.__version__ # Pretend to be exported from an older version of TensorFlow, so trainable # will follow collections instead of checking VariableDefs. versions.__version__ = "1.7.0" path = self._no_trainable_variable_attribute(trainable=True) root = load.load(path) self.assertTrue(root.variables[0].trainable) path = self._no_trainable_variable_attribute(trainable=False) root = load.load(path) self.assertFalse(root.variables[0].trainable) versions.__version__ = real_tf_version def _export_variable(self, **kwargs_for_variable): """A 1.x SavedModel with a single variable.""" export_graph = ops.Graph() with export_graph.as_default(): v = resource_variable_ops.ResourceVariable(3., **kwargs_for_variable) with session_lib.Session() as session: session.run([v.initializer]) path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) b = builder_impl.SavedModelBuilder(path) b.add_meta_graph_and_variables( session, tags=[tag_constants.SERVING], signature_def_map={}) b.save() return path def test_trainable_in_proto(self): """If a VariableDef has a trainable property, we do not use collections.""" path = self._export_variable( trainable=True, collections=[ops.GraphKeys.GLOBAL_VARIABLES]) root = load.load(path) self.assertTrue(root.variables[0].trainable) path = self._export_variable( trainable=False, collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES]) root = load.load(path) self.assertFalse(root.variables[0].trainable) def _model_with_sparse_output(self): """Generate a graph with a SparseTensor output and serialize in V1 format""" export_graph = ops.Graph() with export_graph.as_default(): in_placeholder = array_ops.placeholder(dtype=dtypes.int64, shape=[1]) out_sparse_tensor = sparse_tensor.SparseTensor( indices=[[0]], values=in_placeholder, dense_shape=[1]) * 2 with session_lib.Session() as session: path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) simple_save.simple_save( session, path, inputs={"start": in_placeholder}, outputs={"output": out_sparse_tensor}) return path def test_load_sparse_outputs(self): path = self._model_with_sparse_output() imported = load.load(path) imported_fn = imported.signatures["serving_default"] forty_two = constant_op.constant([42], dtype=dtypes.int64) self.assertEqual([84], imported_fn(forty_two)["output"].values.numpy()) def _model_with_defun(self): """Generate a graph with a Defun and serialize in V1 format.""" export_graph = ops.Graph() with export_graph.as_default(): @framework_function.Defun(dtypes.int64) def z(x): return x + 1 @framework_function.Defun(dtypes.int64) def g(x): return z(x) + 1 @framework_function.Defun(dtypes.int64) def f(x): return g(x) + 1 in_placeholder = array_ops.placeholder(dtype=dtypes.int64, shape=[1]) out = f(in_placeholder) with session_lib.Session() as session: path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) simple_save.simple_save( session, path, inputs={"start": in_placeholder}, outputs={"output": out}) return path def test_load_defun(self): path = self._model_with_defun() imported = load.load(path) 
    imported_fn = imported.signatures["serving_default"]
    forty_two = constant_op.constant([42], dtype=dtypes.int64)
    self.assertEqual([45], imported_fn(forty_two)["output"].numpy())


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/load_v1_in_v2_test.py
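The tests above all follow the same round trip: build a 1.x graph, export it with simple_save, reload it with the object-based loader, and call the resulting signature function. A minimal sketch of that pattern, assuming v2/eager behavior is enabled for the load step and using a hypothetical export directory:

from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import simple_save

# Export a tiny 1.x graph with simple_save.
export_graph = ops.Graph()
with export_graph.as_default():
  start = array_ops.placeholder(dtype=dtypes.float32, shape=[None], name="start")
  output = start * 2.0
  with session_lib.Session() as session:
    path = "/tmp/v1_saved_model"  # hypothetical export directory
    simple_save.simple_save(
        session, path, inputs={"start": start}, outputs={"output": output})

# Re-import with the object-based loader and call the wrapped signature.
imported = load.load(path)
fn = imported.signatures["serving_default"]
print(fn(start=constant_op.constant([1.0, 2.0]))["output"])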
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel main op implementation."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=['saved_model.main_op.main_op'])
@deprecation.deprecated(
    None,
    'This function will only be available through the v1 compatibility '
    'library as tf.compat.v1.saved_model.main_op.main_op.')
def main_op():
  """Returns a main op to init variables and tables.

  Returns the main op including the group of ops that initializes all
  variables, initializes local variables and initializes all tables.

  Returns:
    The set of ops to be run as part of the main op upon the load operation.
  """
  init = variables.global_variables_initializer()
  init_local = variables.local_variables_initializer()
  init_tables = lookup_ops.tables_initializer()
  return control_flow_ops.group(init, init_local, init_tables)


# TODO(sukritiramesh): Integrate with Saver for complete restore functionality.
@tf_export(v1=['saved_model.main_op_with_restore',
               'saved_model.main_op.main_op_with_restore'])
@deprecation.deprecated(
    None,
    'This function will only be available through the v1 compatibility '
    'library as tf.compat.v1.saved_model.main_op_with_restore or '
    'tf.compat.v1.saved_model.main_op.main_op_with_restore.')
def main_op_with_restore(restore_op_name):
  """Returns a main op to init variables, tables and restore the graph.

  Returns the main op including the group of ops that initializes all
  variables, initializes local variables, initializes all tables and runs the
  op named by `restore_op_name` to restore the graph.

  Args:
    restore_op_name: Name of the op to use to restore the graph.

  Returns:
    The set of ops to be run as part of the main op upon the load operation.
  """
  with ops.control_dependencies([main_op()]):
    main_op_with_restore = control_flow_ops.group(restore_op_name)
  return main_op_with_restore
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/main_op_impl.py
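main_op is just a grouped initializer for global variables, local variables and lookup tables; a small sketch of running it directly in a 1.x session (the variable here is purely illustrative):

from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import main_op_impl

graph = ops.Graph()
with graph.as_default():
  v = variables.VariableV1(3.0, name="v")
  # Groups global-variable, local-variable and table initializers.
  init = main_op_impl.main_op()
with session_lib.Session(graph=graph) as sess:
  sess.run(init)
  print(sess.run(v))  # -> 3.0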
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SavedModel simple save functionality.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.saved_model import builder from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import tag_constants from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export @tf_export(v1=['saved_model.simple_save']) @deprecation.deprecated( None, 'This function will only be available through the v1 compatibility ' 'library as tf.compat.v1.saved_model.simple_save.') def simple_save(session, export_dir, inputs, outputs, legacy_init_op=None): """Convenience function to build a SavedModel suitable for serving. In many common cases, saving models for serving will be as simple as: simple_save(session, export_dir, inputs={"x": x, "y": y}, outputs={"z": z}) Although in many cases it's not necessary to understand all of the many ways to configure a SavedModel, this method has a few practical implications: - It will be treated as a graph for inference / serving (i.e. uses the tag `saved_model.SERVING`) - The SavedModel will load in TensorFlow Serving and supports the [Predict API](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/apis/predict.proto). To use the Classify, Regress, or MultiInference APIs, please use either [tf.Estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator) or the lower level [SavedModel APIs](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md). - Some TensorFlow ops depend on information on disk or other information called "assets". These are generally handled automatically by adding the assets to the `GraphKeys.ASSET_FILEPATHS` collection. Only assets in that collection are exported; if you need more custom behavior, you'll need to use the [SavedModelBuilder](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/builder.py). More information about SavedModel and signatures can be found here: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md. Args: session: The TensorFlow session from which to save the meta graph and variables. export_dir: The path to which the SavedModel will be stored. inputs: dict mapping string input names to tensors. These are added to the SignatureDef as the inputs. outputs: dict mapping string output names to tensors. These are added to the SignatureDef as the outputs. legacy_init_op: Legacy support for op or group of ops to execute after the restore op upon a load. 
""" signature_def_map = { signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_def_utils.predict_signature_def(inputs, outputs) } b = builder.SavedModelBuilder(export_dir) b.add_meta_graph_and_variables( session, tags=[tag_constants.SERVING], signature_def_map=signature_def_map, assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS), main_op=legacy_init_op, clear_devices=True) b.save()
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/simple_save.py
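The flip side of the convenience above is consuming a simple_save export with the 1.x loader; the tag and signature key come from the constants simple_save fills in. A sketch, where the export directory and the "x"/"z" signature keys are hypothetical (they would be whatever was passed in the inputs/outputs dicts):

from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

export_dir = "/tmp/simple_save_example"  # hypothetical
loader = loader_impl.SavedModelLoader(export_dir)
with session_lib.Session(graph=ops.Graph()) as sess:
  loader.load(sess, [tag_constants.SERVING])
  meta_graph = loader.get_meta_graph_def_from_tags([tag_constants.SERVING])
  sig = meta_graph.signature_def[
      signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
  # Feed and fetch by the tensor names recorded in the signature.
  x_name = sig.inputs["x"].name
  z_name = sig.outputs["z"].name
  print(sess.run(z_name, feed_dict={x_name: [1.0, 2.0]}))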
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SignatureDef utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import signature_def_utils_impl from tensorflow.python.saved_model import utils # We'll reuse the same tensor_infos in multiple contexts just for the tests. # The validator doesn't check shapes so we just omit them. _STRING = meta_graph_pb2.TensorInfo( name="foobar", dtype=dtypes.string.as_datatype_enum ) _FLOAT = meta_graph_pb2.TensorInfo( name="foobar", dtype=dtypes.float32.as_datatype_enum ) def _make_signature(inputs, outputs, name=None): input_info = { input_name: utils.build_tensor_info(tensor) for input_name, tensor in inputs.items() } output_info = { output_name: utils.build_tensor_info(tensor) for output_name, tensor in outputs.items() } return signature_def_utils_impl.build_signature_def(input_info, output_info, name) class SignatureDefUtilsTest(test.TestCase): @test_util.run_deprecated_v1 def testBuildSignatureDef(self): x = array_ops.placeholder(dtypes.float32, 1, name="x") x_tensor_info = utils.build_tensor_info(x) inputs = {} inputs["foo-input"] = x_tensor_info y = array_ops.placeholder(dtypes.float32, name="y") y_tensor_info = utils.build_tensor_info(y) outputs = {} outputs["foo-output"] = y_tensor_info signature_def = signature_def_utils_impl.build_signature_def( inputs, outputs, "foo-method-name") self.assertEqual("foo-method-name", signature_def.method_name) # Check inputs in signature def. self.assertEqual(1, len(signature_def.inputs)) x_tensor_info_actual = signature_def.inputs["foo-input"] self.assertEqual("x:0", x_tensor_info_actual.name) self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info_actual.dtype) self.assertEqual(1, len(x_tensor_info_actual.tensor_shape.dim)) self.assertEqual(1, x_tensor_info_actual.tensor_shape.dim[0].size) # Check outputs in signature def. 
self.assertEqual(1, len(signature_def.outputs)) y_tensor_info_actual = signature_def.outputs["foo-output"] self.assertEqual("y:0", y_tensor_info_actual.name) self.assertEqual(types_pb2.DT_FLOAT, y_tensor_info_actual.dtype) self.assertEqual(0, len(y_tensor_info_actual.tensor_shape.dim)) @test_util.run_deprecated_v1 def testRegressionSignatureDef(self): input1 = constant_op.constant("a", name="input-1") output1 = constant_op.constant(2.2, name="output-1") signature_def = signature_def_utils_impl.regression_signature_def( input1, output1) self.assertEqual(signature_constants.REGRESS_METHOD_NAME, signature_def.method_name) # Check inputs in signature def. self.assertEqual(1, len(signature_def.inputs)) x_tensor_info_actual = ( signature_def.inputs[signature_constants.REGRESS_INPUTS]) self.assertEqual("input-1:0", x_tensor_info_actual.name) self.assertEqual(types_pb2.DT_STRING, x_tensor_info_actual.dtype) self.assertEqual(0, len(x_tensor_info_actual.tensor_shape.dim)) # Check outputs in signature def. self.assertEqual(1, len(signature_def.outputs)) y_tensor_info_actual = ( signature_def.outputs[signature_constants.REGRESS_OUTPUTS]) self.assertEqual("output-1:0", y_tensor_info_actual.name) self.assertEqual(types_pb2.DT_FLOAT, y_tensor_info_actual.dtype) self.assertEqual(0, len(y_tensor_info_actual.tensor_shape.dim)) @test_util.run_deprecated_v1 def testClassificationSignatureDef(self): input1 = constant_op.constant("a", name="input-1") output1 = constant_op.constant("b", name="output-1") output2 = constant_op.constant(3.3, name="output-2") signature_def = signature_def_utils_impl.classification_signature_def( input1, output1, output2) self.assertEqual(signature_constants.CLASSIFY_METHOD_NAME, signature_def.method_name) # Check inputs in signature def. self.assertEqual(1, len(signature_def.inputs)) x_tensor_info_actual = ( signature_def.inputs[signature_constants.CLASSIFY_INPUTS]) self.assertEqual("input-1:0", x_tensor_info_actual.name) self.assertEqual(types_pb2.DT_STRING, x_tensor_info_actual.dtype) self.assertEqual(0, len(x_tensor_info_actual.tensor_shape.dim)) # Check outputs in signature def. self.assertEqual(2, len(signature_def.outputs)) classes_tensor_info_actual = ( signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES]) self.assertEqual("output-1:0", classes_tensor_info_actual.name) self.assertEqual(types_pb2.DT_STRING, classes_tensor_info_actual.dtype) self.assertEqual(0, len(classes_tensor_info_actual.tensor_shape.dim)) scores_tensor_info_actual = ( signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_SCORES]) self.assertEqual("output-2:0", scores_tensor_info_actual.name) self.assertEqual(types_pb2.DT_FLOAT, scores_tensor_info_actual.dtype) self.assertEqual(0, len(scores_tensor_info_actual.tensor_shape.dim)) @test_util.run_deprecated_v1 def testPredictionSignatureDef(self): input1 = constant_op.constant("a", name="input-1") input2 = constant_op.constant("b", name="input-2") output1 = constant_op.constant("c", name="output-1") output2 = constant_op.constant("d", name="output-2") signature_def = signature_def_utils_impl.predict_signature_def({ "input-1": input1, "input-2": input2 }, {"output-1": output1, "output-2": output2}) self.assertEqual(signature_constants.PREDICT_METHOD_NAME, signature_def.method_name) # Check inputs in signature def. 
self.assertEqual(2, len(signature_def.inputs)) input1_tensor_info_actual = (signature_def.inputs["input-1"]) self.assertEqual("input-1:0", input1_tensor_info_actual.name) self.assertEqual(types_pb2.DT_STRING, input1_tensor_info_actual.dtype) self.assertEqual(0, len(input1_tensor_info_actual.tensor_shape.dim)) input2_tensor_info_actual = (signature_def.inputs["input-2"]) self.assertEqual("input-2:0", input2_tensor_info_actual.name) self.assertEqual(types_pb2.DT_STRING, input2_tensor_info_actual.dtype) self.assertEqual(0, len(input2_tensor_info_actual.tensor_shape.dim)) # Check outputs in signature def. self.assertEqual(2, len(signature_def.outputs)) output1_tensor_info_actual = (signature_def.outputs["output-1"]) self.assertEqual("output-1:0", output1_tensor_info_actual.name) self.assertEqual(types_pb2.DT_STRING, output1_tensor_info_actual.dtype) self.assertEqual(0, len(output1_tensor_info_actual.tensor_shape.dim)) output2_tensor_info_actual = (signature_def.outputs["output-2"]) self.assertEqual("output-2:0", output2_tensor_info_actual.name) self.assertEqual(types_pb2.DT_STRING, output2_tensor_info_actual.dtype) self.assertEqual(0, len(output2_tensor_info_actual.tensor_shape.dim)) @test_util.run_deprecated_v1 def testTrainSignatureDef(self): self._testSupervisedSignatureDef( signature_def_utils_impl.supervised_train_signature_def, signature_constants.SUPERVISED_TRAIN_METHOD_NAME) @test_util.run_deprecated_v1 def testEvalSignatureDef(self): self._testSupervisedSignatureDef( signature_def_utils_impl.supervised_eval_signature_def, signature_constants.SUPERVISED_EVAL_METHOD_NAME) def _testSupervisedSignatureDef(self, fn_to_test, method_name): inputs = { "input-1": constant_op.constant("a", name="input-1"), "input-2": constant_op.constant("b", name="input-2"), } loss = {"loss-1": constant_op.constant(0.45, name="loss-1")} predictions = { "classes": constant_op.constant([100], name="classes"), } metrics_val = constant_op.constant(100.0, name="metrics_val") metrics = { "metrics/value": metrics_val, "metrics/update_op": array_ops.identity(metrics_val, name="metrics_op"), } signature_def = fn_to_test(inputs, loss, predictions, metrics) self.assertEqual(method_name, signature_def.method_name) # Check inputs in signature def. self.assertEqual(2, len(signature_def.inputs)) input1_tensor_info_actual = (signature_def.inputs["input-1"]) self.assertEqual("input-1:0", input1_tensor_info_actual.name) self.assertEqual(types_pb2.DT_STRING, input1_tensor_info_actual.dtype) self.assertEqual(0, len(input1_tensor_info_actual.tensor_shape.dim)) input2_tensor_info_actual = (signature_def.inputs["input-2"]) self.assertEqual("input-2:0", input2_tensor_info_actual.name) self.assertEqual(types_pb2.DT_STRING, input2_tensor_info_actual.dtype) self.assertEqual(0, len(input2_tensor_info_actual.tensor_shape.dim)) # Check outputs in signature def. 
self.assertEqual(4, len(signature_def.outputs)) self.assertEqual("loss-1:0", signature_def.outputs["loss-1"].name) self.assertEqual(types_pb2.DT_FLOAT, signature_def.outputs["loss-1"].dtype) self.assertEqual("classes:0", signature_def.outputs["classes"].name) self.assertEqual(1, len(signature_def.outputs["classes"].tensor_shape.dim)) self.assertEqual( "metrics_val:0", signature_def.outputs["metrics/value"].name) self.assertEqual( types_pb2.DT_FLOAT, signature_def.outputs["metrics/value"].dtype) self.assertEqual( "metrics_op:0", signature_def.outputs["metrics/update_op"].name) self.assertEqual( types_pb2.DT_FLOAT, signature_def.outputs["metrics/value"].dtype) @test_util.run_deprecated_v1 def testTrainSignatureDefMissingInputs(self): self._testSupervisedSignatureDefMissingInputs( signature_def_utils_impl.supervised_train_signature_def, signature_constants.SUPERVISED_TRAIN_METHOD_NAME) @test_util.run_deprecated_v1 def testEvalSignatureDefMissingInputs(self): self._testSupervisedSignatureDefMissingInputs( signature_def_utils_impl.supervised_eval_signature_def, signature_constants.SUPERVISED_EVAL_METHOD_NAME) def _testSupervisedSignatureDefMissingInputs(self, fn_to_test, method_name): inputs = { "input-1": constant_op.constant("a", name="input-1"), "input-2": constant_op.constant("b", name="input-2"), } loss = {"loss-1": constant_op.constant(0.45, name="loss-1")} predictions = { "classes": constant_op.constant([100], name="classes"), } metrics_val = constant_op.constant(100, name="metrics_val") metrics = { "metrics/value": metrics_val, "metrics/update_op": array_ops.identity(metrics_val, name="metrics_op"), } with self.assertRaises(ValueError): signature_def = fn_to_test( {}, loss=loss, predictions=predictions, metrics=metrics) signature_def = fn_to_test(inputs, loss=loss) self.assertEqual(method_name, signature_def.method_name) self.assertEqual(1, len(signature_def.outputs)) signature_def = fn_to_test(inputs, metrics=metrics, loss=loss) self.assertEqual(method_name, signature_def.method_name) self.assertEqual(3, len(signature_def.outputs)) def _assertValidSignature(self, inputs, outputs, method_name): signature_def = signature_def_utils_impl.build_signature_def( inputs, outputs, method_name) self.assertTrue( signature_def_utils_impl.is_valid_signature(signature_def)) def _assertInvalidSignature(self, inputs, outputs, method_name): signature_def = signature_def_utils_impl.build_signature_def( inputs, outputs, method_name) self.assertFalse( signature_def_utils_impl.is_valid_signature(signature_def)) def testValidSignaturesAreAccepted(self): self._assertValidSignature( {"inputs": _STRING}, {"classes": _STRING, "scores": _FLOAT}, signature_constants.CLASSIFY_METHOD_NAME) self._assertValidSignature( {"inputs": _STRING}, {"classes": _STRING}, signature_constants.CLASSIFY_METHOD_NAME) self._assertValidSignature( {"inputs": _STRING}, {"scores": _FLOAT}, signature_constants.CLASSIFY_METHOD_NAME) self._assertValidSignature( {"inputs": _STRING}, {"outputs": _FLOAT}, signature_constants.REGRESS_METHOD_NAME) self._assertValidSignature( {"foo": _STRING, "bar": _FLOAT}, {"baz": _STRING, "qux": _FLOAT}, signature_constants.PREDICT_METHOD_NAME) def testInvalidMethodNameSignatureIsRejected(self): # WRONG METHOD self._assertInvalidSignature( {"inputs": _STRING}, {"classes": _STRING, "scores": _FLOAT}, "WRONG method name") def testInvalidClassificationSignaturesAreRejected(self): # CLASSIFY: wrong types self._assertInvalidSignature( {"inputs": _FLOAT}, {"classes": _STRING, "scores": _FLOAT}, 
signature_constants.CLASSIFY_METHOD_NAME) self._assertInvalidSignature( {"inputs": _STRING}, {"classes": _FLOAT, "scores": _FLOAT}, signature_constants.CLASSIFY_METHOD_NAME) self._assertInvalidSignature( {"inputs": _STRING}, {"classes": _STRING, "scores": _STRING}, signature_constants.CLASSIFY_METHOD_NAME) # CLASSIFY: wrong keys self._assertInvalidSignature( {}, {"classes": _STRING, "scores": _FLOAT}, signature_constants.CLASSIFY_METHOD_NAME) self._assertInvalidSignature( {"inputs_WRONG": _STRING}, {"classes": _STRING, "scores": _FLOAT}, signature_constants.CLASSIFY_METHOD_NAME) self._assertInvalidSignature( {"inputs": _STRING}, {"classes_WRONG": _STRING, "scores": _FLOAT}, signature_constants.CLASSIFY_METHOD_NAME) self._assertInvalidSignature( {"inputs": _STRING}, {}, signature_constants.CLASSIFY_METHOD_NAME) self._assertInvalidSignature( {"inputs": _STRING}, {"classes": _STRING, "scores": _FLOAT, "extra_WRONG": _STRING}, signature_constants.CLASSIFY_METHOD_NAME) def testInvalidRegressionSignaturesAreRejected(self): # REGRESS: wrong types self._assertInvalidSignature( {"inputs": _FLOAT}, {"outputs": _FLOAT}, signature_constants.REGRESS_METHOD_NAME) self._assertInvalidSignature( {"inputs": _STRING}, {"outputs": _STRING}, signature_constants.REGRESS_METHOD_NAME) # REGRESS: wrong keys self._assertInvalidSignature( {}, {"outputs": _FLOAT}, signature_constants.REGRESS_METHOD_NAME) self._assertInvalidSignature( {"inputs_WRONG": _STRING}, {"outputs": _FLOAT}, signature_constants.REGRESS_METHOD_NAME) self._assertInvalidSignature( {"inputs": _STRING}, {"outputs_WRONG": _FLOAT}, signature_constants.REGRESS_METHOD_NAME) self._assertInvalidSignature( {"inputs": _STRING}, {}, signature_constants.REGRESS_METHOD_NAME) self._assertInvalidSignature( {"inputs": _STRING}, {"outputs": _FLOAT, "extra_WRONG": _STRING}, signature_constants.REGRESS_METHOD_NAME) def testInvalidPredictSignaturesAreRejected(self): # PREDICT: wrong keys self._assertInvalidSignature( {}, {"baz": _STRING, "qux": _FLOAT}, signature_constants.PREDICT_METHOD_NAME) self._assertInvalidSignature( {"foo": _STRING, "bar": _FLOAT}, {}, signature_constants.PREDICT_METHOD_NAME) @test_util.run_v1_only("b/120545219") def testOpSignatureDef(self): key = "adding_1_and_2_key" add_op = math_ops.add(1, 2, name="adding_1_and_2") signature_def = signature_def_utils_impl.op_signature_def(add_op, key) self.assertIn(key, signature_def.outputs) self.assertEqual(add_op.name, signature_def.outputs[key].name) @test_util.run_v1_only("b/120545219") def testLoadOpFromSignatureDef(self): key = "adding_1_and_2_key" add_op = math_ops.add(1, 2, name="adding_1_and_2") signature_def = signature_def_utils_impl.op_signature_def(add_op, key) self.assertEqual( add_op, signature_def_utils_impl.load_op_from_signature_def(signature_def, key)) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/signature_def_utils_test.py
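Outside of tests, the helpers exercised above compose as follows; a short sketch assuming a hypothetical predict-style graph built in 1.x graph mode:

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils_impl
from tensorflow.python.saved_model import utils

x = array_ops.placeholder(dtypes.float32, shape=[None], name="x")
y = x + 1.0
signature = signature_def_utils_impl.build_signature_def(
    inputs={"x": utils.build_tensor_info(x)},
    outputs={"y": utils.build_tensor_info(y)},
    method_name=signature_constants.PREDICT_METHOD_NAME)
# Predict signatures only need non-empty input and output maps to validate.
assert signature_def_utils_impl.is_valid_signature(signature)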
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helpers for working with signatures in tf.saved_model.save.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import def_function from tensorflow.python.eager import function as defun from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.saved_model import revived_types from tensorflow.python.saved_model import signature_constants from tensorflow.python.training.tracking import base from tensorflow.python.util import compat from tensorflow.python.util import nest from tensorflow.python.util.compat import collections_abc DEFAULT_SIGNATURE_ATTR = "_default_save_signature" SIGNATURE_ATTRIBUTE_NAME = "signatures" def _get_signature(function): if (isinstance(function, (defun.Function, def_function.Function)) and function.input_signature is not None): function = function.get_concrete_function() if not isinstance(function, defun.ConcreteFunction): return None return function def _valid_signature(concrete_function): """Returns whether concrete function can be converted to a signature.""" if not concrete_function.outputs: # Functions without outputs don't make sense as signatures. We just don't # have any way to run an Operation with no outputs as a SignatureDef in the # 1.x style. return False try: _normalize_outputs(concrete_function.structured_outputs, "unused", "unused") except ValueError: return False return True def find_function_to_export(saveable_view): """Function to export, None if no suitable function was found.""" # If the user did not specify signatures, check the root object for a function # that can be made into a signature. functions = saveable_view.list_functions(saveable_view.root) signature = functions.get(DEFAULT_SIGNATURE_ATTR, None) if signature is not None: return signature # TODO(andresp): Discuss removing this behaviour. It can lead to WTFs when a # user decides to annotate more functions with tf.function and suddenly # serving that model way later in the process stops working. 
possible_signatures = [] for function in functions.values(): concrete = _get_signature(function) if concrete is not None and _valid_signature(concrete): possible_signatures.append(concrete) if len(possible_signatures) == 1: single_function = possible_signatures[0] signature = _get_signature(single_function) if signature and _valid_signature(signature): return signature return None def canonicalize_signatures(signatures): """Converts `signatures` into a dictionary of concrete functions.""" if signatures is None: return {} if not isinstance(signatures, collections_abc.Mapping): signatures = { signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signatures} concrete_signatures = {} for signature_key, function in signatures.items(): signature_function = _get_signature(function) if signature_function is None: raise ValueError( ("Expected a TensorFlow function to generate a signature for, but " "got {}. Only `tf.functions` with an input signature or " "concrete functions can be used as a signature.").format(function)) # Re-wrap the function so that it returns a dictionary of Tensors. This # matches the format of 1.x-style signatures. # pylint: disable=cell-var-from-loop @def_function.function def signature_wrapper(**kwargs): structured_outputs = signature_function(**kwargs) return _normalize_outputs( structured_outputs, signature_function.name, signature_key) # TODO(b/123902469): Use ConcreteFunction.structured_inputs once their names # always match keyword arguments. tensor_spec_signature = {} for keyword, tensor in zip( signature_function._arg_keywords, # pylint: disable=protected-access signature_function.inputs): keyword = compat.as_str(keyword) tensor_spec_signature[keyword] = tensor_spec.TensorSpec.from_tensor( tensor, name=keyword) final_concrete = signature_wrapper.get_concrete_function( **tensor_spec_signature) # pylint: disable=protected-access if len(final_concrete._arg_keywords) == 1: # If there is only one input to the signature, a very common case, then # ordering is unambiguous and we can let people pass a positional # argument. Since SignatureDefs are unordered (protobuf "map") multiple # arguments means we need to be keyword-only. final_concrete._num_positional_args = 1 else: final_concrete._num_positional_args = 0 # pylint: enable=protected-access concrete_signatures[signature_key] = final_concrete # pylint: enable=cell-var-from-loop return concrete_signatures def _is_flat(sequence): sequence_flat = nest.flatten(sequence) try: nest.assert_same_structure(sequence_flat, sequence, check_types=False) return True except ValueError: return False except TypeError: return False def _normalize_outputs(outputs, function_name, signature_key): """Construct an output dictionary from unnormalized function outputs.""" if isinstance(outputs, collections_abc.Mapping): for key, value in outputs.items(): if not isinstance(value, ops.Tensor): raise ValueError( ("Got a dictionary containing non-Tensor value {} for key {} " "in the output of the function {} used to generate a SavedModel " "signature. Dictionaries outputs for functions used as signatures " "should have one Tensor output per string key.") .format(value, key, compat.as_str_any(function_name))) return outputs else: original_outputs = outputs if not isinstance(outputs, collections_abc.Sequence): outputs = [outputs] if not _is_flat(outputs): raise ValueError( ("Got non-flat outputs '{}' from '{}' for SavedModel " "signature '{}'. 
Signatures have one Tensor per output, so " "to have predictable names Python functions used to generate " "these signatures should avoid outputting Tensors in nested " "structures.") .format(original_outputs, function_name, signature_key)) return {("output_{}".format(output_index)): output for output_index, output in enumerate(outputs)} # _SignatureMap is immutable to ensure that users do not expect changes to be # reflected in the SavedModel. Using public APIs, tf.saved_model.load() is the # only way to create a _SignatureMap and there is no way to modify it. So we can # safely ignore/overwrite ".signatures" attributes attached to objects being # saved if they contain a _SignatureMap. A ".signatures" attribute containing # any other type (e.g. a regular dict) will raise an exception asking the user # to first "del obj.signatures" if they want it overwritten. class _SignatureMap(collections_abc.Mapping, base.Trackable): """A collection of SavedModel signatures.""" def __init__(self): self._signatures = {} def _add_signature(self, name, concrete_function): """Adds a signature to the _SignatureMap.""" # Ideally this object would be immutable, but restore is streaming so we do # need a private API for adding new signatures to an existing object. self._signatures[name] = concrete_function def __getitem__(self, key): return self._signatures[key] def __iter__(self): return iter(self._signatures) def __len__(self): return len(self._signatures) def __repr__(self): return "_SignatureMap({})".format(self._signatures) def _list_functions_for_serialization(self, unused_serialization_cache): return { key: value for key, value in self.items() if isinstance(value, (def_function.Function, defun.ConcreteFunction)) } revived_types.register_revived_type( "signature_map", lambda obj: isinstance(obj, _SignatureMap), versions=[revived_types.VersionedTypeRegistration( # Standard dependencies are enough to reconstruct the trackable # items in dictionaries, so we don't need to save any extra information. object_factory=lambda proto: _SignatureMap(), version=1, min_producer_version=1, min_consumer_version=1, setter=_SignatureMap._add_signature # pylint: disable=protected-access )]) def create_signature_map(signatures): """Creates an object containing `signatures`.""" signature_map = _SignatureMap() for name, func in signatures.items(): # This true of any signature that came from canonicalize_signatures. Here as # a sanity check on saving; crashing on load (e.g. in _add_signature) would # be more problematic in case future export changes violated these # assertions. assert isinstance(func, defun.ConcreteFunction) assert isinstance(func.structured_outputs, collections_abc.Mapping) # pylint: disable=protected-access if len(func._arg_keywords) == 1: assert 1 == func._num_positional_args else: assert 0 == func._num_positional_args signature_map._add_signature(name, func) # pylint: enable=protected-access return signature_map def validate_saveable_view(saveable_view): """Performs signature-related sanity checks on `saveable_view`.""" for name, dep in saveable_view.list_dependencies( saveable_view.root): if name == SIGNATURE_ATTRIBUTE_NAME: if not isinstance(dep, _SignatureMap): raise ValueError( ("Exporting an object {} which has an attribute named " "'{signatures}'. This is a reserved attribute used to store " "SavedModel signatures in objects which come from " "`tf.saved_model.load`. Delete this attribute " "(e.g. 
'del obj.{signatures}') before saving if this shadowing is " "acceptable.").format( saveable_view.root, signatures=SIGNATURE_ATTRIBUTE_NAME)) break
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/signature_serialization.py
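For orientation, a sketch of the export path these helpers serve: a tf.function with an input signature is passed as `signatures=`, canonicalized into a concrete function that returns a dict of named Tensors, and reappears under `.signatures` after loading. This assumes v2/eager behavior, the same internal modules used in the surrounding files, and a hypothetical path:

from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.training.tracking import tracking

root = tracking.AutoTrackable()
root.f = def_function.function(
    lambda x: {"doubled": x * 2.0},
    input_signature=[
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32, name="x")])

path = "/tmp/signature_example"  # hypothetical
save.save(root, path, signatures=root.f)  # canonicalize_signatures runs here

imported = load.load(path)
fn = imported.signatures["serving_default"]
print(fn(x=constant_op.constant(3.0))["doubled"])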
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SavedModelLoader class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil from absl.testing import parameterized from tensorflow.python.client import session from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import loader_impl from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import utils from tensorflow.python.training import saver as tf_saver def _get_export_dir(label): return os.path.join(test.get_temp_dir(), label) SIMPLE_ADD_SAVED_MODEL = _get_export_dir("simple_add_saved_model") SAVED_MODEL_WITH_MAIN_OP = _get_export_dir("saved_model_with_main_op") def build_graph_helper(): g = ops.Graph() with g.as_default(): x = variables.VariableV1(5, name="x") y = variables.VariableV1(11, name="y") z = x + y foo_sig_def = signature_def_utils.build_signature_def({ "foo_input": utils.build_tensor_info(x) }, {"foo_output": utils.build_tensor_info(z)}) bar_sig_def = signature_def_utils.build_signature_def({ "bar_x": utils.build_tensor_info(x), "bar_y": utils.build_tensor_info(y) }, {"bar_z": utils.build_tensor_info(z)}) return g, {"foo": foo_sig_def, "bar": bar_sig_def}, y @parameterized.parameters((saved_model_builder.SavedModelBuilder,), (saved_model_builder._SavedModelBuilder,)) class SavedModelLoaderTest(test.TestCase, parameterized.TestCase): def export_simple_graph(self, builder_cls): g, sig_def_map, _ = build_graph_helper() with session.Session(graph=g) as sess: self.evaluate(variables.global_variables_initializer()) builder = builder_cls(SIMPLE_ADD_SAVED_MODEL) builder.add_meta_graph_and_variables(sess, ["foo_graph"], sig_def_map) builder.save() def export_graph_with_main_op(self, builder_cls): g, sig_def_map, y = build_graph_helper() with session.Session(graph=g) as sess: self.evaluate(variables.global_variables_initializer()) assign_op = control_flow_ops.group(state_ops.assign(y, 7)) builder = builder_cls(SAVED_MODEL_WITH_MAIN_OP) if builder_cls == saved_model_builder._SavedModelBuilder: builder.add_meta_graph_and_variables( sess, ["foo_graph"], sig_def_map, init_op=assign_op) else: builder.add_meta_graph_and_variables( sess, ["foo_graph"], sig_def_map, main_op=assign_op) builder.save() def tearDown(self): super(SavedModelLoaderTest, self).tearDown() shutil.rmtree(test.get_temp_dir(), ignore_errors=True) @test_util.run_v1_only("b/120545219") def test_load_function(self, builder_cls): 
self.export_simple_graph(builder_cls) loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL) with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["foo_graph"]) self.assertEqual(5, sess.graph.get_tensor_by_name("x:0").eval()) self.assertEqual(11, sess.graph.get_tensor_by_name("y:0").eval()) self.export_graph_with_main_op(builder_cls) loader2 = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP) with self.session(graph=ops.Graph()) as sess: loader2.load(sess, ["foo_graph"]) self.assertEqual(5, sess.graph.get_tensor_by_name("x:0").eval()) self.assertEqual(7, sess.graph.get_tensor_by_name("y:0").eval()) @test_util.run_v1_only("b/120545219") def test_load_graph(self, builder_cls): self.export_simple_graph(builder_cls) loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL) graph = ops.Graph() loader.load_graph(graph, ["foo_graph"]) x = graph.get_tensor_by_name("x:0") y = graph.get_tensor_by_name("y:0") with self.assertRaises(KeyError): graph.get_tensor_by_name("z:0") with self.session(graph=graph): # Check that x and y are not initialized with self.assertRaises(errors.FailedPreconditionError): self.evaluate(x) with self.assertRaises(errors.FailedPreconditionError): self.evaluate(y) @test_util.run_v1_only("b/120545219") def test_load_with_import_scope(self, builder_cls): self.export_graph_with_main_op(builder_cls) loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP) with self.session(graph=ops.Graph()) as sess: saver, _ = loader.load_graph( sess.graph, ["foo_graph"], import_scope="baz") # The default saver should not work when the import scope is set. with self.assertRaises(errors.NotFoundError): loader.restore_variables(sess, tf_saver.Saver()) loader.restore_variables(sess, saver) if builder_cls == saved_model_builder._SavedModelBuilder: with self.assertRaises(errors.NotFoundError): loader.run_init_ops(sess, ["foo_graph"]) loader.run_init_ops(sess, ["foo_graph"], import_scope="baz") else: loader.run_init_ops(sess, ["foo_graph"]) self.assertEqual(5, sess.graph.get_tensor_by_name("baz/x:0").eval()) self.assertEqual(7, sess.graph.get_tensor_by_name("baz/y:0").eval()) # Test combined load function. loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP) with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["foo_graph"], import_scope="baa") self.assertEqual(5, sess.graph.get_tensor_by_name("baa/x:0").eval()) self.assertEqual(7, sess.graph.get_tensor_by_name("baa/y:0").eval()) @test_util.run_deprecated_v1 def test_restore_variables(self, builder_cls): self.export_graph_with_main_op(builder_cls) loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP) with self.session(graph=ops.Graph()) as sess: x = variables.VariableV1(0, name="x") y = variables.VariableV1(0, name="y") z = x * y self.evaluate(variables.global_variables_initializer()) # There are variables to restore, so a saver must be created. 
with self.assertRaises(ValueError): loader.restore_variables(sess, None) loader.restore_variables(sess, tf_saver.Saver()) self.assertEqual(55, self.evaluate(z)) @test_util.run_v1_only("b/120545219") def test_run_init_op(self, builder_cls): self.export_graph_with_main_op(builder_cls) loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP) graph = ops.Graph() saver, _ = loader.load_graph(graph, ["foo_graph"]) with self.session(graph=graph) as sess: loader.restore_variables(sess, saver) self.assertEqual(5, sess.graph.get_tensor_by_name("x:0").eval()) self.assertEqual(11, sess.graph.get_tensor_by_name("y:0").eval()) loader.run_init_ops(sess, ["foo_graph"]) self.assertEqual(5, sess.graph.get_tensor_by_name("x:0").eval()) self.assertEqual(7, sess.graph.get_tensor_by_name("y:0").eval()) def test_parse_saved_model(self, builder_cls): self.export_simple_graph(builder_cls) loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL) meta_graph = loader.get_meta_graph_def_from_tags(["foo_graph"]) self.assertIsNotNone(meta_graph) self.assertIn("foo", meta_graph.signature_def) self.assertIn("bar", meta_graph.signature_def) def test_load_invalid_meta_graph(self, builder_cls): self.export_simple_graph(builder_cls) loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL) with self.assertRaises(RuntimeError): loader.get_meta_graph_def_from_tags([]) with self.assertRaises(RuntimeError): loader.get_meta_graph_def_from_tags([""]) with self.assertRaises(RuntimeError): loader.get_meta_graph_def_from_tags(["not_a_graph"]) @test_util.run_v1_only("b/120545219") def test_load_saved_model_with_no_variables(self, builder_cls): """Test that SavedModel runs saver when there appear to be no variables. When no variables are detected, this may mean that the variables were saved to different collections, or the collections weren't saved to the SavedModel. If the SavedModel MetaGraphDef contains a saver, it should still run in either of these cases. 
Args: builder_cls: SavedModelBuilder or _SavedModelBuilder class """ path = _get_export_dir("no_variable_saved_model") with session.Session(graph=ops.Graph()) as sess: x = variables.VariableV1( 5, name="x", collections=["not_global_variable"]) y = variables.VariableV1( 11, name="y", collections=["not_global_variable"]) self.assertFalse(variables._all_saveable_objects()) z = x + y self.evaluate(variables.variables_initializer([x, y])) foo_sig_def = signature_def_utils.build_signature_def( {"foo_input": utils.build_tensor_info(x)}, {"foo_output": utils.build_tensor_info(z)}) builder = saved_model_builder.SavedModelBuilder(path) builder.add_meta_graph_and_variables( sess, ["foo_graph"], {"foo": foo_sig_def}, saver=tf_saver.Saver([x, y])) builder.save() loader = loader_impl.SavedModelLoader(path) with self.session(graph=ops.Graph()) as sess: saver, _ = loader.load_graph(sess.graph, ["foo_graph"]) self.assertFalse(variables._all_saveable_objects()) self.assertIsNotNone(saver) with self.session(graph=ops.Graph()) as sess: loader.load(sess, ["foo_graph"]) self.assertEqual(5, sess.graph.get_tensor_by_name("x:0").eval()) self.assertEqual(11, sess.graph.get_tensor_by_name("y:0").eval()) def test_load_saved_model_graph_with_return_elements(self, builder_cls): """Ensure that the correct elements are returned.""" self.export_simple_graph(builder_cls) loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL) graph = ops.Graph() _, ret = loader.load_graph(graph, ["foo_graph"], return_elements=["y:0", "x:0"]) self.assertEqual(graph.get_tensor_by_name("y:0"), ret[0]) self.assertEqual(graph.get_tensor_by_name("x:0"), ret[1]) with self.assertRaisesRegexp(ValueError, "not found in graph"): loader.load_graph(graph, ["foo_graph"], return_elements=["z:0"]) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/loader_test.py
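The staged variant of the loader API exercised above, broken out for cases that need custom savers or import scopes; the "foo_graph" tag mirrors build_graph_helper and the path is hypothetical:

from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import loader_impl

loader = loader_impl.SavedModelLoader("/tmp/foo_saved_model")  # hypothetical path
graph = ops.Graph()
# 1) Import the MetaGraphDef into a graph; a Saver built from it is returned.
saver, _ = loader.load_graph(graph, ["foo_graph"])
with session.Session(graph=graph) as sess:
  # 2) Restore variable values from the checkpoint shards.
  loader.restore_variables(sess, saver)
  # 3) Run the main / legacy init op (table initializers, assignments, ...).
  loader.run_init_ops(sess, ["foo_graph"])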
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Import a trackable object from a SavedModel.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os from tensorflow.python.distribute import distribution_strategy_context as ds_context from tensorflow.python.distribute import values as ds_values from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.saved_model import function_deserialization from tensorflow.python.saved_model import load_v1_in_v2 from tensorflow.python.saved_model import loader_impl from tensorflow.python.saved_model import nested_structure_coder from tensorflow.python.saved_model import revived_types from tensorflow.python.saved_model import utils_impl as saved_model_utils from tensorflow.python.training.tracking import base from tensorflow.python.training.tracking import graph_view from tensorflow.python.training.tracking import tracking from tensorflow.python.training.tracking import util from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export def _unused_handle(): """Returns a placeholder as handle that is not supposed to be accessed.""" error_message = ("Trying to access a placeholder that is not supposed to be " "executed. This means you are executing a graph generated " "from cross-replica context in an in-replica context.") assert_op = control_flow_ops.Assert( array_ops.placeholder_with_default(False, shape=()), [error_message]) with ops.control_dependencies([assert_op]): return array_ops.placeholder(dtype=dtypes.resource) class _WrapperFunction(function.ConcreteFunction): """A class wraps a concrete function to handle different distributed contexts. The reason for wrapping a concrete function is because the _captured_inputs fields used for in-replica context and cross-replica context are different. When `load()` is called from within a tf.distribute.strategy scope, the captured inputs are distributed variables. When using these distributed variables during calling the function, we need different approaches when it is in-replica and when it is not in-replica. When it is in replica, naturally we should use the corresponding component of the distributed variable; when it is not in-replica, calling the function should mean that it is constructing a graph that is not actually going to be used. 
A typical use case is when constructing a functional model. In this case, return a placeholder with a control dependency to ensure that is is never accessed. """ def __init__(self, concrete_function): # Shallow copy the concrete_function self.__dict__.update(vars(concrete_function)) def _call_flat(self, args, captured_inputs, cancellation_manager=None): def get_in_replica_handle(x): return x.handle if ds_values.is_distributed_variable(x) else x def get_cross_replica_handle(x): return _unused_handle() if ds_values.is_distributed_variable(x) else x if ds_context.get_replica_context() is not None: # in-replica context captured_inputs = list(map(get_in_replica_handle, captured_inputs)) else: # cross-replica context captured_inputs = list( map(get_cross_replica_handle, captured_inputs)) return super(_WrapperFunction, self)._call_flat(args, captured_inputs, cancellation_manager) class Loader(object): """Helper class to load an object-based SavedModel.""" def __init__(self, object_graph_proto, saved_model_proto, export_dir): meta_graph = saved_model_proto.meta_graphs[0] self._asset_file_def = meta_graph.asset_file_def self._operation_attributes = { node.name: node.attr for node in meta_graph.graph_def.node} self._proto = object_graph_proto self._export_dir = export_dir self._concrete_functions = ( function_deserialization.load_function_def_library( meta_graph.graph_def.library)) for name, concrete_function in self._concrete_functions.items(): # Wrap all the concrete function so that they are capable of dealing with # both in replica and cross replica cases. self._concrete_functions[name] = _WrapperFunction(concrete_function) self._load_all() # TODO(b/124045874): There are limitations with functions whose captures # trigger other functions to be executed. For now it is only guaranteed to # work if the captures of a function only trigger functions without # captures. self._setup_functions_structures() self._setup_functions_captures() self._restore_checkpoint() for node in self._nodes: if isinstance(node, tracking.CapturableResource): init_op = node._initialize() # pylint: disable=protected-access if not context.executing_eagerly(): ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op) def _setup_functions_structures(self): """Setup structure for inputs and outputs of restored functions.""" coder = nested_structure_coder.StructureCoder() for name, proto in sorted(self._proto.concrete_functions.items()): concrete_function = self._concrete_functions[name] # By setting the structured_outputs directly, we can rely on this # function_lib.ConcreteFunction object to perform the output repacking # logic. The only limitation of that logic is that it only works # with output that is convertible to Tensors and the conversion # always happens. For example tf.TensorShape([2, 3]) will be # converted to Tensor representing [2, 3]. original_outputs = coder.decode_proto(proto.output_signature) # The original_outputs here had Tensors converted to TensorSpecs, so # the restored function's structured_outputs field will not be # exactly the same. Fortunately the repacking logic cares only about # the structure. # TODO(vbardiovsky): Should we just replicate the structures, with # Nones instead of real objects? 
concrete_function._func_graph.structured_outputs = original_outputs # pylint: disable=protected-access concrete_function._func_graph.structured_input_signature = ( # pylint: disable=protected-access coder.decode_proto(proto.canonicalized_input_signature)) def _setup_functions_captures(self): """Setup captures and variables in restored functions.""" concrete_functions = sorted(self._proto.concrete_functions.items()) for name, proto in concrete_functions: concrete_function = self._concrete_functions[name] bound_inputs = [ self._get_tensor_from_node(node_id) for node_id in proto.bound_inputs] bound_variables = [ self._nodes[node_id] for node_id in proto.bound_inputs if self._proto.nodes[node_id].WhichOneof("kind") == "variable" ] # TODO(andresp): This is only injecting the captured inputs into the # concrete function, note that we did not modify the FuncGraph # itself. concrete_function._captured_inputs = bound_inputs # pylint: disable=protected-access concrete_function._func_graph.variables = bound_variables # pylint: disable=protected-access if bound_inputs: for bound_input, internal_capture in zip( bound_inputs, concrete_function.inputs[-len(bound_inputs):]): if ds_values.is_distributed_variable(bound_input): concrete_function.graph.capture_distributed_variable( bound_input, internal_capture) else: concrete_function.graph._captures[ops.tensor_id(bound_input)] = ( # pylint: disable=protected-access bound_input, internal_capture) if internal_capture.dtype == dtypes.resource: if resource_variable_ops.is_resource_variable(bound_input): try: handle = bound_input.handle except ValueError: # For mirrored variables we'll copy handle data for components # as they get captured. pass else: custom_gradient.copy_handle_data(handle, internal_capture) else: custom_gradient.copy_handle_data(bound_input, internal_capture) # Setting "captures" first means "capture" won't create a new # placeholder for this input. concrete_function.graph.capture(bound_input) def _get_tensor_from_node(self, node_id): """Resolves a node id into a tensor to be captured for a function.""" with ops.init_scope(): obj = self._nodes[node_id] if ds_values.is_distributed_variable(obj): return obj elif resource_variable_ops.is_resource_variable(obj): return obj.handle elif isinstance(obj, tracking.TrackableAsset): return obj.asset_path elif tensor_util.is_tensor(obj): return obj elif isinstance(obj, tracking.CapturableResource): # Note: this executes restored functions in the CapturableResource. return obj.resource_handle raise ValueError("Can't convert node %s to tensor" % (type(obj))) def _load_all(self): """Load all saved objects and wire their properties.""" # Maps from node ids to recreated objects nodes = {} # Maps from node ids to setter functions (same signature as setattr) for # setting dependencies. node_setters = {} # Figure out which objects are slot variables. These objects are created # with Optimizer.add_slot rather than _recreate_variable. slot_variable_node_ids = set() for proto in self._proto.nodes: for slot_variable_proto in proto.slot_variables: slot_variable_node_ids.add(slot_variable_proto.slot_variable_node_id) # Re-create everything except slot variables. for node_id, proto in enumerate(self._proto.nodes): if node_id in slot_variable_node_ids: # Defer recreating slot variables so we can use the public Optimizer # interface. 
continue node, setter = self._recreate(proto) nodes[node_id] = node node_setters[node_id] = setter # Now that we have created the variables being optimized, we have enough # information to re-create slot variables for them. for node_id, proto in enumerate(self._proto.nodes): optimizer_object = nodes[node_id] for slot_variable_proto in proto.slot_variables: optimized_variable = nodes[ slot_variable_proto.original_variable_node_id] slot_variable = optimizer_object.add_slot( var=optimized_variable, slot_name=slot_variable_proto.slot_name) nodes[slot_variable_proto.slot_variable_node_id] = slot_variable node_setters[slot_variable_proto.slot_variable_node_id] = setattr self._nodes = [] # After creating the objects, construct the edges between the objects. for node_id, object_proto in enumerate(self._proto.nodes): obj = nodes[node_id] setter = node_setters[node_id] self._nodes.append(obj) for reference in object_proto.children: setter(obj, reference.local_name, nodes[reference.node_id]) # Note: if an object has an attribute `__call__` add a class method # that allows `obj()` syntax to work. This is done per-instance to # allow `callable` to be used to find out if an object is callable. if reference.local_name == "__call__" and not callable(obj): setattr(type(obj), "__call__", _call_attribute) def _restore_checkpoint(self): """Load state from checkpoint into the deserialized objects.""" variables_path = saved_model_utils.get_variables_path(self._export_dir) # TODO(andresp): Clean use of private methods of TrackableSaver. # pylint: disable=protected-access saver = util.TrackableSaver(graph_view.ObjectGraphView(self.get(0))) with ops.device("CPU"): saver._file_prefix_placeholder = constant_op.constant(variables_path) load_status = saver.restore(variables_path) load_status.assert_existing_objects_matched() checkpoint = load_status._checkpoint # When running in eager mode, the `restore` call above has already run and # restored the state of trackables, call `position.restore_ops()` will # return an empty list as there is nothing left to do. In graph mode, that # will return the list of ops that must run to restore the object on that # position. We have to wire them in the initializers of the objects so that # they get initialized properly when using common practices (e.g. the ones # used by ManagedSession) without further user action. for object_id, obj in dict(checkpoint.object_by_proto_id).items(): position = base.CheckpointPosition(checkpoint=checkpoint, proto_id=object_id) restore_ops = position.restore_ops() if restore_ops: if resource_variable_ops.is_resource_variable(obj): obj._initializer_op = restore_ops else: raise NotImplementedError( ("Missing functionality to restore state of object " "%r from the checkpoint." 
% obj)) def get(self, node_id): return self._nodes[node_id] def _recreate(self, proto): """Creates a Python object from a SavedObject protocol buffer.""" factory = { "user_object": lambda: self._recreate_user_object(proto.user_object), "asset": lambda: self._recreate_asset(proto.asset), "function": lambda: self._recreate_function(proto.function), "bare_concrete_function": functools.partial( self._recreate_bare_concrete_function, proto.bare_concrete_function), "variable": lambda: self._recreate_variable(proto.variable), "constant": lambda: self._recreate_constant(proto.constant), "resource": lambda: self._recreate_resource(proto.resource), } kind = proto.WhichOneof("kind") if kind not in factory: raise ValueError("Unknown SavedObject type: %r" % kind) return factory[kind]() def _recreate_user_object(self, proto): """Instantiates a SavedUserObject.""" looked_up = revived_types.deserialize(proto) if looked_up is None: return self._recreate_base_user_object(proto) return looked_up def _recreate_base_user_object(self, proto): del proto # Note: each user object has its own class. This allows to make each one # individually callable by adding a `__call__` method to the classes of # the objects instances that have a `__call__` property. class _UserObject(tracking.AutoTrackable): pass return _UserObject(), setattr def _recreate_asset(self, proto): filename = os.path.join( saved_model_utils.get_assets_dir(self._export_dir), self._asset_file_def[proto.asset_file_def_index].filename) return tracking.TrackableAsset(filename), setattr def _recreate_function(self, proto): return function_deserialization.recreate_function( proto, self._concrete_functions), setattr def _recreate_bare_concrete_function(self, proto): return function_deserialization.setup_bare_concrete_function( proto, self._concrete_functions), setattr def _recreate_variable(self, proto): name = proto.name if proto.name else None if name is not None: dbg_name = name else: dbg_name = "<variable loaded from saved model>" synchronization, aggregation, trainable = ( variables.validate_synchronization_aggregation_trainable( proto.synchronization, proto.aggregation, proto.trainable, name=dbg_name)) def uninitialized_variable_creator(next_creator, **kwargs): """A variable creator that creates uninitialized variables.""" del next_creator return resource_variable_ops.UninitializedVariable(**kwargs) # Create a variable_creator_scope that creates uninitialized variables with # a lower priority such that a potential distributed variable_creator_scope # can take precedence. with ops.get_default_graph()._variable_creator_scope( # pylint: disable=protected-access uninitialized_variable_creator, priority=50): return variables.Variable( shape=proto.shape, dtype=proto.dtype, name=name, trainable=trainable, synchronization=synchronization, aggregation=aggregation), setattr def _recreate_constant(self, proto): tensor_proto = self._operation_attributes[proto.operation]["value"].tensor ndarray = tensor_util.MakeNdarray(tensor_proto) if dtypes.as_dtype(tensor_proto.dtype) == dtypes.string: with ops.device("CPU"): imported_constant = constant_op.constant(ndarray) else: imported_constant = constant_op.constant(ndarray) return imported_constant, setattr def _recreate_resource(self, proto): return _RestoredResource(device=proto.device), setattr # TODO(b/124205571,b/124092991): Solve destruction of resources. 
class _RestoredResource(tracking.TrackableResource): """Restored SavedResource.""" def __init__(self, device=""): super(_RestoredResource, self).__init__(device=device) self._destroy_resource_fn = None def _create_resource(self): raise RuntimeError() def _initialize(self): raise RuntimeError() @property def _destroy_resource(self): return self._destroy_resource_fn @_destroy_resource.setter def _destroy_resource(self, destroy_resource_fn): self._resource_deleter = tracking.CapturableResourceDeleter( destroy_resource_fn) self._destroy_resource_fn = destroy_resource_fn def _list_functions_for_serialization(self, unused_serialization_cache): # Overwrite this method to avoid the implementation of # base class to re-wrap the polymorphic functions into # another layer of `tf.function`. functions = { "_create_resource": self._create_resource, "_initialize": self._initialize, } if self._destroy_resource: functions.update(_destroy_resource=self._destroy_resource) return functions def _call_attribute(instance, *args, **kwargs): return instance.__call__(*args, **kwargs) @tf_export("saved_model.load", v1=["saved_model.load_v2"]) def load(export_dir, tags=None): """Load a SavedModel from `export_dir`. Signatures associated with the SavedModel are available as functions: ```python imported = tf.saved_model.load(path) f = imported.signatures["serving_default"] print(f(x=tf.constant([[1.]]))) ``` Objects exported with `tf.saved_model.save` additionally have trackable objects and functions assigned to attributes: ```python exported = tf.train.Checkpoint(v=tf.Variable(3.)) exported.f = tf.function( lambda x: exported.v * x, input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)]) tf.saved_model.save(exported, path) imported = tf.saved_model.load(path) assert 3. == imported.v.numpy() assert 6. == imported.f(x=tf.constant(2.)).numpy() ``` _Loading Keras models_ Keras models are trackable, so they can be saved to SavedModel. The object returned by `tf.saved_model.load` is not a Keras object (i.e. doesn't have `.fit`, `.predict`, etc. methods). A few attributes and functions are still available: `.variables`, `.trainable_variables` and `.__call__`. ```python model = tf.keras.Model(...) tf.saved_model.save(model, path) imported = tf.saved_model.load(path) outputs = imported(inputs) ``` Use `tf.keras.models.load_model` to restore the Keras model. _Importing SavedModels from TensorFlow 1.x_ SavedModels from `tf.estimator.Estimator` or 1.x SavedModel APIs have a flat graph instead of `tf.function` objects. These SavedModels will have functions corresponding to their signatures in the `.signatures` attribute, but also have a `.prune` method which allows you to extract functions for new subgraphs. This is equivalent to importing the SavedModel and naming feeds and fetches in a Session from TensorFlow 1.x. ```python imported = tf.saved_model.load(path_to_v1_saved_model) pruned = imported.prune("x:0", "out:0") pruned(tf.ones([])) ``` See `tf.compat.v1.wrap_function` for details. These SavedModels also have a `.variables` attribute containing imported variables, and a `.graph` attribute representing the whole imported graph. For SavedModels exported from `tf.saved_model.save`, variables are instead assigned to whichever attributes they were assigned before export. Args: export_dir: The SavedModel directory to load from. tags: A tag or sequence of tags identifying the MetaGraph to load. Optional if the SavedModel contains a single MetaGraph, as for those exported from `tf.saved_model.load`. 
Returns: A trackable object with a `signatures` attribute mapping from signature keys to functions. If the SavedModel was exported by `tf.saved_model.load`, it also points to trackable objects and functions which were attached to the exported object. Raises: ValueError: If `tags` don't match a MetaGraph in the SavedModel. """ return load_internal(export_dir, tags) def load_internal(export_dir, tags=None, loader_cls=Loader): """Loader implementation.""" if tags is not None and not isinstance(tags, set): # Supports e.g. tags=SERVING and tags=[SERVING]. Sets aren't considered # sequences for nest.flatten, so we put those through as-is. tags = nest.flatten(tags) saved_model_proto = loader_impl.parse_saved_model(export_dir) if (len(saved_model_proto.meta_graphs) == 1 and saved_model_proto.meta_graphs[0].HasField("object_graph_def")): meta_graph_def = saved_model_proto.meta_graphs[0] if (tags is not None and set(tags) != set(meta_graph_def.meta_info_def.tags)): raise ValueError( ("The SavedModel at {} has one MetaGraph with tags {}, but got an " "incompatible argument tags={} to tf.saved_model.load. You may omit " "it, pass 'None', or pass matching tags.") .format(export_dir, meta_graph_def.meta_info_def.tags, tags)) object_graph_proto = meta_graph_def.object_graph_def with ops.init_scope(): loader = loader_cls(object_graph_proto, saved_model_proto, export_dir) root = loader.get(0) root.tensorflow_version = meta_graph_def.meta_info_def.tensorflow_version root.tensorflow_git_version = ( meta_graph_def.meta_info_def.tensorflow_git_version) else: with ops.init_scope(): root = load_v1_in_v2.load(export_dir, tags) return root
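# A minimal round-trip sketch (assuming TensorFlow 2.x-style eager execution
# and a writable temporary directory; not part of the original module). The
# `Loader` above is what backs `tf.saved_model.load`: `_recreate_variable`
# rebuilds the variable, `_setup_functions_captures` rewires the function's
# captured handle, and `_restore_checkpoint` restores the saved value.


def _example_save_and_load_round_trip():
  import tempfile

  import tensorflow as tf

  root = tf.train.Checkpoint(v=tf.Variable(2.))
  root.multiply = tf.function(
      lambda x: root.v * x,
      input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])

  export_dir = tempfile.mkdtemp()
  tf.saved_model.save(root, export_dir)

  restored = tf.saved_model.load(export_dir)
  assert restored.v.numpy() == 2.0
  assert restored.multiply(tf.constant(3.)).numpy() == 6.0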
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/load.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Module that encodes (decodes) nested structures into (from) protos. The intended use is to serialize everything needed to restore a `Function` that was saved into a SavedModel. This may include concrete function inputs and outputs, signatures, function specs, etc. Example use: coder = nested_structure_coder.StructureCoder() # Encode into proto. signature_proto = coder.encode_structure(function.input_signature) # Decode into a Python object. restored_signature = coder.decode_proto(signature_proto) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools import six from tensorflow.core.protobuf import struct_pb2 from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.data.ops import optional_ops from tensorflow.python.distribute import values from tensorflow.python.framework import dtypes from tensorflow.python.framework import indexed_slices from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.util import compat from tensorflow.python.util.compat import collections_abc class NotEncodableError(Exception): """Error raised when a coder cannot encode an object.""" class StructureCoder(object): """Encoder and decoder for nested structures into protos.""" _codecs = [] @classmethod def register_codec(cls, x): cls._codecs.append(x) @classmethod def _get_encoders(cls): return [(c.can_encode, c.do_encode) for c in cls._codecs] @classmethod def _get_decoders(cls): return [(c.can_decode, c.do_decode) for c in cls._codecs] def _map_structure(self, pyobj, coders): for can, do in coders: if can(pyobj): recursion_fn = functools.partial(self._map_structure, coders=coders) return do(pyobj, recursion_fn) raise NotEncodableError( "No encoder for object [%s] of type [%s]." % (str(pyobj), type(pyobj))) def encode_structure(self, nested_structure): """Encodes nested structures composed of encodable types into a proto. Args: nested_structure: Structure to encode. Returns: Encoded proto. Raises: NotEncodableError: For values for which there are no encoders. """ return self._map_structure(nested_structure, self._get_encoders()) def can_encode(self, nested_structure): """Determines whether a nested structure can be encoded into a proto. Args: nested_structure: Structure to encode. Returns: True if the nested structured can be encoded. """ try: self.encode_structure(nested_structure) except NotEncodableError: return False return True def decode_proto(self, proto): """Decodes proto representing a nested structure. Args: proto: Proto to decode. 
Returns: Decoded structure. Raises: NotEncodableError: For values for which there are no encoders. """ return self._map_structure(proto, self._get_decoders()) class _ListCodec(object): """Codec for lists.""" def can_encode(self, pyobj): return isinstance(pyobj, list) def do_encode(self, list_value, encode_fn): encoded_list = struct_pb2.StructuredValue() encoded_list.list_value.CopyFrom(struct_pb2.ListValue()) for element in list_value: encoded_list.list_value.values.add().CopyFrom(encode_fn(element)) return encoded_list def can_decode(self, value): return value.HasField("list_value") def do_decode(self, value, decode_fn): return [decode_fn(element) for element in value.list_value.values] StructureCoder.register_codec(_ListCodec()) def _is_tuple(obj): return not _is_named_tuple(obj) and isinstance(obj, tuple) def _is_named_tuple(instance): """Returns True iff `instance` is a `namedtuple`. Args: instance: An instance of a Python object. Returns: True if `instance` is a `namedtuple`. """ if not isinstance(instance, tuple): return False return (hasattr(instance, "_fields") and isinstance(instance._fields, collections_abc.Sequence) and all(isinstance(f, six.string_types) for f in instance._fields)) class _TupleCodec(object): """Codec for tuples.""" def can_encode(self, pyobj): return _is_tuple(pyobj) def do_encode(self, tuple_value, encode_fn): encoded_tuple = struct_pb2.StructuredValue() encoded_tuple.tuple_value.CopyFrom(struct_pb2.TupleValue()) for element in tuple_value: encoded_tuple.tuple_value.values.add().CopyFrom(encode_fn(element)) return encoded_tuple def can_decode(self, value): return value.HasField("tuple_value") def do_decode(self, value, decode_fn): return tuple(decode_fn(element) for element in value.tuple_value.values) StructureCoder.register_codec(_TupleCodec()) class _DictCodec(object): """Codec for dicts.""" def can_encode(self, pyobj): return isinstance(pyobj, dict) def do_encode(self, dict_value, encode_fn): encoded_dict = struct_pb2.StructuredValue() encoded_dict.dict_value.CopyFrom(struct_pb2.DictValue()) for key, value in dict_value.items(): encoded_dict.dict_value.fields[key].CopyFrom(encode_fn(value)) return encoded_dict def can_decode(self, value): return value.HasField("dict_value") def do_decode(self, value, decode_fn): return {key: decode_fn(val) for key, val in value.dict_value.fields.items()} StructureCoder.register_codec(_DictCodec()) class _NamedTupleCodec(object): """Codec for namedtuples. Encoding and decoding a namedtuple reconstructs a namedtuple with a different actual Python type, but with same `typename` and `fields`. 
""" def can_encode(self, pyobj): return _is_named_tuple(pyobj) def do_encode(self, named_tuple_value, encode_fn): encoded_named_tuple = struct_pb2.StructuredValue() encoded_named_tuple.named_tuple_value.CopyFrom(struct_pb2.NamedTupleValue()) encoded_named_tuple.named_tuple_value.name = \ named_tuple_value.__class__.__name__ for key in named_tuple_value._fields: pair = encoded_named_tuple.named_tuple_value.values.add() pair.key = key pair.value.CopyFrom(encode_fn(named_tuple_value._asdict()[key])) return encoded_named_tuple def can_decode(self, value): return value.HasField("named_tuple_value") def do_decode(self, value, decode_fn): key_value_pairs = value.named_tuple_value.values items = [(pair.key, decode_fn(pair.value)) for pair in key_value_pairs] named_tuple_type = collections.namedtuple(value.named_tuple_value.name, [item[0] for item in items]) return named_tuple_type(**dict(items)) StructureCoder.register_codec(_NamedTupleCodec()) class _Float64Codec(object): """Codec for floats.""" def can_encode(self, pyobj): return isinstance(pyobj, float) def do_encode(self, float64_value, encode_fn): del encode_fn value = struct_pb2.StructuredValue() value.float64_value = float64_value return value def can_decode(self, value): return value.HasField("float64_value") def do_decode(self, value, decode_fn): del decode_fn return value.float64_value StructureCoder.register_codec(_Float64Codec()) class _Int64Codec(object): """Codec for Python integers (limited to 64 bit values).""" def can_encode(self, pyobj): return not isinstance(pyobj, bool) and isinstance(pyobj, int) def do_encode(self, int_value, encode_fn): del encode_fn value = struct_pb2.StructuredValue() value.int64_value = int_value return value def can_decode(self, value): return value.HasField("int64_value") def do_decode(self, value, decode_fn): del decode_fn return int(value.int64_value) StructureCoder.register_codec(_Int64Codec()) class _StringCodec(object): """Codec for strings. See StructuredValue.string_value in proto/struct.proto for more detailed explanation. 
""" def can_encode(self, pyobj): return isinstance(pyobj, str) def do_encode(self, string_value, encode_fn): del encode_fn value = struct_pb2.StructuredValue() value.string_value = string_value return value def can_decode(self, value): return value.HasField("string_value") def do_decode(self, value, decode_fn): del decode_fn return compat.as_str(value.string_value) StructureCoder.register_codec(_StringCodec()) class _NoneCodec(object): """Codec for None.""" def can_encode(self, pyobj): return pyobj is None def do_encode(self, none_value, encode_fn): del encode_fn, none_value value = struct_pb2.StructuredValue() value.none_value.CopyFrom(struct_pb2.NoneValue()) return value def can_decode(self, value): return value.HasField("none_value") def do_decode(self, value, decode_fn): del decode_fn, value return None StructureCoder.register_codec(_NoneCodec()) class _BoolCodec(object): """Codec for booleans.""" def can_encode(self, pyobj): return isinstance(pyobj, bool) def do_encode(self, bool_value, encode_fn): del encode_fn value = struct_pb2.StructuredValue() value.bool_value = bool_value return value def can_decode(self, value): return value.HasField("bool_value") def do_decode(self, value, decode_fn): del decode_fn return value.bool_value StructureCoder.register_codec(_BoolCodec()) class _TensorShapeCodec(object): """Codec for `TensorShape`.""" def can_encode(self, pyobj): return isinstance(pyobj, tensor_shape.TensorShape) def do_encode(self, tensor_shape_value, encode_fn): del encode_fn encoded_tensor_shape = struct_pb2.StructuredValue() encoded_tensor_shape.tensor_shape_value.CopyFrom( tensor_shape_value.as_proto()) return encoded_tensor_shape def can_decode(self, value): return value.HasField("tensor_shape_value") def do_decode(self, value, decode_fn): del decode_fn return tensor_shape.TensorShape(value.tensor_shape_value) StructureCoder.register_codec(_TensorShapeCodec()) class _TensorTypeCodec(object): """Codec for `TensorType`.""" def can_encode(self, pyobj): return isinstance(pyobj, dtypes.DType) def do_encode(self, tensor_dtype_value, encode_fn): del encode_fn encoded_tensor_type = struct_pb2.StructuredValue() encoded_tensor_type.tensor_dtype_value = tensor_dtype_value.as_datatype_enum return encoded_tensor_type def can_decode(self, value): return value.HasField("tensor_dtype_value") def do_decode(self, value, decode_fn): del decode_fn return dtypes.DType(value.tensor_dtype_value) StructureCoder.register_codec(_TensorTypeCodec()) class _TensorSpecCodec(object): """Codec for `TensorSpec`.""" def can_encode(self, pyobj): return isinstance(pyobj, tensor_spec.TensorSpec) def do_encode(self, tensor_spec_value, encode_fn): encoded_tensor_spec = struct_pb2.StructuredValue() encoded_tensor_spec.tensor_spec_value.CopyFrom( struct_pb2.TensorSpecProto( shape=encode_fn(tensor_spec_value.shape).tensor_shape_value, dtype=encode_fn(tensor_spec_value.dtype).tensor_dtype_value, name=tensor_spec_value.name)) return encoded_tensor_spec def can_decode(self, value): return value.HasField("tensor_spec_value") def do_decode(self, value, decode_fn): name = value.tensor_spec_value.name return tensor_spec.TensorSpec( shape=decode_fn( struct_pb2.StructuredValue( tensor_shape_value=value.tensor_spec_value.shape)), dtype=decode_fn( struct_pb2.StructuredValue( tensor_dtype_value=value.tensor_spec_value.dtype)), name=(name if name else None)) StructureCoder.register_codec(_TensorSpecCodec()) class _TypeSpecCodec(object): """Codec for `tf.TypeSpec`.""" # Mapping from enum value to type (TypeSpec subclass). 
TYPE_SPEC_CLASS_FROM_PROTO = { struct_pb2.TypeSpecProto.SPARSE_TENSOR_SPEC: sparse_tensor.SparseTensorSpec, struct_pb2.TypeSpecProto.INDEXED_SLICES_SPEC: indexed_slices.IndexedSlicesSpec, struct_pb2.TypeSpecProto.RAGGED_TENSOR_SPEC: ragged_tensor.RaggedTensorSpec, struct_pb2.TypeSpecProto.TENSOR_ARRAY_SPEC: tensor_array_ops.TensorArraySpec, struct_pb2.TypeSpecProto.DATA_DATASET_SPEC: dataset_ops.DatasetSpec, struct_pb2.TypeSpecProto.DATA_ITERATOR_SPEC: iterator_ops.IteratorSpec, struct_pb2.TypeSpecProto.OPTIONAL_SPEC: optional_ops.OptionalSpec, struct_pb2.TypeSpecProto.PER_REPLICA_SPEC: values.PerReplicaSpec, } # Mapping from type (TypeSpec subclass) to enum value. TYPE_SPEC_CLASS_TO_PROTO = dict( (cls, enum) for (enum, cls) in TYPE_SPEC_CLASS_FROM_PROTO.items()) def can_encode(self, pyobj): # pylint: disable=unidiomatic-typecheck return type(pyobj) in self.TYPE_SPEC_CLASS_TO_PROTO def do_encode(self, type_spec_value, encode_fn): """Returns an encoded proto for the given `tf.TypeSpec`.""" type_spec_class = self.TYPE_SPEC_CLASS_TO_PROTO[type(type_spec_value)] type_state = type_spec_value._serialize() # pylint: disable=protected-access encoded_type_spec = struct_pb2.StructuredValue() encoded_type_spec.type_spec_value.CopyFrom( struct_pb2.TypeSpecProto( type_spec_class=type_spec_class, type_state=encode_fn(type_state), type_spec_class_name=type(type_spec_value).__name__)) return encoded_type_spec def can_decode(self, value): return value.HasField("type_spec_value") def do_decode(self, value, decode_fn): """Returns the `tf.TypeSpec` encoded by the proto `value`.""" type_spec_proto = value.type_spec_value type_spec_class_enum = type_spec_proto.type_spec_class if type_spec_class_enum not in self.TYPE_SPEC_CLASS_FROM_PROTO: raise ValueError( "The type '%s' is not supported by this version of TensorFlow. " "(The object you are loading must have been created with a newer " "version of TensorFlow.)" % type_spec_proto.type_spec_class_name) type_spec_class = self.TYPE_SPEC_CLASS_FROM_PROTO[type_spec_class_enum] # pylint: disable=protected-access return type_spec_class._deserialize(decode_fn(type_spec_proto.type_state)) StructureCoder.register_codec(_TypeSpecCodec())
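# A small sketch (not part of the original module) showing the codecs
# registered above composing recursively: a nested structure of primitives and
# `TensorSpec`s round-trips through a `StructuredValue` proto.


def _example_structure_round_trip():
  coder = StructureCoder()
  structure = {
      "learning_rate": 0.1,
      "labels": ["a", "b", None, True],
      "signature": (tensor_spec.TensorSpec([None, 3], dtypes.float32, "x"),),
  }
  assert coder.can_encode(structure)
  proto = coder.encode_structure(structure)  # A struct_pb2.StructuredValue.
  restored = coder.decode_proto(proto)
  assert restored["learning_rate"] == 0.1
  assert restored["signature"][0].shape.as_list() == [None, 3]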
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/nested_structure_coder.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tools for serializing `Function`s.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.protobuf import saved_object_graph_pb2 from tensorflow.python.framework import func_graph as func_graph_module from tensorflow.python.saved_model import nested_structure_coder def _serialize_function_spec(function_spec, coder): """Serialize a FunctionSpec object into its proto representation.""" if function_spec.is_method and not function_spec.fullargspec.args: raise NotImplementedError( "Missing support to serialize a method function without a named " "'self' argument.") proto = saved_object_graph_pb2.FunctionSpec() proto.fullargspec.CopyFrom(coder.encode_structure(function_spec.fullargspec)) proto.is_method = function_spec.is_method proto.input_signature.CopyFrom( coder.encode_structure(function_spec.input_signature)) return proto def serialize_concrete_function(concrete_function, node_ids, coder): """Build a SavedConcreteFunction.""" bound_inputs = [] try: for capture in concrete_function.captured_inputs: bound_inputs.append(node_ids[capture]) except KeyError: raise KeyError( "Failed to add concrete function %s to object based saved model as it " "captures tensor %s which is unsupported or not reachable from root. " "One reason could be that a stateful object or a variable that the " "function depends on is not assigned to an attribute of the serialized " "trackable object " "(see SaveTest.test_captures_unreachable_variable)." 
% (concrete_function.name, capture)) concrete_function_proto = saved_object_graph_pb2.SavedConcreteFunction() structured_outputs = func_graph_module.convert_structure_to_signature( concrete_function.structured_outputs) concrete_function_proto.canonicalized_input_signature.CopyFrom( coder.encode_structure(concrete_function.structured_input_signature)) concrete_function_proto.output_signature.CopyFrom( coder.encode_structure(structured_outputs)) concrete_function_proto.bound_inputs.extend(bound_inputs) return concrete_function_proto def serialize_bare_concrete_function(concrete_function): """Build a SavedBareConcreteFunction.""" # pylint: disable=protected-access return saved_object_graph_pb2.SavedBareConcreteFunction( concrete_function_name=concrete_function.name, allowed_positional_arguments=concrete_function._num_positional_args, argument_keywords=concrete_function._arg_keywords) # pylint: enable=protected-access def serialize_function(function): """Build a SavedFunction proto.""" coder = nested_structure_coder.StructureCoder() proto = saved_object_graph_pb2.SavedFunction() function_spec_proto = _serialize_function_spec(function.function_spec, coder) proto.function_spec.CopyFrom(function_spec_proto) all_concrete_functions = \ function._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access for concrete_function in all_concrete_functions: proto.concrete_functions.append(concrete_function.name) return proto
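# An illustrative sketch (assuming TensorFlow 2.x eager mode; not part of the
# original module): `serialize_function` records the names of the traced
# concrete functions together with the encoded `FunctionSpec`, which
# `function_deserialization.recreate_function` later uses to pick a matching
# trace at load time.


def _example_serialize_function():
  import tensorflow as tf

  @tf.function(input_signature=[tf.TensorSpec([None], tf.float32)])
  def double(x):
    return x * 2.0

  double.get_concrete_function()  # Trace once so there is something to list.
  proto = serialize_function(double)  # A saved_object_graph_pb2.SavedFunction.
  assert not proto.function_spec.is_method
  assert len(proto.concrete_functions) >= 1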
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/function_serialization.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SignatureDef utility functions. Utility functions for building and inspecting SignatureDef protos. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def from tensorflow.python.saved_model.signature_def_utils_impl import classification_signature_def from tensorflow.python.saved_model.signature_def_utils_impl import is_valid_signature from tensorflow.python.saved_model.signature_def_utils_impl import load_op_from_signature_def from tensorflow.python.saved_model.signature_def_utils_impl import op_signature_def from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def from tensorflow.python.saved_model.signature_def_utils_impl import regression_signature_def from tensorflow.python.saved_model.signature_def_utils_impl import supervised_eval_signature_def from tensorflow.python.saved_model.signature_def_utils_impl import supervised_train_signature_def # pylint: enable=unused-import del absolute_import del division del print_function
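# A short usage sketch (TF 1.x graph mode; not part of the original file):
# `predict_signature_def` builds a PREDICT SignatureDef from tensors, and
# `is_valid_signature` checks that it is complete.


def _example_predict_signature_def():
  import tensorflow.compat.v1 as tf

  with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [None, 3], name="x")
    y = tf.identity(x, name="y")
    signature = predict_signature_def(inputs={"x": x}, outputs={"y": y})
    assert is_valid_signature(signature)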
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/signature_def_utils.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Convenience functions to save a model. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.saved_model import builder from tensorflow.python.saved_model import constants from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import main_op from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import tag_constants from tensorflow.python.saved_model import utils from tensorflow.python.saved_model.load import load from tensorflow.python.saved_model.save import save # pylint: enable=unused-import # pylint: disable=wildcard-import from tensorflow.python.saved_model.simple_save import * # pylint: enable=wildcard-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/saved_model.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tools for deserializing `Function`s.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re from tensorflow.core.framework import function_pb2 from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import function as function_lib from tensorflow.python.framework import func_graph as func_graph_lib from tensorflow.python.framework import function_def_to_graph as function_def_lib from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import type_spec from tensorflow.python.ops import resource_variable_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import nested_structure_coder from tensorflow.python.util import compat from tensorflow.python.util import nest from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect def _is_tensor(t): return isinstance(t, (ops.Tensor, resource_variable_ops.BaseResourceVariable)) def _call_concrete_function(function, inputs): """Calls a restored Function with structured inputs. This differs from `function.__call__` in that inputs and outputs are structured and that it casts inputs to tensors if needed. Note: this does not checks that non-tensor inputs match. That should be done before via `_concrete_function_callable_with`. Args: function: ConcreteFunction to call. inputs: Structured inputs compatible with `function.graph.structured_input_signature`. Returns: The structured function output. """ expected_structure = function.graph.structured_input_signature flatten_inputs = nest.flatten_up_to( expected_structure, inputs, expand_composites=True) flatten_expected = nest.flatten(expected_structure, expand_composites=True) tensor_inputs = [] for arg, expected in zip(flatten_inputs, flatten_expected): if isinstance(expected, tensor_spec.TensorSpec): tensor_inputs.append( ops.convert_to_tensor(arg, dtype_hint=expected.dtype)) result = function._call_flat(tensor_inputs, function._captured_inputs) # pylint: disable=protected-access if isinstance(result, ops.Operation): return None return result def _try_convert_to_tensor_spec(arg, dtype_hint): """Returns None or TensorSpec obtained if `arg` is converted to tensor.""" try: # Note: try conversion in a FuncGraph to avoid poluting current context. 
with func_graph_lib.FuncGraph(name="guess_conversion").as_default(): result = ops.convert_to_tensor(arg, dtype_hint=dtype_hint) return tensor_spec.TensorSpec(shape=result.shape, dtype=result.dtype) except (TypeError, ValueError): return None def _concrete_function_callable_with(function, inputs, allow_conversion): """Returns whether concrete `function` can be called with `inputs`.""" expected_structure = function.graph.structured_input_signature try: flatten_inputs = nest.flatten_up_to(expected_structure, inputs) except (TypeError, ValueError): return False try: # Verify that no input elements were dropped during flattening. repacked = nest.pack_sequence_as(expected_structure, flatten_inputs) # TODO(b/129422719): Namedtuple subclasses re-created through # saved_model.load don't compare equal in type to the original in # assert_same_structure. Fix that and we can take out check_types=False # here. nest.assert_same_structure(inputs, repacked, check_types=False) except (TypeError, ValueError): return False for arg, expected in zip(flatten_inputs, nest.flatten(expected_structure)): if isinstance(expected, tensor_spec.TensorSpec): if allow_conversion: arg = _try_convert_to_tensor_spec(arg, dtype_hint=expected.dtype) if not _is_tensor(arg) and not isinstance(arg, tensor_spec.TensorSpec): return False if arg.dtype != expected.dtype: return False if not expected.shape.is_compatible_with(arg.shape): return False elif isinstance(expected, type_spec.TypeSpec): return expected.is_compatible_with(arg) elif (_is_tensor(arg) and id(arg) != id(expected)) or (not _is_tensor(arg) and arg != expected): return False return True def _deserialize_function_spec_as_nonmethod(function_spec_proto, coder): """Deserialize a FunctionSpec object from its proto representation.""" typeless_fullargspec = coder.decode_proto(function_spec_proto.fullargspec) # Convert a method function into a non method. if function_spec_proto.is_method: if not typeless_fullargspec.args: raise NotImplementedError( "Missing support to deserialize a method function without a named " "'self' argument.") args = typeless_fullargspec.args[1:] else: args = typeless_fullargspec.args fullargspec = tf_inspect.FullArgSpec( args=args, varargs=typeless_fullargspec.varargs, varkw=typeless_fullargspec.varkw, defaults=typeless_fullargspec.defaults, kwonlyargs=typeless_fullargspec.kwonlyargs, kwonlydefaults=typeless_fullargspec.kwonlydefaults, annotations=typeless_fullargspec.annotations) input_signature = coder.decode_proto(function_spec_proto.input_signature) return function_lib.FunctionSpec(fullargspec=fullargspec, is_method=False, args_to_prepend=[], kwargs_to_include={}, input_signature=input_signature) # TODO(allenl): The fact that we can't derive ConcreteFunction calling # conventions from the serialized input spec right now is unfortunate. Merging # these would be good, maybe by adding TensorSpec names to cache keys so renamed # keyword arguments would yield different ConcreteFunctions. def setup_bare_concrete_function(saved_bare_concrete_function, concrete_functions): """Makes a restored bare concrete function callable.""" # Bare concrete functions accept only flat lists of Tensors with unique # names. 
concrete_function = concrete_functions[ saved_bare_concrete_function.concrete_function_name] # pylint: disable=protected-access concrete_function._arg_keywords = ( saved_bare_concrete_function.argument_keywords) concrete_function._num_positional_args = ( saved_bare_concrete_function.allowed_positional_arguments) # pylint: enable=protected-access concrete_function.add_to_graph() return concrete_function class RestoredFunction(def_function.Function): """Wrapper class for a function that has been restored from saved state. See `def_function.Function`. """ def __init__(self, python_function, name, function_spec, concrete_functions): # TODO(mdan): We may enable autograph once exceptions are supported. super(RestoredFunction, self).__init__( python_function, name, autograph=False) self.concrete_functions = concrete_functions self._function_spec = function_spec def _list_all_concrete_functions_for_serialization(self): return self.concrete_functions def _defun_with_scope(self, scope): func = super(RestoredFunction, self)._defun_with_scope(scope) func._function_spec = self._function_spec # pylint: disable=protected-access return func def recreate_function(saved_function, concrete_functions): """Creates a `Function` from a `SavedFunction`. Args: saved_function: `SavedFunction` proto. concrete_functions: map from function name to `ConcreteFunction`. Returns: A `Function`. """ # TODO(andresp): Construct a `Function` with the cache populated # instead of creating a new `Function` backed by a Python layer to # glue things together. Current approach is nesting functions deeper for each # serialization cycle. coder = nested_structure_coder.StructureCoder() # Note: handling method functions is tricky since make_decorator does not # allows control of "ismethod". Additionally since restored functions do # not behave as methods i.e. they always use the same captured tensors # independent of the object they are bound to, there is little value on # propagating that correctly. # # Ideally this conversion should happen at serialization time. But since # there are SavedModels which have "ismethod" populated and have an extra # argument that they expect to be ignored, we do it at deserialization. function_spec = _deserialize_function_spec_as_nonmethod( saved_function.function_spec, coder) def restored_function_body(*args, **kwargs): """Calls a restored function.""" # This is the format of function.graph.structured_input_signature. At this # point, the args and kwargs have already been canonicalized. inputs = (args, kwargs) # First try to find a concrete function that can be called without input # conversions. This allows one to pick a more specific trace in case there # was also a more expensive one that supported tensors. 
for allow_conversion in [False, True]: for function_name in saved_function.concrete_functions: function = concrete_functions[function_name] if _concrete_function_callable_with(function, inputs, allow_conversion): return _call_concrete_function(function, inputs) signature_descriptions = [] def _pretty_format_positional(positional): return "Positional arguments ({} total):\n * {}".format( len(positional), "\n * ".join([str(a) for a in positional])) for index, function_name in enumerate(saved_function.concrete_functions): concrete_function = concrete_functions[function_name] positional, keyword = concrete_function.structured_input_signature signature_descriptions.append( "Option {}:\n {}\n Keyword arguments: {}" .format(index + 1, _pretty_format_positional(positional), keyword)) raise ValueError( "Could not find matching function to call loaded from the SavedModel. " "Got:\n {}\n Keyword arguments: {}\n\nExpected " "these arguments to match one of the following {} option(s):\n\n{}" .format(_pretty_format_positional(args), kwargs, len(saved_function.concrete_functions), "\n\n".join(signature_descriptions))) concrete_function_objects = [] for concrete_function_name in saved_function.concrete_functions: concrete_function_objects.append(concrete_functions[concrete_function_name]) restored_function = RestoredFunction( restored_function_body, restored_function_body.__name__, function_spec, concrete_function_objects) return tf_decorator.make_decorator( restored_function_body, restored_function, decorator_argspec=function_spec.fullargspec) def load_function_def_library(library, load_shared_name_suffix=None): """Load a set of functions as concrete functions without captured inputs. Functions names are manipulated during load such that they do not overlap with previously created ones. Args: library: FunctionDefLibrary proto message. load_shared_name_suffix: If specified, used to uniquify shared names. Otherwise a unique name is generated. Returns: Map of original function names in the library to instances of `ConcreteFunction` without captured inputs. Raises: ValueError: if functions dependencies have a cycle. """ library_function_names = set(fdef.signature.name for fdef in library.function) functions = {} if load_shared_name_suffix is None: load_shared_name_suffix = "_load_{}".format(ops.uid()) for fdef in _sort_function_defs(library, library_function_names): copy = _fix_fdef(fdef, functions, load_shared_name_suffix) # There is no need to copy all functions into the function def graph. It # leads to a O(n^2) increase of memory when importing functions and the # extra function definitions are a no-op since they already imported as a # function before and passed in explicitly (due to the topologic sort # import). func_graph = function_def_lib.function_def_to_graph( copy, copy_functions=False) for dep in _list_function_deps(fdef, library_function_names): functions[dep].add_to_graph(func_graph) func = function_lib.ConcreteFunction(func_graph) func.add_to_graph() if context.executing_eagerly(): func.add_to_graph(ops.get_default_graph()) functions[fdef.signature.name] = func # Also register the gradients in the current root context. 
with ops.init_scope(): func._register_delayed_rewrite_gradient() # pylint: disable=protected-access return functions def _sort_function_defs(library, library_function_names): """Return a topologic sort of FunctionDefs in a library.""" edges = collections.defaultdict(list) in_count = collections.defaultdict(lambda: 0) for fdef in library.function: for dep in _list_function_deps(fdef, library_function_names): edges[dep].append(fdef.signature.name) in_count[fdef.signature.name] += 1 ready = [ fdef.signature.name for fdef in library.function if in_count[fdef.signature.name] == 0 ] output = [] while ready: node = ready.pop() output.append(node) for dest in edges[node]: in_count[dest] -= 1 if not in_count[dest]: ready.append(dest) if len(output) != len(library.function): failed_to_resolve = sorted(set(in_count.keys()) - set(output)) raise ValueError("There is a cyclic-dependency between functions. ", "Could not resolve %r." % (failed_to_resolve,)) reverse = {fdef.signature.name: fdef for fdef in library.function} return [reverse[x] for x in output] def fix_node_def(node_def, functions, shared_name_suffix, debug_name): """Replace functions calls and shared names in `node_def`.""" if "_gradient_op_type" in node_def.attr: if node_def.op in ["StatefulPartitionedCall", "PartitionedCall"]: # TODO(andresp): This code assumes that the gradient registered for this # function call is the default gradient for the function and not a # custom one. fname = node_def.attr["f"].func.name gradient_name = functions[fname]._register_delayed_rewrite_gradient() # pylint: disable=protected-access node_def.attr["_gradient_op_type"].s = compat.as_bytes(gradient_name) else: logging.warning("Importing a function (%s) with ops with custom " "gradients. Will likely fail if a gradient is " "requested.", debug_name) if node_def.op in functions: node_def.op = functions[node_def.op].name for _, attr_value in node_def.attr.items(): if attr_value.func.name: attr_value.func.name = functions[attr_value.func.name].name # Fix old table creation bug. if node_def.op == "HashTableV2": if ("use_node_name_sharing" not in node_def.attr or not node_def.attr["use_node_name_sharing"].b): node_def.attr["use_node_name_sharing"].b = True # We are turning on node mame sharing, so have to make sure we don't # accidentally share a table resource. shared_name_suffix += "_{}".format(ops.uid()) # TODO(b/124205571): Avoid accidental sharing and destruction of restored # resources. For now uniquify "shared_name" when loading functions to avoid # sharing. if "shared_name" in node_def.attr: if node_def.attr["shared_name"].s: node_def.attr["shared_name"].s += compat.as_bytes(shared_name_suffix) else: # Blank shared_name attributes would use the node name, so we'll start # with that when uniquifying. node_def.attr["shared_name"].s = ( compat.as_bytes(node_def.name) + compat.as_bytes(shared_name_suffix)) def _fix_fdef(orig_fdef, functions, shared_name_suffix): """Fixes a FunctionDef proto to be loaded in current context. In particular, when loading a function library into an eager context, one must rename the functions to avoid conflicts with existent functions. Args: orig_fdef: FunctionDef proto to fix. It is not modified. functions: map from function name to a ConcreteFunction instance. shared_name_suffix: A unique string for this load which helps to avoid `shared_name` collisions across loads. Two functions from the same load using the same `shared_name` still need to share, but functions from different loads with the same `shared_name` should not. 
Returns: A fixed copy of the original FunctionDef. """ fdef = function_pb2.FunctionDef() fdef.CopyFrom(orig_fdef) for node_def in fdef.node_def: fix_node_def(node_def, functions, shared_name_suffix, fdef.signature.name) fdef.signature.name = _clean_function_name(fdef.signature.name) return fdef def _list_function_deps(fdef, library_function_names): """Find functions referenced in `fdef`.""" # TODO(andresp): Recurse into list attributes and into NameAttrList attrs both # when listing deps and when fixing them. `function_def_to_graph` also # requires fixes. deps = set() for node_def in fdef.node_def: if node_def.op in library_function_names: deps.add(node_def.op) else: for _, attr_value in node_def.attr.items(): if attr_value.WhichOneof("value") == "func": deps.add(attr_value.func.name) return deps def _clean_function_name(name): """Vanity function to keep the function names comprehensible.""" # Note: each time a function is wrapped into `function_lib.ConcreteFunction` # its name becomes "__inference_<orig>_xyz". match = re.search(r"^__inference_(.*)_\d+$", name) if match: return match.group(1) else: return name
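# A small sketch (not part of the original module) of the naming behaviour
# above: `_clean_function_name` strips the "__inference_<name>_<uid>" wrapper
# that each round of wrapping into `function_lib.ConcreteFunction` adds, so
# functions keep readable names across repeated save/load cycles.


def _example_clean_function_name():
  assert _clean_function_name("__inference_call_and_return_42") == (
      "call_and_return")
  # Names that never went through the wrapper are returned unchanged.
  assert _clean_function_name("my_custom_fn") == "my_custom_fn"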
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/function_deserialization.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SavedModel simple save functionality.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import simple_save from tensorflow.python.saved_model import tag_constants class SimpleSaveTest(test.TestCase): def _init_and_validate_variable(self, sess, variable_name, variable_value): v = variables.Variable(variable_value, name=variable_name) self.evaluate(variables.global_variables_initializer()) self.assertEqual(variable_value, self.evaluate(v)) return v def _check_variable_info(self, actual_variable, expected_variable): self.assertEqual(actual_variable.name, expected_variable.name) self.assertEqual(actual_variable.dtype, expected_variable.dtype) self.assertEqual(len(actual_variable.shape), len(expected_variable.shape)) for i in range(len(actual_variable.shape)): self.assertEqual(actual_variable.shape[i], expected_variable.shape[i]) def _check_tensor_info(self, actual_tensor_info, expected_tensor): self.assertEqual(actual_tensor_info.name, expected_tensor.name) self.assertEqual(actual_tensor_info.dtype, expected_tensor.dtype) self.assertEqual( len(actual_tensor_info.tensor_shape.dim), len(expected_tensor.shape)) for i in range(len(actual_tensor_info.tensor_shape.dim)): self.assertEqual(actual_tensor_info.tensor_shape.dim[i].size, expected_tensor.shape[i]) @test_util.run_deprecated_v1 def testSimpleSave(self): """Test simple_save that uses the default parameters.""" export_dir = os.path.join(test.get_temp_dir(), "test_simple_save") # Initialize input and output variables and save a prediction graph using # the default parameters. with self.session(graph=ops.Graph()) as sess: var_x = self._init_and_validate_variable(sess, "var_x", 1) var_y = self._init_and_validate_variable(sess, "var_y", 2) inputs = {"x": var_x} outputs = {"y": var_y} simple_save.simple_save(sess, export_dir, inputs, outputs) # Restore the graph with a valid tag and check the global variables and # signature def map. with self.session(graph=ops.Graph()) as sess: graph = loader.load(sess, [tag_constants.SERVING], export_dir) collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) # Check value and metadata of the saved variables. 
self.assertEqual(len(collection_vars), 2) self.assertEqual(1, collection_vars[0].eval()) self.assertEqual(2, collection_vars[1].eval()) self._check_variable_info(collection_vars[0], var_x) self._check_variable_info(collection_vars[1], var_y) # Check that the appropriate signature_def_map is created with the # default key and method name, and the specified inputs and outputs. signature_def_map = graph.signature_def self.assertEqual(1, len(signature_def_map)) self.assertEqual(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, list(signature_def_map.keys())[0]) signature_def = signature_def_map[ signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] self.assertEqual(signature_constants.PREDICT_METHOD_NAME, signature_def.method_name) self.assertEqual(1, len(signature_def.inputs)) self._check_tensor_info(signature_def.inputs["x"], var_x) self.assertEqual(1, len(signature_def.outputs)) self._check_tensor_info(signature_def.outputs["y"], var_y) if __name__ == "__main__": test.main()
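# For reference, the API exercised by the test above, reduced to its core (a
# TF 1.x graph-mode sketch assuming `export_dir` is a writable, non-existent
# directory; not part of the original test file).


def _example_simple_save(export_dir):
  import tensorflow.compat.v1 as tf

  with tf.Session(graph=tf.Graph()) as sess:
    x = tf.placeholder(tf.float32, [None, 3], name="x")
    w = tf.Variable(tf.ones([3, 1]), name="w")
    y = tf.matmul(x, w, name="y")
    sess.run(tf.global_variables_initializer())
    # One call writes a SERVING MetaGraph with a single PREDICT SignatureDef
    # under the default serving signature key.
    tf.saved_model.simple_save(
        sess, export_dir, inputs={"x": x}, outputs={"y": y})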
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/simple_save_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Import a TF v1-style SavedModel when executing eagerly.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from tensorflow.python.eager import context from tensorflow.python.eager import lift_to_graph from tensorflow.python.eager import wrap_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import function_deserialization from tensorflow.python.saved_model import loader_impl from tensorflow.python.saved_model import signature_serialization from tensorflow.python.training import monitored_session from tensorflow.python.training import saver as tf_saver from tensorflow.python.training.tracking import tracking class _Initializer(tracking.CapturableResource): """Represents an initialization operation restored from a SavedModel. Without this object re-export of imported 1.x SavedModels would omit the original SavedModel's initialization procedure. Created when `tf.saved_model.load` loads a TF 1.x-style SavedModel with an initialization op. This object holds a function which runs the initialization. It does not require any manual user intervention; `tf.saved_model.save` will see this object and automatically add it to the exported SavedModel, and `tf.saved_model.load` runs the initialization function automatically. """ def __init__(self, init_fn, asset_paths): super(_Initializer, self).__init__() self._asset_paths = asset_paths self._init_fn = init_fn def _create_resource(self): return array_ops.placeholder( dtype=dtypes.resource, shape=[], name="unused_resource") def _initialize(self): return self._init_fn(*[path.asset_path for path in self._asset_paths]) class _EagerSavedModelLoader(loader_impl.SavedModelLoader): """Loads a SavedModel without using Sessions.""" def get_meta_graph_def_from_tags(self, tags): """Override to support implicit one-MetaGraph loading with tags=None.""" if tags is None: if len(self._saved_model.meta_graphs) != 1: tag_sets = [mg.meta_info_def.tags for mg in self._saved_model.meta_graphs] raise ValueError( ("Importing a SavedModel with tf.saved_model.load requires a " "'tags=' argument if there is more than one MetaGraph. Got " "'tags=None', but there are {} MetaGraphs in the SavedModel with " "tag sets {}. 
Pass a 'tags=' argument to load this SavedModel.") .format(len(self._saved_model.meta_graphs), tag_sets)) return self._saved_model.meta_graphs[0] return super(_EagerSavedModelLoader, self).get_meta_graph_def_from_tags( tags) def load_graph(self, returns, meta_graph_def): """Called from wrap_function to import `meta_graph_def`.""" # pylint: disable=protected-access saver, _ = tf_saver._import_meta_graph_with_return_elements( meta_graph_def) # pylint: enable=protected-access returns[0] = saver def restore_variables(self, wrapped, saver): """Restores variables from the checkpoint.""" if saver is not None: saver_def = saver.saver_def filename_tensor = wrapped.graph.as_graph_element( saver_def.filename_tensor_name) # We both feed and fetch filename_tensor so we have an operation to use to # feed into variable initializers (only relevant for v1 graph building). restore_fn = wrapped.prune( feeds=[filename_tensor], fetches=[filename_tensor, wrapped.graph.as_graph_element(saver_def.restore_op_name)]) initializer, _ = restore_fn(constant_op.constant(self._variables_path)) if not ops.executing_eagerly_outside_functions(): # Add the initialization operation to the table initializers collection # in case we don't have any lifted variables to attach it to. There # isn't another great place to put it. ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, initializer) one_unlifted = False for variable in wrapped.graph.get_collection_ref( ops.GraphKeys.GLOBAL_VARIABLES): if variable.graph is wrapped.graph: one_unlifted = True # pylint: disable=protected-access variable._initializer_op = initializer # pylint: enable=protected-access if one_unlifted: logging.warning( "Some variables could not be lifted out of a loaded function. " "Run the tf.initializers.tables_initializer() operation to " "restore these variables.") def _extract_signatures(self, wrapped, meta_graph_def): """Creates ConcreteFunctions for signatures in `meta_graph_def`.""" signature_functions = {} for signature_key, signature_def in meta_graph_def.signature_def.items(): if signature_def.inputs: input_names, input_specs = zip(*signature_def.inputs.items()) else: input_names = [] input_specs = [] # TODO(allenl): Support optional arguments feeds = [wrapped.graph.as_graph_element(inp.name) for inp in input_specs] fetches = {name: out for name, out in signature_def.outputs.items()} try: signature_fn = wrapped.prune(feeds=feeds, fetches=fetches) except lift_to_graph.UnliftableError as ex: # Mutate the exception to add a bit more detail. args = ex.args if not args: message = "" else: message = args[0] message = ( ("A SavedModel signature needs an input for each placeholder the " "signature's outputs use. An output for signature '{}' depends on " "a placeholder which is not an input (i.e. the placeholder is not " "fed a value).\n\n").format(signature_key) + message) ex.args = (message,) + args[1:] raise # pylint: disable=protected-access signature_fn._arg_keywords = input_names if len(input_names) == 1: # Allowing positional arguments does not create any ambiguity if there's # only one. 
signature_fn._num_positional_args = 1 else: signature_fn._num_positional_args = 0 # pylint: enable=protected-access signature_functions[signature_key] = signature_fn return signature_functions def load(self, tags): """Creates an object from the MetaGraph identified by `tags`.""" meta_graph_def = self.get_meta_graph_def_from_tags(tags) load_shared_name_suffix = "_load_{}".format(ops.uid()) functions = function_deserialization.load_function_def_library( meta_graph_def.graph_def.library, load_shared_name_suffix=load_shared_name_suffix) # Replace existing functions in the MetaGraphDef with renamed functions so # we don't have duplicates or name collisions. meta_graph_def.graph_def.library.Clear() for function in functions.values(): meta_graph_def.graph_def.library.function.add().CopyFrom( function.function_def) # We've renamed functions and shared names. We need the same operation on # the GraphDef itself for consistency. for node_def in meta_graph_def.graph_def.node: function_deserialization.fix_node_def(node_def, functions, load_shared_name_suffix, debug_name="MetaGraph import") load_graph_returns = [None] wrapped = wrap_function.wrap_function( functools.partial(self.load_graph, load_graph_returns, meta_graph_def), signature=[]) saver, = load_graph_returns self.restore_variables(wrapped, saver) with wrapped.graph.as_default(): init_op = loader_impl.get_init_op( meta_graph_def) or monitored_session.Scaffold.default_local_init_op() # Add a dummy Tensor we know we can fetch to add control dependencies to. init_anchor = constant_op.constant(0., name="dummy_fetch") root = tracking.AutoTrackable() asset_feed_tensors = [] asset_paths = [] for tensor_name, value in loader_impl.get_asset_tensors( self._export_dir, meta_graph_def).items(): asset_feed_tensors.append(wrapped.graph.as_graph_element(tensor_name)) asset_paths.append(tracking.TrackableAsset(value)) init_fn = wrapped.prune( feeds=asset_feed_tensors, fetches=[init_anchor, wrapped.graph.as_graph_element(init_op)]) initializer = _Initializer(init_fn, asset_paths) # pylint: disable=protected-access local_init_op, _ = initializer._initialize() # pylint: enable=protected-access with ops.init_scope(): if not context.executing_eagerly(): ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, local_init_op) for variable in wrapped.graph.get_collection_ref( ops.GraphKeys.LOCAL_VARIABLES): # pylint: disable=protected-access variable._initializer_op = local_init_op # pylint: enable=protected-access root.initializer = initializer root.asset_paths = asset_paths signature_functions = self._extract_signatures(wrapped, meta_graph_def) root.signatures = signature_serialization.create_signature_map( signature_functions) root.variables = list(wrapped.graph.variables) root.tensorflow_version = ( meta_graph_def.meta_info_def.tensorflow_version) root.tensorflow_git_version = ( meta_graph_def.meta_info_def.tensorflow_git_version) root.graph = wrapped.graph root.prune = wrapped.prune return root def load(export_dir, tags): """Load a v1-style SavedModel as an object.""" loader = _EagerSavedModelLoader(export_dir) return loader.load(tags=tags)
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/load_v1_in_v2.py
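A minimal usage sketch for the loader above: it assumes a hypothetical export directory and that the SavedModel exposes a single-input 'serving_default' signature; neither comes from this module.

from tensorflow.python.framework import constant_op
from tensorflow.python.saved_model import load_v1_in_v2

# tags=None is accepted only when the SavedModel contains exactly one MetaGraph;
# otherwise pass the tag set of the MetaGraph to import, e.g. ['serve'].
imported = load_v1_in_v2.load('/tmp/tf1_saved_model', tags=None)  # hypothetical path

# Restored signatures are exposed as ConcreteFunctions keyed by SignatureDef name.
serving_fn = imported.signatures['serving_default']
outputs = serving_fn(constant_op.constant([[1.0]]))  # dict of named output tensors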
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for managing different mode strings used by Keras and Estimator models.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.util.compat import collections_abc


class KerasModeKeys(object):
  """Standard names for model modes.

  The following standard keys are defined:

  * `TRAIN`: training/fitting mode.
  * `TEST`: testing/evaluation mode.
  * `PREDICT`: prediction/inference mode.
  """

  TRAIN = 'train'
  TEST = 'test'
  PREDICT = 'predict'


# TODO(kathywu): Remove copy in Estimator after nightlies
class EstimatorModeKeys(object):
  """Standard names for Estimator model modes.

  The following standard keys are defined:

  * `TRAIN`: training/fitting mode.
  * `EVAL`: testing/evaluation mode.
  * `PREDICT`: prediction/inference mode.
  """

  TRAIN = 'train'
  EVAL = 'eval'
  PREDICT = 'infer'


def is_predict(mode):
  return mode in [KerasModeKeys.PREDICT, EstimatorModeKeys.PREDICT]


def is_eval(mode):
  return mode in [KerasModeKeys.TEST, EstimatorModeKeys.EVAL]


def is_train(mode):
  return mode in [KerasModeKeys.TRAIN, EstimatorModeKeys.TRAIN]


class ModeKeyMap(collections_abc.Mapping):
  """Map using ModeKeys as keys.

  This class creates an immutable mapping from modes to values. For example,
  SavedModel export of Keras and Estimator models use this to map modes to
  their corresponding MetaGraph tags/SignatureDef keys.

  Since this class uses modes, rather than strings, as keys, both "predict"
  (Keras's PREDICT ModeKey) and "infer" (Estimator's PREDICT ModeKey) map to
  the same value.
  """

  def __init__(self, **kwargs):
    self._internal_dict = {}
    self._keys = []
    for key in kwargs:
      self._keys.append(key)
      dict_key = self._get_internal_key(key)
      if dict_key in self._internal_dict:
        raise ValueError(
            'Error creating ModeKeyMap. Multiple keys/values found for {} mode.'
            .format(dict_key))
      self._internal_dict[dict_key] = kwargs[key]

  def _get_internal_key(self, key):
    """Return keys used for the internal dictionary."""
    if is_train(key):
      return KerasModeKeys.TRAIN
    if is_eval(key):
      return KerasModeKeys.TEST
    if is_predict(key):
      return KerasModeKeys.PREDICT
    raise ValueError('Invalid mode key: {}.'.format(key))

  def __getitem__(self, key):
    return self._internal_dict[self._get_internal_key(key)]

  def __iter__(self):
    return iter(self._keys)

  def __len__(self):
    return len(self._keys)
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/model_utils/mode_keys.py
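A short illustration of the mode-key helpers above; the mapped tag lists are arbitrary example values, not constants defined in this module.

from tensorflow.python.saved_model.model_utils import mode_keys
from tensorflow.python.saved_model.model_utils.mode_keys import KerasModeKeys

# ModeKeyMap keys are mode strings; the values here are made-up MetaGraph tag lists.
tag_map = mode_keys.ModeKeyMap(**{
    KerasModeKeys.PREDICT: ['serve'],
    KerasModeKeys.TRAIN: ['train'],
})

# Keras's 'predict' and Estimator's 'infer' resolve to the same internal key,
# so either mode retrieves the same value.
assert tag_map[KerasModeKeys.PREDICT] == ['serve']
assert tag_map[mode_keys.EstimatorModeKeys.PREDICT] == ['serve']
assert mode_keys.is_eval(KerasModeKeys.TEST)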
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for creating SavedModels.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import time import six from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import tag_constants from tensorflow.python.saved_model.model_utils import export_output as export_output_lib from tensorflow.python.saved_model.model_utils import mode_keys from tensorflow.python.saved_model.model_utils.mode_keys import KerasModeKeys as ModeKeys from tensorflow.python.util import compat # Mapping of the modes to appropriate MetaGraph tags in the SavedModel. EXPORT_TAG_MAP = mode_keys.ModeKeyMap(**{ ModeKeys.PREDICT: [tag_constants.SERVING], ModeKeys.TRAIN: [tag_constants.TRAINING], ModeKeys.TEST: [tag_constants.EVAL]}) # For every exported mode, a SignatureDef map should be created using the # functions `export_outputs_for_mode` and `build_all_signature_defs`. By # default, this map will contain a single Signature that defines the input # tensors and output predictions, losses, and/or metrics (depending on the mode) # The default keys used in the SignatureDef map are defined below. SIGNATURE_KEY_MAP = mode_keys.ModeKeyMap(**{ ModeKeys.PREDICT: signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, ModeKeys.TRAIN: signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY, ModeKeys.TEST: signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY}) # Default names used in the SignatureDef input map, which maps strings to # TensorInfo protos. SINGLE_FEATURE_DEFAULT_NAME = 'feature' SINGLE_RECEIVER_DEFAULT_NAME = 'input' SINGLE_LABEL_DEFAULT_NAME = 'label' ### Below utilities are specific to SavedModel exports. def build_all_signature_defs(receiver_tensors, export_outputs, receiver_tensors_alternatives=None, serving_only=True): """Build `SignatureDef`s for all export outputs. Args: receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying input nodes where this receiver expects to be fed by default. Typically, this is a single placeholder expecting serialized `tf.Example` protos. export_outputs: a dict of ExportOutput instances, each of which has an as_signature_def instance method that will be called to retrieve the signature_def for all export output tensors. receiver_tensors_alternatives: a dict of string to additional groups of receiver tensors, each of which may be a `Tensor` or a dict of string to `Tensor`. These named receiver tensor alternatives generate additional serving signatures, which may be used to feed inputs at different points within the input receiver subgraph. 
A typical usage is to allow feeding raw feature `Tensor`s *downstream* of the tf.io.parse_example() op. Defaults to None. serving_only: boolean; if true, resulting signature defs will only include valid serving signatures. If false, all requested signatures will be returned. Returns: signature_def representing all passed args. Raises: ValueError: if export_outputs is not a dict """ if not isinstance(receiver_tensors, dict): receiver_tensors = {SINGLE_RECEIVER_DEFAULT_NAME: receiver_tensors} if export_outputs is None or not isinstance(export_outputs, dict): raise ValueError('export_outputs must be a dict and not' '{}'.format(type(export_outputs))) signature_def_map = {} excluded_signatures = {} for output_key, export_output in export_outputs.items(): signature_name = '{}'.format(output_key or 'None') try: signature = export_output.as_signature_def(receiver_tensors) signature_def_map[signature_name] = signature except ValueError as e: excluded_signatures[signature_name] = str(e) if receiver_tensors_alternatives: for receiver_name, receiver_tensors_alt in ( six.iteritems(receiver_tensors_alternatives)): if not isinstance(receiver_tensors_alt, dict): receiver_tensors_alt = { SINGLE_RECEIVER_DEFAULT_NAME: receiver_tensors_alt } for output_key, export_output in export_outputs.items(): signature_name = '{}:{}'.format(receiver_name or 'None', output_key or 'None') try: signature = export_output.as_signature_def(receiver_tensors_alt) signature_def_map[signature_name] = signature except ValueError as e: excluded_signatures[signature_name] = str(e) _log_signature_report(signature_def_map, excluded_signatures) # The above calls to export_output_lib.as_signature_def should return only # valid signatures; if there is a validity problem, they raise a ValueError, # in which case we exclude that signature from signature_def_map above. # The is_valid_signature check ensures that the signatures produced are # valid for serving, and acts as an additional sanity check for export # signatures produced for serving. We skip this check for training and eval # signatures, which are not intended for serving. if serving_only: signature_def_map = { k: v for k, v in signature_def_map.items() if signature_def_utils.is_valid_signature(v) } return signature_def_map _FRIENDLY_METHOD_NAMES = { signature_constants.CLASSIFY_METHOD_NAME: 'Classify', signature_constants.REGRESS_METHOD_NAME: 'Regress', signature_constants.PREDICT_METHOD_NAME: 'Predict', signature_constants.SUPERVISED_TRAIN_METHOD_NAME: 'Train', signature_constants.SUPERVISED_EVAL_METHOD_NAME: 'Eval', } def _log_signature_report(signature_def_map, excluded_signatures): """Log a report of which signatures were produced.""" sig_names_by_method_name = collections.defaultdict(list) # We'll collect whatever method_names are present, but also we want to make # sure to output a line for each of the three standard methods even if they # have no signatures. 
for method_name in _FRIENDLY_METHOD_NAMES: sig_names_by_method_name[method_name] = [] for signature_name, sig in signature_def_map.items(): sig_names_by_method_name[sig.method_name].append(signature_name) # TODO(b/67733540): consider printing the full signatures, not just names for method_name, sig_names in sig_names_by_method_name.items(): if method_name in _FRIENDLY_METHOD_NAMES: method_name = _FRIENDLY_METHOD_NAMES[method_name] logging.info('Signatures INCLUDED in export for {}: {}'.format( method_name, sig_names if sig_names else 'None')) if excluded_signatures: logging.info('Signatures EXCLUDED from export because they cannot be ' 'be served via TensorFlow Serving APIs:') for signature_name, message in excluded_signatures.items(): logging.info('\'{}\' : {}'.format(signature_name, message)) if not signature_def_map: logging.warn('Export includes no signatures!') elif (signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in signature_def_map): logging.warn('Export includes no default signature!') # When we create a timestamped directory, there is a small chance that the # directory already exists because another process is also creating these # directories. In this case we just wait one second to get a new timestamp and # try again. If this fails several times in a row, then something is seriously # wrong. MAX_DIRECTORY_CREATION_ATTEMPTS = 10 def get_timestamped_export_dir(export_dir_base): """Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name. """ attempts = 0 while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS: timestamp = int(time.time()) result_dir = os.path.join( compat.as_bytes(export_dir_base), compat.as_bytes(str(timestamp))) if not gfile.Exists(result_dir): # Collisions are still possible (though extremely unlikely): this # directory is not actually created yet, but it will be almost # instantly on return from this function. return result_dir time.sleep(1) attempts += 1 logging.warn('Directory {} already exists; retrying (attempt {}/{})'.format( result_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS)) raise RuntimeError('Failed to obtain a unique export directory name after ' '{} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS)) def get_temp_export_dir(timestamped_export_dir): """Builds a directory name based on the argument but starting with 'temp-'. This relies on the fact that TensorFlow Serving ignores subdirectories of the base directory that can't be parsed as integers. Args: timestamped_export_dir: the name of the eventual export directory, e.g. /foo/bar/<timestamp> Returns: A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-<timestamp>. """ (dirname, basename) = os.path.split(timestamped_export_dir) temp_export_dir = os.path.join( compat.as_bytes(dirname), compat.as_bytes('temp-{}'.format(basename))) return temp_export_dir def export_outputs_for_mode( mode, serving_export_outputs=None, predictions=None, loss=None, metrics=None): """Util function for constructing a `ExportOutput` dict given a mode. 
The returned dict can be directly passed to `build_all_signature_defs` helper function as the `export_outputs` argument, used for generating a SignatureDef map. Args: mode: A `ModeKeys` specifying the mode. serving_export_outputs: Describes the output signatures to be exported to `SavedModel` and used during serving. Should be a dict or None. predictions: A dict of Tensors or single Tensor representing model predictions. This argument is only used if serving_export_outputs is not set. loss: A dict of Tensors or single Tensor representing calculated loss. metrics: A dict of (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op Returns: Dictionary mapping the a key to an `tf.estimator.export.ExportOutput` object The key is the expected SignatureDef key for the mode. Raises: ValueError: if an appropriate ExportOutput cannot be found for the mode. """ if mode not in SIGNATURE_KEY_MAP: raise ValueError( 'Export output type not found for mode: {}. Expected one of: {}.\n' 'One likely error is that V1 Estimator Modekeys were somehow passed to ' 'this function. Please ensure that you are using the new ModeKeys.' .format(mode, SIGNATURE_KEY_MAP.keys())) signature_key = SIGNATURE_KEY_MAP[mode] if mode_keys.is_predict(mode): return get_export_outputs(serving_export_outputs, predictions) elif mode_keys.is_train(mode): return {signature_key: export_output_lib.TrainOutput( loss=loss, predictions=predictions, metrics=metrics)} else: return {signature_key: export_output_lib.EvalOutput( loss=loss, predictions=predictions, metrics=metrics)} def get_export_outputs(export_outputs, predictions): """Validate export_outputs or create default export_outputs. Args: export_outputs: Describes the output signatures to be exported to `SavedModel` and used during serving. Should be a dict or None. predictions: Predictions `Tensor` or dict of `Tensor`. Returns: Valid export_outputs dict Raises: TypeError: if export_outputs is not a dict or its values are not ExportOutput instances. """ if export_outputs is None: default_output = export_output_lib.PredictOutput(predictions) export_outputs = { signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: default_output} if not isinstance(export_outputs, dict): raise TypeError('export_outputs must be dict, given: {}'.format( export_outputs)) for v in six.itervalues(export_outputs): if not isinstance(v, export_output_lib.ExportOutput): raise TypeError( 'Values in export_outputs must be ExportOutput objects. ' 'Given: {}'.format(export_outputs)) _maybe_add_default_serving_output(export_outputs) return export_outputs def _maybe_add_default_serving_output(export_outputs): """Add a default serving output to the export_outputs if not present. Args: export_outputs: Describes the output signatures to be exported to `SavedModel` and used during serving. Should be a dict. Returns: export_outputs dict with default serving signature added if necessary Raises: ValueError: if multiple export_outputs were provided without a default serving key. """ if len(export_outputs) == 1: (key, value), = export_outputs.items() if key != signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: export_outputs[ signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = value if len(export_outputs) > 1: if (signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in export_outputs): raise ValueError( 'Multiple export_outputs were provided, but none of them is ' 'specified as the default. 
Do this by naming one of them with ' 'signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.') return export_outputs
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/model_utils/export_utils.py
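A minimal sketch of how the helpers above fit together when exporting a predict signature; it assumes TF1-style graph mode (placeholders) and uses made-up tensor names and values.

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.saved_model.model_utils import export_utils
from tensorflow.python.saved_model.model_utils.mode_keys import KerasModeKeys

# Receiver tensor fed at serving time (e.g. serialized tf.Example protos).
receiver = array_ops.placeholder(dtypes.string, name='input_example')
predictions = {'probabilities': constant_op.constant([[0.2, 0.8]])}

# Build the per-mode ExportOutput dict, then turn it into SignatureDefs.
export_outputs = export_utils.export_outputs_for_mode(
    KerasModeKeys.PREDICT, predictions=predictions)
signature_defs = export_utils.build_all_signature_defs(
    receiver, export_outputs, serving_only=True)
# signature_defs maps 'serving_default' to a Predict SignatureDef.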
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for export.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import tensor_shape_pb2 from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.keras import metrics as metrics_module from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.platform import test from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model.model_utils import export_output as export_output_lib class ExportOutputTest(test.TestCase): def test_regress_value_must_be_float(self): with context.graph_mode(): value = array_ops.placeholder(dtypes.string, 1, name='output-tensor-1') with self.assertRaisesRegexp( ValueError, 'Regression output value must be a float32 Tensor'): export_output_lib.RegressionOutput(value) def test_classify_classes_must_be_strings(self): with context.graph_mode(): classes = array_ops.placeholder(dtypes.float32, 1, name='output-tensor-1') with self.assertRaisesRegexp( ValueError, 'Classification classes must be a string Tensor'): export_output_lib.ClassificationOutput(classes=classes) def test_classify_scores_must_be_float(self): with context.graph_mode(): scores = array_ops.placeholder(dtypes.string, 1, name='output-tensor-1') with self.assertRaisesRegexp( ValueError, 'Classification scores must be a float32 Tensor'): export_output_lib.ClassificationOutput(scores=scores) def test_classify_requires_classes_or_scores(self): with self.assertRaisesRegexp( ValueError, 'At least one of scores and classes must be set.'): export_output_lib.ClassificationOutput() def test_build_standardized_signature_def_regression(self): with context.graph_mode(): input_tensors = { 'input-1': array_ops.placeholder( dtypes.string, 1, name='input-tensor-1') } value = array_ops.placeholder(dtypes.float32, 1, name='output-tensor-1') export_output = export_output_lib.RegressionOutput(value) actual_signature_def = export_output.as_signature_def(input_tensors) expected_signature_def = meta_graph_pb2.SignatureDef() shape = tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]) dtype_float = types_pb2.DataType.Value('DT_FLOAT') dtype_string = types_pb2.DataType.Value('DT_STRING') expected_signature_def.inputs[ signature_constants.REGRESS_INPUTS].CopyFrom( meta_graph_pb2.TensorInfo(name='input-tensor-1:0', dtype=dtype_string, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.REGRESS_OUTPUTS].CopyFrom( 
meta_graph_pb2.TensorInfo(name='output-tensor-1:0', dtype=dtype_float, tensor_shape=shape)) expected_signature_def.method_name = ( signature_constants.REGRESS_METHOD_NAME) self.assertEqual(actual_signature_def, expected_signature_def) def test_build_standardized_signature_def_classify_classes_only(self): """Tests classification with one output tensor.""" with context.graph_mode(): input_tensors = { 'input-1': array_ops.placeholder( dtypes.string, 1, name='input-tensor-1') } classes = array_ops.placeholder(dtypes.string, 1, name='output-tensor-1') export_output = export_output_lib.ClassificationOutput(classes=classes) actual_signature_def = export_output.as_signature_def(input_tensors) expected_signature_def = meta_graph_pb2.SignatureDef() shape = tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]) dtype_string = types_pb2.DataType.Value('DT_STRING') expected_signature_def.inputs[ signature_constants.CLASSIFY_INPUTS].CopyFrom( meta_graph_pb2.TensorInfo(name='input-tensor-1:0', dtype=dtype_string, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom( meta_graph_pb2.TensorInfo(name='output-tensor-1:0', dtype=dtype_string, tensor_shape=shape)) expected_signature_def.method_name = ( signature_constants.CLASSIFY_METHOD_NAME) self.assertEqual(actual_signature_def, expected_signature_def) def test_build_standardized_signature_def_classify_both(self): """Tests multiple output tensors that include classes and scores.""" with context.graph_mode(): input_tensors = { 'input-1': array_ops.placeholder( dtypes.string, 1, name='input-tensor-1') } classes = array_ops.placeholder(dtypes.string, 1, name='output-tensor-classes') scores = array_ops.placeholder(dtypes.float32, 1, name='output-tensor-scores') export_output = export_output_lib.ClassificationOutput( scores=scores, classes=classes) actual_signature_def = export_output.as_signature_def(input_tensors) expected_signature_def = meta_graph_pb2.SignatureDef() shape = tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]) dtype_float = types_pb2.DataType.Value('DT_FLOAT') dtype_string = types_pb2.DataType.Value('DT_STRING') expected_signature_def.inputs[ signature_constants.CLASSIFY_INPUTS].CopyFrom( meta_graph_pb2.TensorInfo(name='input-tensor-1:0', dtype=dtype_string, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom( meta_graph_pb2.TensorInfo(name='output-tensor-classes:0', dtype=dtype_string, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom( meta_graph_pb2.TensorInfo(name='output-tensor-scores:0', dtype=dtype_float, tensor_shape=shape)) expected_signature_def.method_name = ( signature_constants.CLASSIFY_METHOD_NAME) self.assertEqual(actual_signature_def, expected_signature_def) def test_build_standardized_signature_def_classify_scores_only(self): """Tests classification without classes tensor.""" with context.graph_mode(): input_tensors = { 'input-1': array_ops.placeholder( dtypes.string, 1, name='input-tensor-1') } scores = array_ops.placeholder(dtypes.float32, 1, name='output-tensor-scores') export_output = export_output_lib.ClassificationOutput( scores=scores) actual_signature_def = export_output.as_signature_def(input_tensors) expected_signature_def = meta_graph_pb2.SignatureDef() shape = tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]) dtype_float = 
types_pb2.DataType.Value('DT_FLOAT') dtype_string = types_pb2.DataType.Value('DT_STRING') expected_signature_def.inputs[ signature_constants.CLASSIFY_INPUTS].CopyFrom( meta_graph_pb2.TensorInfo(name='input-tensor-1:0', dtype=dtype_string, tensor_shape=shape)) expected_signature_def.outputs[ signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom( meta_graph_pb2.TensorInfo(name='output-tensor-scores:0', dtype=dtype_float, tensor_shape=shape)) expected_signature_def.method_name = ( signature_constants.CLASSIFY_METHOD_NAME) self.assertEqual(actual_signature_def, expected_signature_def) def test_predict_outputs_valid(self): """Tests that no errors are raised when provided outputs are valid.""" outputs = { 'output0': constant_op.constant([0]), u'output1': constant_op.constant(['foo']), } export_output_lib.PredictOutput(outputs) # Single Tensor is OK too export_output_lib.PredictOutput(constant_op.constant([0])) def test_predict_outputs_invalid(self): with self.assertRaisesRegexp( ValueError, 'Prediction output key must be a string'): export_output_lib.PredictOutput({1: constant_op.constant([0])}) with self.assertRaisesRegexp( ValueError, 'Prediction output value must be a Tensor'): export_output_lib.PredictOutput({ 'prediction1': sparse_tensor.SparseTensor( indices=[[0, 0]], values=[1], dense_shape=[1, 1]), }) class MockSupervisedOutput(export_output_lib._SupervisedOutput): """So that we can test the abstract class methods directly.""" def _get_signature_def_fn(self): pass class SupervisedOutputTest(test.TestCase): def test_supervised_outputs_valid(self): """Tests that no errors are raised when provided outputs are valid.""" with context.graph_mode(): loss = {'my_loss': constant_op.constant([0])} predictions = {u'output1': constant_op.constant(['foo'])} metric_obj = metrics_module.Mean() metric_obj.update_state(constant_op.constant([0])) metrics = { 'metrics': metric_obj, 'metrics2': (constant_op.constant([0]), constant_op.constant([10])) } outputter = MockSupervisedOutput(loss, predictions, metrics) self.assertEqual(outputter.loss['loss/my_loss'], loss['my_loss']) self.assertEqual( outputter.predictions['predictions/output1'], predictions['output1']) self.assertEqual(outputter.metrics['metrics/update_op'].name, 'metric_op_wrapper:0') self.assertEqual( outputter.metrics['metrics2/update_op'], metrics['metrics2'][1]) # Single Tensor is OK too outputter = MockSupervisedOutput( loss['my_loss'], predictions['output1'], metrics['metrics']) self.assertEqual(outputter.loss, {'loss': loss['my_loss']}) self.assertEqual( outputter.predictions, {'predictions': predictions['output1']}) self.assertEqual(outputter.metrics['metrics/update_op'].name, 'metric_op_wrapper_1:0') def test_supervised_outputs_none(self): outputter = MockSupervisedOutput( constant_op.constant([0]), None, None) self.assertEqual(len(outputter.loss), 1) self.assertEqual(outputter.predictions, None) self.assertEqual(outputter.metrics, None) def test_supervised_outputs_invalid(self): with self.assertRaisesRegexp(ValueError, 'predictions output value must'): MockSupervisedOutput(constant_op.constant([0]), [3], None) with self.assertRaisesRegexp(ValueError, 'loss output value must'): MockSupervisedOutput('str', None, None) with self.assertRaisesRegexp(ValueError, 'metrics output value must'): MockSupervisedOutput(None, None, (15.3, 4)) with self.assertRaisesRegexp(ValueError, 'loss output key must'): MockSupervisedOutput({25: 'Tensor'}, None, None) def test_supervised_outputs_tuples(self): """Tests that no errors are raised when provided 
outputs are valid.""" with context.graph_mode(): loss = {('my', 'loss'): constant_op.constant([0])} predictions = {(u'output1', '2'): constant_op.constant(['foo'])} metric_obj = metrics_module.Mean() metric_obj.update_state(constant_op.constant([0])) metrics = { ('metrics', '1'): metric_obj, ('metrics', '2'): (constant_op.constant([0]), constant_op.constant([10])) } outputter = MockSupervisedOutput(loss, predictions, metrics) self.assertEqual(set(outputter.loss.keys()), set(['loss/my/loss'])) self.assertEqual(set(outputter.predictions.keys()), set(['predictions/output1/2'])) self.assertEqual( set(outputter.metrics.keys()), set([ 'metrics/1/value', 'metrics/1/update_op', 'metrics/2/value', 'metrics/2/update_op' ])) def test_supervised_outputs_no_prepend(self): """Tests that no errors are raised when provided outputs are valid.""" with context.graph_mode(): loss = {'loss': constant_op.constant([0])} predictions = {u'predictions': constant_op.constant(['foo'])} metric_obj = metrics_module.Mean() metric_obj.update_state(constant_op.constant([0])) metrics = { 'metrics_1': metric_obj, 'metrics_2': (constant_op.constant([0]), constant_op.constant([10])) } outputter = MockSupervisedOutput(loss, predictions, metrics) self.assertEqual(set(outputter.loss.keys()), set(['loss'])) self.assertEqual(set(outputter.predictions.keys()), set(['predictions'])) self.assertEqual( set(outputter.metrics.keys()), set([ 'metrics_1/value', 'metrics_1/update_op', 'metrics_2/update_op', 'metrics_2/value' ])) def test_train_signature_def(self): with context.graph_mode(): loss = {'my_loss': constant_op.constant([0])} predictions = {u'output1': constant_op.constant(['foo'])} metric_obj = metrics_module.Mean() metric_obj.update_state(constant_op.constant([0])) metrics = { 'metrics_1': metric_obj, 'metrics_2': (constant_op.constant([0]), constant_op.constant([10])) } outputter = export_output_lib.TrainOutput(loss, predictions, metrics) receiver = {u'features': constant_op.constant(100, shape=(100, 2)), 'labels': constant_op.constant(100, shape=(100, 1))} sig_def = outputter.as_signature_def(receiver) self.assertTrue('loss/my_loss' in sig_def.outputs) self.assertTrue('metrics_1/value' in sig_def.outputs) self.assertTrue('metrics_2/value' in sig_def.outputs) self.assertTrue('predictions/output1' in sig_def.outputs) self.assertTrue('features' in sig_def.inputs) def test_eval_signature_def(self): with context.graph_mode(): loss = {'my_loss': constant_op.constant([0])} predictions = {u'output1': constant_op.constant(['foo'])} outputter = export_output_lib.EvalOutput(loss, predictions, None) receiver = {u'features': constant_op.constant(100, shape=(100, 2)), 'labels': constant_op.constant(100, shape=(100, 1))} sig_def = outputter.as_signature_def(receiver) self.assertTrue('loss/my_loss' in sig_def.outputs) self.assertFalse('metrics/value' in sig_def.outputs) self.assertTrue('predictions/output1' in sig_def.outputs) self.assertTrue('features' in sig_def.inputs) def test_metric_op_is_tensor(self): """Tests that ops.Operation is wrapped by a tensor for metric_ops.""" with context.graph_mode(): loss = {'my_loss': constant_op.constant([0])} predictions = {u'output1': constant_op.constant(['foo'])} metric_obj = metrics_module.Mean() metric_obj.update_state(constant_op.constant([0])) metrics = { 'metrics_1': metric_obj, 'metrics_2': (constant_op.constant([0]), control_flow_ops.no_op()) } outputter = MockSupervisedOutput(loss, predictions, metrics) self.assertTrue(outputter.metrics['metrics_1/update_op'].name.startswith( 
'metric_op_wrapper')) self.assertTrue( isinstance(outputter.metrics['metrics_1/update_op'], ops.Tensor)) self.assertTrue( isinstance(outputter.metrics['metrics_1/value'], ops.Tensor)) self.assertEqual(outputter.metrics['metrics_2/value'], metrics['metrics_2'][0]) self.assertTrue(outputter.metrics['metrics_2/update_op'].name.startswith( 'metric_op_wrapper')) self.assertTrue( isinstance(outputter.metrics['metrics_2/update_op'], ops.Tensor)) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/model_utils/export_output_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for saving a Keras Model or Estimator to the SavedModel format."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=wildcard-import
from tensorflow.python.saved_model.model_utils.export_output import *
from tensorflow.python.saved_model.model_utils.export_utils import build_all_signature_defs
from tensorflow.python.saved_model.model_utils.export_utils import export_outputs_for_mode
from tensorflow.python.saved_model.model_utils.export_utils import EXPORT_TAG_MAP
from tensorflow.python.saved_model.model_utils.export_utils import get_export_outputs
from tensorflow.python.saved_model.model_utils.export_utils import get_temp_export_dir
from tensorflow.python.saved_model.model_utils.export_utils import get_timestamped_export_dir
from tensorflow.python.saved_model.model_utils.export_utils import SIGNATURE_KEY_MAP
# pylint: enable=wildcard-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/model_utils/__init__.py
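Since this __init__ simply re-exports the export utilities, the package can be used directly; a small sketch with a hypothetical base directory:

from tensorflow.python.saved_model import model_utils

export_dir = model_utils.get_timestamped_export_dir('/tmp/exports')  # hypothetical base dir
temp_dir = model_utils.get_temp_export_dir(export_dir)
# A common pattern is to write the SavedModel into temp_dir first and rename it
# to export_dir when finished, so serving never sees a partially written export.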
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ModeKey Tests."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.platform import test
from tensorflow.python.saved_model.model_utils import mode_keys


class ModeKeyMapTest(test.TestCase):

  def test_map(self):
    mode_map = mode_keys.ModeKeyMap(**{
        mode_keys.KerasModeKeys.PREDICT: 3,
        mode_keys.KerasModeKeys.TEST: 1
    })

    # Test dictionary __getitem__
    self.assertEqual(3, mode_map[mode_keys.KerasModeKeys.PREDICT])
    self.assertEqual(3, mode_map[mode_keys.EstimatorModeKeys.PREDICT])
    self.assertEqual(1, mode_map[mode_keys.KerasModeKeys.TEST])
    self.assertEqual(1, mode_map[mode_keys.EstimatorModeKeys.EVAL])

    with self.assertRaises(KeyError):
      _ = mode_map[mode_keys.KerasModeKeys.TRAIN]
    with self.assertRaises(KeyError):
      _ = mode_map[mode_keys.EstimatorModeKeys.TRAIN]
    with self.assertRaisesRegexp(ValueError, 'Invalid mode'):
      _ = mode_map['serve']

    # Test common dictionary methods
    self.assertLen(mode_map, 2)
    self.assertEqual({1, 3}, set(mode_map.values()))
    self.assertEqual(
        {mode_keys.KerasModeKeys.TEST, mode_keys.KerasModeKeys.PREDICT},
        set(mode_map.keys()))

    # Map is immutable
    with self.assertRaises(TypeError):
      mode_map[mode_keys.KerasModeKeys.TEST] = 1

  def test_invalid_init(self):
    with self.assertRaisesRegexp(ValueError, 'Multiple keys/values found'):
      _ = mode_keys.ModeKeyMap(**{
          mode_keys.KerasModeKeys.PREDICT: 3,
          mode_keys.EstimatorModeKeys.PREDICT: 1
      })


if __name__ == '__main__':
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/model_utils/mode_keys_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classes for different types of export output.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.saved_model import signature_def_utils class ExportOutput(object): """Represents an output of a model that can be served. These typically correspond to model heads. """ __metaclass__ = abc.ABCMeta _SEPARATOR_CHAR = '/' @abc.abstractmethod def as_signature_def(self, receiver_tensors): """Generate a SignatureDef proto for inclusion in a MetaGraphDef. The SignatureDef will specify outputs as described in this ExportOutput, and will use the provided receiver_tensors as inputs. Args: receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying input nodes that will be fed. """ pass def _check_output_key(self, key, error_label): # For multi-head models, the key can be a tuple. if isinstance(key, tuple): key = self._SEPARATOR_CHAR.join(key) if not isinstance(key, six.string_types): raise ValueError( '{} output key must be a string; got {}.'.format(error_label, key)) return key def _wrap_and_check_outputs( self, outputs, single_output_default_name, error_label=None): """Wraps raw tensors as dicts and checks type. Note that we create a new dict here so that we can overwrite the keys if necessary. Args: outputs: A `Tensor` or a dict of string to `Tensor`. single_output_default_name: A string key for use in the output dict if the provided `outputs` is a raw tensor. error_label: descriptive string for use in error messages. If none, single_output_default_name will be used. Returns: A dict of tensors Raises: ValueError: if the outputs dict keys are not strings or tuples of strings or the values are not Tensors. """ if not isinstance(outputs, dict): outputs = {single_output_default_name: outputs} output_dict = {} for key, value in outputs.items(): error_name = error_label or single_output_default_name key = self._check_output_key(key, error_name) if not isinstance(value, ops.Tensor): raise ValueError( '{} output value must be a Tensor; got {}.'.format( error_name, value)) output_dict[key] = value return output_dict class ClassificationOutput(ExportOutput): """Represents the output of a classification head. Either classes or scores or both must be set. The classes `Tensor` must provide string labels, not integer class IDs. If only classes is set, it is interpreted as providing top-k results in descending order. If only scores is set, it is interpreted as providing a score for every class in order of class ID. If both classes and scores are set, they are interpreted as zipped, so each score corresponds to the class at the same index. 
Clients should not depend on the order of the entries. """ def __init__(self, scores=None, classes=None): """Constructor for `ClassificationOutput`. Args: scores: A float `Tensor` giving scores (sometimes but not always interpretable as probabilities) for each class. May be `None`, but only if `classes` is set. Interpretation varies-- see class doc. classes: A string `Tensor` giving predicted class labels. May be `None`, but only if `scores` is set. Interpretation varies-- see class doc. Raises: ValueError: if neither classes nor scores is set, or one of them is not a `Tensor` with the correct dtype. """ if (scores is not None and not (isinstance(scores, ops.Tensor) and scores.dtype.is_floating)): raise ValueError('Classification scores must be a float32 Tensor; ' 'got {}'.format(scores)) if (classes is not None and not (isinstance(classes, ops.Tensor) and dtypes.as_dtype(classes.dtype) == dtypes.string)): raise ValueError('Classification classes must be a string Tensor; ' 'got {}'.format(classes)) if scores is None and classes is None: raise ValueError('At least one of scores and classes must be set.') self._scores = scores self._classes = classes @property def scores(self): return self._scores @property def classes(self): return self._classes def as_signature_def(self, receiver_tensors): if len(receiver_tensors) != 1: raise ValueError('Classification input must be a single string Tensor; ' 'got {}'.format(receiver_tensors)) (_, examples), = receiver_tensors.items() if dtypes.as_dtype(examples.dtype) != dtypes.string: raise ValueError('Classification input must be a single string Tensor; ' 'got {}'.format(receiver_tensors)) return signature_def_utils.classification_signature_def( examples, self.classes, self.scores) class RegressionOutput(ExportOutput): """Represents the output of a regression head.""" def __init__(self, value): """Constructor for `RegressionOutput`. Args: value: a float `Tensor` giving the predicted values. Required. Raises: ValueError: if the value is not a `Tensor` with dtype tf.float32. """ if not (isinstance(value, ops.Tensor) and value.dtype.is_floating): raise ValueError('Regression output value must be a float32 Tensor; ' 'got {}'.format(value)) self._value = value @property def value(self): return self._value def as_signature_def(self, receiver_tensors): if len(receiver_tensors) != 1: raise ValueError('Regression input must be a single string Tensor; ' 'got {}'.format(receiver_tensors)) (_, examples), = receiver_tensors.items() if dtypes.as_dtype(examples.dtype) != dtypes.string: raise ValueError('Regression input must be a single string Tensor; ' 'got {}'.format(receiver_tensors)) return signature_def_utils.regression_signature_def(examples, self.value) class PredictOutput(ExportOutput): """Represents the output of a generic prediction head. A generic prediction need not be either a classification or a regression. Named outputs must be provided as a dict from string to `Tensor`, """ _SINGLE_OUTPUT_DEFAULT_NAME = 'output' def __init__(self, outputs): """Constructor for PredictOutput. Args: outputs: A `Tensor` or a dict of string to `Tensor` representing the predictions. Raises: ValueError: if the outputs is not dict, or any of its keys are not strings, or any of its values are not `Tensor`s. 
""" self._outputs = self._wrap_and_check_outputs( outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction') @property def outputs(self): return self._outputs def as_signature_def(self, receiver_tensors): return signature_def_utils.predict_signature_def(receiver_tensors, self.outputs) class _SupervisedOutput(ExportOutput): """Represents the output of a supervised training or eval process.""" __metaclass__ = abc.ABCMeta LOSS_NAME = 'loss' PREDICTIONS_NAME = 'predictions' METRICS_NAME = 'metrics' METRIC_VALUE_SUFFIX = 'value' METRIC_UPDATE_SUFFIX = 'update_op' _loss = None _predictions = None _metrics = None def __init__(self, loss=None, predictions=None, metrics=None): """Constructor for SupervisedOutput (ie, Train or Eval output). Args: loss: dict of Tensors or single Tensor representing calculated loss. predictions: dict of Tensors or single Tensor representing model predictions. metrics: Dict of metric results keyed by name. The values of the dict can be one of the following: (1) instance of `Metric` class. (2) (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op. Raises: ValueError: if any of the outputs' dict keys are not strings or tuples of strings or the values are not Tensors (or Operations in the case of update_op). """ if loss is not None: loss_dict = self._wrap_and_check_outputs(loss, self.LOSS_NAME) self._loss = self._prefix_output_keys(loss_dict, self.LOSS_NAME) if predictions is not None: pred_dict = self._wrap_and_check_outputs( predictions, self.PREDICTIONS_NAME) self._predictions = self._prefix_output_keys( pred_dict, self.PREDICTIONS_NAME) if metrics is not None: self._metrics = self._wrap_and_check_metrics(metrics) def _prefix_output_keys(self, output_dict, output_name): """Prepend output_name to the output_dict keys if it doesn't exist. This produces predictable prefixes for the pre-determined outputs of SupervisedOutput. Args: output_dict: dict of string to Tensor, assumed valid. output_name: prefix string to prepend to existing keys. Returns: dict with updated keys and existing values. """ new_outputs = {} for key, val in output_dict.items(): key = self._prefix_key(key, output_name) new_outputs[key] = val return new_outputs def _prefix_key(self, key, output_name): if key.find(output_name) != 0: key = output_name + self._SEPARATOR_CHAR + key return key def _wrap_and_check_metrics(self, metrics): """Handle the saving of metrics. Metrics is either a tuple of (value, update_op), or a dict of such tuples. Here, we separate out the tuples and create a dict with names to tensors. Args: metrics: Dict of metric results keyed by name. The values of the dict can be one of the following: (1) instance of `Metric` class. (2) (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op. Returns: dict of output_names to tensors Raises: ValueError: if the dict key is not a string, or the metric values or ops are not tensors. """ if not isinstance(metrics, dict): metrics = {self.METRICS_NAME: metrics} outputs = {} for key, value in metrics.items(): if isinstance(value, tuple): metric_val, metric_op = value else: # value is a keras.Metrics object metric_val = value.result() assert len(value.updates) == 1 # We expect only one update op. 
metric_op = value.updates[0] key = self._check_output_key(key, self.METRICS_NAME) key = self._prefix_key(key, self.METRICS_NAME) val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX if not isinstance(metric_val, ops.Tensor): raise ValueError( '{} output value must be a Tensor; got {}.'.format( key, metric_val)) if (not isinstance(metric_op, ops.Tensor) and not isinstance(metric_op, ops.Operation)): raise ValueError( '{} update_op must be a Tensor or Operation; got {}.'.format( key, metric_op)) # We must wrap any ops in a Tensor before export, as the SignatureDef # proto expects tensors only. See b/109740581 metric_op_tensor = metric_op if isinstance(metric_op, ops.Operation): with ops.control_dependencies([metric_op]): metric_op_tensor = constant_op.constant([], name='metric_op_wrapper') outputs[val_name] = metric_val outputs[op_name] = metric_op_tensor return outputs @property def loss(self): return self._loss @property def predictions(self): return self._predictions @property def metrics(self): return self._metrics @abc.abstractmethod def _get_signature_def_fn(self): """Returns a function that produces a SignatureDef given desired outputs.""" pass def as_signature_def(self, receiver_tensors): signature_def_fn = self._get_signature_def_fn() return signature_def_fn( receiver_tensors, self.loss, self.predictions, self.metrics) class TrainOutput(_SupervisedOutput): """Represents the output of a supervised training process. This class generates the appropriate signature def for exporting training output by type-checking and wrapping loss, predictions, and metrics values. """ def _get_signature_def_fn(self): return signature_def_utils.supervised_train_signature_def class EvalOutput(_SupervisedOutput): """Represents the output of a supervised eval process. This class generates the appropriate signature def for exporting eval output by type-checking and wrapping loss, predictions, and metrics values. """ def _get_signature_def_fn(self): return signature_def_utils.supervised_eval_signature_def
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/model_utils/export_output.py
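A minimal sketch of the ExportOutput classes above, assuming TF1-style graph mode; the tensor values and receiver name are arbitrary examples.

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.saved_model.model_utils import export_output

serialized_examples = array_ops.placeholder(dtypes.string, name='tf_example')
classes = constant_op.constant(['cat', 'dog'])
scores = constant_op.constant([0.3, 0.7])

# Classification heads require string classes and/or float32 scores.
classify_output = export_output.ClassificationOutput(scores=scores,
                                                     classes=classes)
signature_def = classify_output.as_signature_def(
    {'examples': serialized_examples})

# Generic heads accept a Tensor or a dict of named output Tensors.
predict_output = export_output.PredictOutput({'scores': scores})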
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for export utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile import time from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.platform import test from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model.model_utils import export_output from tensorflow.python.saved_model.model_utils import export_utils from tensorflow.python.saved_model.model_utils.mode_keys import KerasModeKeys class ExportTest(test_util.TensorFlowTestCase): @test_util.deprecated_graph_mode_only def test_build_all_signature_defs_without_receiver_alternatives(self): receiver_tensor = array_ops.placeholder(dtypes.string) output_1 = constant_op.constant([1.]) output_2 = constant_op.constant(["2"]) output_3 = constant_op.constant(["3"]) export_outputs = { signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: export_output.RegressionOutput(value=output_1), "head-2": export_output.ClassificationOutput(classes=output_2), "head-3": export_output.PredictOutput(outputs={ "some_output_3": output_3 }), } signature_defs = export_utils.build_all_signature_defs( receiver_tensor, export_outputs) expected_signature_defs = { "serving_default": signature_def_utils.regression_signature_def(receiver_tensor, output_1), "head-2": signature_def_utils.classification_signature_def(receiver_tensor, output_2, None), "head-3": signature_def_utils.predict_signature_def({ "input": receiver_tensor }, {"some_output_3": output_3}) } self.assertDictEqual(expected_signature_defs, signature_defs) @test_util.deprecated_graph_mode_only def test_build_all_signature_defs_with_dict_alternatives(self): receiver_tensor = array_ops.placeholder(dtypes.string) receiver_tensors_alternative_1 = { "foo": array_ops.placeholder(dtypes.int64), "bar": array_ops.sparse_placeholder(dtypes.float32)} receiver_tensors_alternatives = {"other": receiver_tensors_alternative_1} output_1 = constant_op.constant([1.]) output_2 = constant_op.constant(["2"]) output_3 = constant_op.constant(["3"]) export_outputs = { signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: export_output.RegressionOutput(value=output_1), "head-2": export_output.ClassificationOutput(classes=output_2), "head-3": export_output.PredictOutput(outputs={ "some_output_3": output_3 }), } signature_defs = export_utils.build_all_signature_defs( receiver_tensor, export_outputs, receiver_tensors_alternatives) expected_signature_defs = { "serving_default": signature_def_utils.regression_signature_def( receiver_tensor, output_1), "head-2": signature_def_utils.classification_signature_def( 
receiver_tensor, output_2, None), "head-3": signature_def_utils.predict_signature_def( {"input": receiver_tensor}, {"some_output_3": output_3}), "other:head-3": signature_def_utils.predict_signature_def( receiver_tensors_alternative_1, {"some_output_3": output_3}) # Note that the alternatives 'other:serving_default' and # 'other:head-2' are invalid, because regession and classification # signatures must take a single string input. Here we verify that # these invalid signatures are not included in the export_utils. } self.assertDictEqual(expected_signature_defs, signature_defs) @test_util.deprecated_graph_mode_only def test_build_all_signature_defs_with_single_alternatives(self): receiver_tensor = array_ops.placeholder(dtypes.string) receiver_tensors_alternative_1 = array_ops.placeholder(dtypes.int64) receiver_tensors_alternative_2 = array_ops.sparse_placeholder( dtypes.float32) # Note we are passing single Tensors as values of # receiver_tensors_alternatives, where normally that is a dict. # In this case a dict will be created using the default receiver tensor # name "input". receiver_tensors_alternatives = {"other1": receiver_tensors_alternative_1, "other2": receiver_tensors_alternative_2} output_1 = constant_op.constant([1.]) output_2 = constant_op.constant(["2"]) output_3 = constant_op.constant(["3"]) export_outputs = { signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: export_output.RegressionOutput(value=output_1), "head-2": export_output.ClassificationOutput(classes=output_2), "head-3": export_output.PredictOutput(outputs={ "some_output_3": output_3 }), } signature_defs = export_utils.build_all_signature_defs( receiver_tensor, export_outputs, receiver_tensors_alternatives) expected_signature_defs = { "serving_default": signature_def_utils.regression_signature_def( receiver_tensor, output_1), "head-2": signature_def_utils.classification_signature_def( receiver_tensor, output_2, None), "head-3": signature_def_utils.predict_signature_def( {"input": receiver_tensor}, {"some_output_3": output_3}), "other1:head-3": signature_def_utils.predict_signature_def( {"input": receiver_tensors_alternative_1}, {"some_output_3": output_3}), "other2:head-3": signature_def_utils.predict_signature_def( {"input": receiver_tensors_alternative_2}, {"some_output_3": output_3}) # Note that the alternatives 'other:serving_default' and 'other:head-2' # are invalid, because regession and classification signatures must take # a single string input. Here we verify that these invalid signatures # are not included in the export_utils. } self.assertDictEqual(expected_signature_defs, signature_defs) def test_build_all_signature_defs_export_outputs_required(self): receiver_tensor = constant_op.constant(["11"]) with self.assertRaises(ValueError) as e: export_utils.build_all_signature_defs(receiver_tensor, None) self.assertTrue(str(e.exception).startswith( "export_outputs must be a dict")) def test_get_timestamped_export_dir(self): export_dir_base = tempfile.mkdtemp() + "export/" export_dir_1 = export_utils.get_timestamped_export_dir( export_dir_base) time.sleep(2) export_dir_2 = export_utils.get_timestamped_export_dir( export_dir_base) time.sleep(2) export_dir_3 = export_utils.get_timestamped_export_dir( export_dir_base) # Export directories should be named using a timestamp that is seconds # since epoch. Such a timestamp is 10 digits long. 
time_1 = os.path.basename(export_dir_1) self.assertEqual(10, len(time_1)) time_2 = os.path.basename(export_dir_2) self.assertEqual(10, len(time_2)) time_3 = os.path.basename(export_dir_3) self.assertEqual(10, len(time_3)) self.assertTrue(int(time_1) < int(time_2)) self.assertTrue(int(time_2) < int(time_3)) @test_util.deprecated_graph_mode_only def test_build_all_signature_defs_serving_only(self): receiver_tensor = {"input": array_ops.placeholder(dtypes.string)} output_1 = constant_op.constant([1.]) export_outputs = { signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: export_output.PredictOutput(outputs=output_1), "train": export_output.TrainOutput(loss=output_1), } signature_defs = export_utils.build_all_signature_defs( receiver_tensor, export_outputs) expected_signature_defs = { "serving_default": signature_def_utils.predict_signature_def( receiver_tensor, {"output": output_1}) } self.assertDictEqual(expected_signature_defs, signature_defs) signature_defs = export_utils.build_all_signature_defs( receiver_tensor, export_outputs, serving_only=False) expected_signature_defs.update({ "train": signature_def_utils.supervised_train_signature_def( receiver_tensor, loss={"loss": output_1}) }) self.assertDictEqual(expected_signature_defs, signature_defs) @test_util.deprecated_graph_mode_only def test_export_outputs_for_mode(self): predictions = {"predictions": constant_op.constant([1.])} loss = {"loss": constant_op.constant([2.])} metrics = { "metrics": (constant_op.constant([3.]), constant_op.constant([4.]))} expected_metrics = { "metrics/value": metrics["metrics"][0], "metrics/update_op": metrics["metrics"][1] } def _build_export_output(mode): return export_utils.export_outputs_for_mode( mode, None, predictions, loss, metrics) ret = _build_export_output(KerasModeKeys.TRAIN) self.assertIn(signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY, ret) export_out = ret[signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY] self.assertIsInstance(export_out, export_output.TrainOutput) self.assertEqual(export_out.predictions, predictions) self.assertEqual(export_out.loss, loss) self.assertEqual(export_out.metrics, expected_metrics) ret = _build_export_output(KerasModeKeys.TEST) self.assertIn(signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY, ret) export_out = ret[signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY] self.assertIsInstance(export_out, export_output.EvalOutput) self.assertEqual(export_out.predictions, predictions) self.assertEqual(export_out.loss, loss) self.assertEqual(export_out.metrics, expected_metrics) ret = _build_export_output(KerasModeKeys.PREDICT) self.assertIn(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, ret) export_out = ret[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] self.assertIsInstance(export_out, export_output.PredictOutput) self.assertEqual(export_out.outputs, predictions) classes = constant_op.constant(["class5"]) ret = export_utils.export_outputs_for_mode( KerasModeKeys.PREDICT, {"classify": export_output.ClassificationOutput( classes=classes)}) self.assertIn("classify", ret) export_out = ret["classify"] self.assertIsInstance(export_out, export_output.ClassificationOutput) self.assertEqual(export_out.classes, classes) if __name__ == "__main__": test.main()
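# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): a hedged example
# of the call pattern the tests above verify for export_outputs_for_mode().
# It reuses this module's imports; the names `my_predictions`, `my_loss` and
# the helper itself are hypothetical placeholders, and graph mode is assumed
# as in the decorated tests above.
# ---------------------------------------------------------------------------
def _sketch_export_outputs_for_mode():
  my_predictions = {"scores": constant_op.constant([0.5])}
  my_loss = {"loss": constant_op.constant([1.0])}
  # TRAIN and TEST modes wrap predictions/loss/metrics in TrainOutput and
  # EvalOutput respectively; PREDICT wraps only the predictions.
  train_outputs = export_utils.export_outputs_for_mode(
      KerasModeKeys.TRAIN, None, my_predictions, my_loss, None)
  predict_outputs = export_utils.export_outputs_for_mode(
      KerasModeKeys.PREDICT, None, my_predictions, my_loss, None)
  # Each result maps a default signature key to an ExportOutput object.
  assert signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY in train_outputs
  assert (signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
          in predict_outputs)
  return train_outputs, predict_outputs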
tensorflow-r1.15.5-nv23.03
tensorflow/python/saved_model/model_utils/export_test.py
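# Hedged sketch of how the utilities tested above fit together outside the
# test class: build_all_signature_defs() turns receiver tensors plus
# ExportOutput objects into SignatureDef protos, and
# get_timestamped_export_dir() picks a fresh subdirectory whose basename is a
# 10-digit seconds-since-epoch timestamp.  The base directory
# "/tmp/sketch_export", the tensor names, and the helper function are
# assumptions for illustration only; graph mode is assumed.
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model.model_utils import export_output
from tensorflow.python.saved_model.model_utils import export_utils


def _sketch_build_signatures():
  with ops.Graph().as_default():
    serialized = array_ops.placeholder(dtypes.string, name="tf_example")
    scores = constant_op.constant([0.5])
    export_outputs = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.PredictOutput(outputs={"scores": scores}),
    }
    # One SignatureDef per export output; alternatives could be passed as a
    # third argument, as exercised in the tests above.
    signature_defs = export_utils.build_all_signature_defs(
        {"examples": serialized}, export_outputs)
    export_dir = export_utils.get_timestamped_export_dir("/tmp/sketch_export")
    return signature_defs, export_dir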
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.layers.base.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import numpy as np from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.engine import base_layer as keras_base_layer from tensorflow.python.keras.engine import input_spec from tensorflow.python.layers import base as base_layers from tensorflow.python.layers import core as core_layers from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import random_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test class BaseLayerTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def testLayerProperties(self): layer = base_layers.Layer(name='my_layer') self.assertEqual(layer.variables, []) self.assertEqual(layer.trainable_variables, []) self.assertEqual(layer.non_trainable_variables, []) if not context.executing_eagerly(): # updates, losses only supported in GRAPH mode self.assertEqual(layer.updates, []) self.assertEqual(layer.losses, []) self.assertEqual(layer.built, False) layer = base_layers.Layer(name='my_layer', trainable=False) self.assertEqual(layer.trainable, False) @test_util.run_in_graph_and_eager_modes def testInt64Layer(self): layer = base_layers.Layer(name='my_layer', dtype='int64') layer.add_variable('my_var', [2, 2]) self.assertEqual(layer.name, 'my_layer') @test_util.run_in_graph_and_eager_modes def testKerasStyleAddWeight(self): keras_layer = keras_base_layer.Layer(name='keras_layer') with ops.name_scope('foo'): keras_variable = keras_layer.add_variable( 'my_var', [2, 2], initializer=init_ops.zeros_initializer()) self.assertEqual(keras_variable.name, 'foo/my_var:0') with ops.name_scope('baz'): old_style_layer = base_layers.Layer(name='my_layer') # Test basic variable creation. variable = old_style_layer.add_variable( 'my_var', [2, 2], initializer=init_ops.zeros_initializer()) self.assertEqual(variable.name, 'my_layer/my_var:0') with base_layers.keras_style_scope(): layer = base_layers.Layer(name='my_layer') # Test basic variable creation. with ops.name_scope('bar'): variable = layer.add_variable( 'my_var', [2, 2], initializer=init_ops.zeros_initializer()) self.assertEqual(variable.name, 'bar/my_var:0') @test_util.run_in_graph_and_eager_modes def testAddWeight(self): layer = base_layers.Layer(name='my_layer') # Test basic variable creation. 
variable = layer.add_variable( 'my_var', [2, 2], initializer=init_ops.zeros_initializer()) self.assertEqual(variable.name, 'my_layer/my_var:0') self.assertEqual(layer.variables, [variable]) self.assertEqual(layer.trainable_variables, [variable]) self.assertEqual(layer.non_trainable_variables, []) if not context.executing_eagerly(): self.assertEqual( layer.variables, ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)) # Test non-trainable variable creation. # layer.add_variable should work even outside `build` and `call`. variable_2 = layer.add_variable( 'non_trainable_var', [2, 2], initializer=init_ops.zeros_initializer(), trainable=False) self.assertEqual(layer.variables, [variable, variable_2]) self.assertEqual(layer.trainable_variables, [variable]) self.assertEqual(layer.non_trainable_variables, [variable_2]) if not context.executing_eagerly(): self.assertEqual( len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1) regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3 _ = layer.add_variable( 'reg_var', [2, 2], initializer=init_ops.zeros_initializer(), regularizer=regularizer) self.assertEqual(len(layer.losses), 1) added_variable = [False] # Test that sync `ON_READ` variables are defaulted to be non-trainable. variable_3 = layer.add_variable( 'sync_on_read_var', [2, 2], initializer=init_ops.zeros_initializer(), synchronization=variable_scope.VariableSynchronization.ON_READ, aggregation=variable_scope.VariableAggregation.SUM) self.assertEqual(layer.non_trainable_variables, [variable_2, variable_3]) @def_function.function def function_adds_weight(): if not added_variable[0]: layer.add_variable( 'reg_var_from_function', [2, 2], initializer=init_ops.zeros_initializer(), regularizer=regularizer) added_variable[0] = True function_adds_weight() self.assertEqual(len(layer.losses), 2) def testInvalidTrainableSynchronizationCombination(self): layer = base_layers.Layer(name='my_layer') with self.assertRaisesRegexp( ValueError, 'Synchronization value can be set to ' 'VariableSynchronization.ON_READ only for non-trainable variables. 
' 'You have specified trainable=True and ' 'synchronization=VariableSynchronization.ON_READ.'): _ = layer.add_variable( 'v', [2, 2], initializer=init_ops.zeros_initializer(), synchronization=variable_scope.VariableSynchronization.ON_READ, trainable=True) @test_util.run_deprecated_v1 def testReusePartitionedVaraiblesAndRegularizers(self): regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3 partitioner = partitioned_variables.fixed_size_partitioner(3) for reuse in [False, True]: with variable_scope.variable_scope(variable_scope.get_variable_scope(), partitioner=partitioner, reuse=reuse): layer = base_layers.Layer(name='my_layer') _ = layer.add_variable( 'reg_part_var', [4, 4], initializer=init_ops.zeros_initializer(), regularizer=regularizer) self.assertEqual( len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 3) @test_util.run_in_graph_and_eager_modes def testCall(self): class MyLayer(base_layers.Layer): def call(self, inputs): return math_ops.square(inputs) layer = MyLayer(name='my_layer') inputs = random_ops.random_uniform((5,), seed=1) outputs = layer.apply(inputs) self.assertEqual(layer.built, True) if not context.executing_eagerly(): # op is only supported in GRAPH mode self.assertEqual(outputs.op.name, 'my_layer/Square') @test_util.run_in_graph_and_eager_modes def testDeepCopy(self): class MyLayer(base_layers.Layer): def call(self, inputs): return math_ops.square(inputs) layer = MyLayer(name='my_layer') layer._private_tensor = random_ops.random_uniform(()) inputs = random_ops.random_uniform((5,), seed=1) outputs = layer.apply(inputs) self.assertEqual(layer.built, True) if not context.executing_eagerly(): # op only supported in GRAPH mode. self.assertEqual(outputs.op.name, 'my_layer/Square') layer_copy = copy.deepcopy(layer) self.assertEqual(layer_copy.name, layer.name) self.assertEqual(layer_copy._scope.name, layer._scope.name) self.assertEqual(layer_copy._private_tensor, layer._private_tensor) @test_util.run_in_graph_and_eager_modes def testScopeNaming(self): class PrivateLayer(base_layers.Layer): def call(self, inputs): return inputs inputs = random_ops.random_uniform((5,)) default_layer = PrivateLayer() _ = default_layer.apply(inputs) self.assertEqual(default_layer._scope.name, 'private_layer') default_layer1 = PrivateLayer() default_layer1.apply(inputs) self.assertEqual(default_layer1._scope.name, 'private_layer_1') my_layer = PrivateLayer(name='my_layer') my_layer.apply(inputs) self.assertEqual(my_layer._scope.name, 'my_layer') my_layer1 = PrivateLayer(name='my_layer') my_layer1.apply(inputs) self.assertEqual(my_layer1._scope.name, 'my_layer_1') my_layer2 = PrivateLayer(name='my_layer') my_layer2.apply(inputs) self.assertEqual(my_layer2._scope.name, 'my_layer_2') # Name scope shouldn't affect names. with ops.name_scope('some_name_scope'): default_layer2 = PrivateLayer() default_layer2.apply(inputs) self.assertEqual(default_layer2._scope.name, 'private_layer_2') my_layer3 = PrivateLayer(name='my_layer') my_layer3.apply(inputs) self.assertEqual(my_layer3._scope.name, 'my_layer_3') other_layer = PrivateLayer(name='other_layer') other_layer.apply(inputs) self.assertEqual(other_layer._scope.name, 'other_layer') # Variable scope gets added to scope names. 
with variable_scope.variable_scope('var_scope'): default_layer_scoped = PrivateLayer() default_layer_scoped.apply(inputs) self.assertEqual(default_layer_scoped._scope.name, 'var_scope/private_layer') my_layer_scoped = PrivateLayer(name='my_layer') my_layer_scoped.apply(inputs) self.assertEqual(my_layer_scoped._scope.name, 'var_scope/my_layer') my_layer_scoped1 = PrivateLayer(name='my_layer') my_layer_scoped1.apply(inputs) self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1') @test_util.run_in_graph_and_eager_modes def testInputSpecNdimCheck(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = input_spec.InputSpec(ndim=2) def call(self, inputs): return inputs if not context.executing_eagerly(): layer = CustomerLayer() with self.assertRaisesRegexp(ValueError, r'requires a defined rank'): layer.apply(array_ops.placeholder('int32')) layer = CustomerLayer() with self.assertRaisesRegexp(ValueError, r'expected ndim=2'): layer.apply(constant_op.constant([1])) # Note that we re-create the layer since in Eager mode, input spec checks # only happen on first call. # Works layer = CustomerLayer() layer.apply(constant_op.constant([[1], [2]])) @test_util.run_in_graph_and_eager_modes def testInputSpecMinNdimCheck(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = input_spec.InputSpec(min_ndim=2) def call(self, inputs): return inputs if not context.executing_eagerly(): layer = CustomerLayer() with self.assertRaisesRegexp(ValueError, r'requires a defined rank'): layer.apply(array_ops.placeholder('int32')) layer = CustomerLayer() with self.assertRaisesRegexp(ValueError, r'expected min_ndim=2'): layer.apply(constant_op.constant([1])) # Works layer = CustomerLayer() layer.apply(constant_op.constant([[1], [2]])) layer = CustomerLayer() layer.apply(constant_op.constant([[[1], [2]]])) @test_util.run_in_graph_and_eager_modes def testInputSpecMaxNdimCheck(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = input_spec.InputSpec(max_ndim=2) def call(self, inputs): return inputs if not context.executing_eagerly(): layer = CustomerLayer() with self.assertRaisesRegexp(ValueError, r'requires a defined rank'): layer.apply(array_ops.placeholder('int32')) layer = CustomerLayer() with self.assertRaisesRegexp(ValueError, r'expected max_ndim=2'): layer.apply(constant_op.constant([[[1], [2]]])) # Works layer = CustomerLayer() layer.apply(constant_op.constant([1])) layer = CustomerLayer() layer.apply(constant_op.constant([[1], [2]])) @test_util.run_in_graph_and_eager_modes def testInputSpecDtypeCheck(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = input_spec.InputSpec(dtype='float32') def call(self, inputs): return inputs layer = CustomerLayer() with self.assertRaisesRegexp(ValueError, r'expected dtype=float32'): layer.apply(constant_op.constant(1, dtype=dtypes.int32)) # Works layer = CustomerLayer() layer.apply(constant_op.constant(1.0, dtype=dtypes.float32)) @test_util.run_in_graph_and_eager_modes def testInputSpecAxesCheck(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = input_spec.InputSpec(axes={-1: 2}) def call(self, inputs): return inputs layer = CustomerLayer() with self.assertRaisesRegexp(ValueError, r'expected axis'): 
layer.apply(constant_op.constant([1, 2, 3])) # Works layer = CustomerLayer() layer.apply(constant_op.constant([1, 2])) layer = CustomerLayer() layer.apply(constant_op.constant([[1, 2], [3, 4], [5, 6]])) @test_util.run_in_graph_and_eager_modes def testInputSpecShapeCheck(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = input_spec.InputSpec(shape=(None, 3)) def call(self, inputs): return inputs layer = CustomerLayer() with self.assertRaisesRegexp(ValueError, r'expected shape'): layer.apply(constant_op.constant([[1, 2]])) # Works layer = CustomerLayer() layer.apply(constant_op.constant([[1, 2, 3], [4, 5, 6]])) @test_util.run_in_graph_and_eager_modes def testNoInputSpec(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = None def call(self, inputs): return inputs layer = CustomerLayer() layer.apply(constant_op.constant(1)) # Works if not context.executing_eagerly(): layer.apply(array_ops.placeholder('int32')) layer.apply(array_ops.placeholder('int32', shape=(2, 3))) @test_util.run_in_graph_and_eager_modes def test_count_params(self): dense = core_layers.Dense(16) dense.build((None, 4)) self.assertEqual(dense.count_params(), 16 * 4 + 16) dense = core_layers.Dense(16) with self.assertRaises(ValueError): dense.count_params() @test_util.run_in_graph_and_eager_modes def testDictInputOutput(self): class DictLayer(base_layers.Layer): def call(self, inputs): return {'l' + key: inputs[key] for key in inputs} layer = DictLayer() if context.executing_eagerly(): i1 = constant_op.constant(3) i2 = constant_op.constant(4.0) result = layer.apply({'abel': i1, 'ogits': i2}) self.assertTrue(isinstance(result, dict)) self.assertEqual(set(['label', 'logits']), set(result.keys())) self.assertEqual(3, result['label'].numpy()) self.assertEqual(4.0, result['logits'].numpy()) else: i1 = array_ops.placeholder('int32') i2 = array_ops.placeholder('float32') result = layer.apply({'abel': i1, 'ogits': i2}) self.assertTrue(isinstance(result, dict)) self.assertEqual(set(['label', 'logits']), set(result.keys())) @test_util.run_deprecated_v1 def testActivityRegularizer(self): regularizer = math_ops.reduce_sum layer = base_layers.Layer(activity_regularizer=regularizer) x = array_ops.placeholder('int32') layer.apply(x) self.assertEqual(len(layer.get_losses_for(x)), 1) def testNameScopeIsConsistentWithVariableScope(self): # Github issue 13429. 
class MyLayer(base_layers.Layer): def build(self, input_shape): self.my_var = self.add_variable('my_var', (), dtypes.float32) self.built = True def call(self, inputs): return math_ops.multiply(inputs, self.my_var, name='my_op') def _gen_layer(x, name=None): layer = MyLayer(name=name) out = layer.apply(x) return layer, out # unnamed layer with ops.Graph().as_default(): x = array_ops.placeholder(dtypes.float32, (), 'x') layer, op = _gen_layer(x) layer1, op1 = _gen_layer(op) layer2, op2 = _gen_layer(op1) self.assertEqual(layer.my_var.name, 'my_layer/my_var:0') self.assertEqual(op.name, 'my_layer/my_op:0') self.assertEqual(layer1.my_var.name, 'my_layer_1/my_var:0') self.assertEqual(op1.name, 'my_layer_1/my_op:0') self.assertEqual(layer2.my_var.name, 'my_layer_2/my_var:0') self.assertEqual(op2.name, 'my_layer_2/my_op:0') # name starts from zero with ops.Graph().as_default(): x = array_ops.placeholder(dtypes.float32, (), 'x') layer, op = _gen_layer(x, name='name') layer1, op1 = _gen_layer(op, name='name_1') layer2, op2 = _gen_layer(op1, name='name_2') self.assertEqual(layer.my_var.name, 'name/my_var:0') self.assertEqual(op.name, 'name/my_op:0') self.assertEqual(layer1.my_var.name, 'name_1/my_var:0') self.assertEqual(op1.name, 'name_1/my_op:0') self.assertEqual(layer2.my_var.name, 'name_2/my_var:0') self.assertEqual(op2.name, 'name_2/my_op:0') # name starts from one with ops.Graph().as_default(): x = array_ops.placeholder(dtypes.float32, (), 'x') layer, op = _gen_layer(x, name='name_1') layer1, op1 = _gen_layer(op, name='name_2') layer2, op2 = _gen_layer(op1, name='name_3') self.assertEqual(layer.my_var.name, 'name_1/my_var:0') self.assertEqual(op.name, 'name_1/my_op:0') self.assertEqual(layer1.my_var.name, 'name_2/my_var:0') self.assertEqual(op1.name, 'name_2/my_op:0') self.assertEqual(layer2.my_var.name, 'name_3/my_var:0') self.assertEqual(op2.name, 'name_3/my_op:0') def testVariablesAreLiftedFromFunctionBuildingGraphs(self): class MyLayer(base_layers.Layer): def build(self, input_shape): self.my_var = self.add_variable('my_var', (), dtypes.float32) self.built = True def call(self, inputs): return inputs outer_graph = ops.get_default_graph() function_building_graph = ops.Graph() function_building_graph._building_function = True with outer_graph.as_default(): with function_building_graph.as_default(): layer = MyLayer() # Create a variable by invoking build through __call__ and assert that # it is both tracked and lifted into the outer graph. 
        inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
        layer.apply(inputs)
        self.assertEqual(len(layer.variables), 1)
        self.assertEqual(len(layer.trainable_variables), 1)
        self.assertEqual(layer.variables[0].graph, outer_graph)

  @test_util.run_deprecated_v1
  def testGetUpdateFor(self):

    class MyLayer(base_layers.Layer):

      def build(self, input_shape):
        self.a = self.add_variable('a', (), dtypes.float32, trainable=False)
        self.b = self.add_variable('b', (), dtypes.float32, trainable=False)
        self.add_update(state_ops.assign_add(self.a, 1., name='b_update'))
        self.built = True

      def call(self, inputs):
        self.add_update(state_ops.assign_add(self.a, inputs, name='a_update'),
                        inputs=True)
        return inputs + 1

    layer = MyLayer()
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)

    self.assertEqual(len(layer.updates), 2)
    self.assertEqual(len(layer.get_updates_for(None)), 1)
    self.assertEqual(len(layer.get_updates_for([inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([outputs])), 0)

    # Call same layer on new input, creating one more conditional update
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)

    self.assertEqual(len(layer.updates), 3)
    self.assertEqual(len(layer.get_updates_for(None)), 1)
    # Check that we are successfully filtering out irrelevant updates
    self.assertEqual(len(layer.get_updates_for([inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([outputs])), 0)

  @test_util.run_deprecated_v1
  def testGetLossesFor(self):

    class MyLayer(base_layers.Layer):

      def build(self, input_shape):
        self.a = self.add_variable('a', (), dtypes.float32, trainable=False)
        self.b = self.add_variable('b', (), dtypes.float32, trainable=False)
        self.add_loss(self.a)
        self.built = True

      def call(self, inputs):
        self.add_loss(inputs, inputs=True)
        return inputs + 1

    layer = MyLayer()
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)

    self.assertEqual(len(layer.losses), 2)
    self.assertEqual(len(layer.get_losses_for(None)), 1)
    self.assertEqual(len(layer.get_losses_for([inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([outputs])), 0)

    # Call same layer on new input, creating one more conditional loss
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)

    self.assertEqual(len(layer.losses), 3)
    self.assertEqual(len(layer.get_losses_for(None)), 1)
    # Check that we are successfully filtering out irrelevant losses
    self.assertEqual(len(layer.get_losses_for([inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([outputs])), 0)


class IdentityLayer(base_layers.Layer):
  """A layer that returns the identity of its input."""

  def call(self, inputs):
    return inputs


@test_util.run_all_in_graph_and_eager_modes
class DTypeTest(test.TestCase):

  def _const(self, dtype):
    return array_ops.constant(1, dtype=dtype)

  def test_dtype_inferred_from_input(self):
    # Test with Tensor input
    layer = IdentityLayer()
    self.assertIsNone(layer.dtype)
    layer(self._const('float64'))
    self.assertEqual(layer.dtype, 'float64')

    # Test with Numpy input
    layer =
IdentityLayer() self.assertIsNone(layer.dtype) layer(np.array(1., dtype='float64')) self.assertEqual(layer.dtype, 'float64') # Test with integer input layer = IdentityLayer() self.assertIsNone(layer.dtype) layer(self._const('int32')) self.assertEqual(layer.dtype, 'int32') # Test layer dtype doesn't change when passed a new dtype layer = IdentityLayer() self.assertIsNone(layer.dtype) layer(self._const('float64')) self.assertEqual(layer.dtype, 'float64') layer(self._const('float16')) self.assertEqual(layer.dtype, 'float64') # Test layer dtype inferred from first input layer = IdentityLayer() layer([self._const('float32'), self._const('float64')]) self.assertEqual(layer.dtype, 'float32') def test_passing_dtype_to_constructor(self): layer = IdentityLayer(dtype='float64') layer(self._const('float32')) self.assertEqual(layer.dtype, 'float64') layer = IdentityLayer(dtype='int32') layer(self._const('float32')) self.assertEqual(layer.dtype, 'int32') layer = IdentityLayer(dtype=dtypes.float64) layer(self._const('float32')) self.assertEqual(layer.dtype, 'float64') def test_inputs_not_casted(self): layer = IdentityLayer(dtype='float32') self.assertEqual(layer(self._const('float64')).dtype, 'float64') if __name__ == '__main__': test.main()
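# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): a hedged example
# of the tf.layers.Layer subclass pattern the tests above exercise -- an
# input_spec validated on apply(), a weight created through add_variable()
# in build(), and a regularizer surfaced via layer.losses.  The name
# `ScaleLayer` is hypothetical; the sketch reuses this module's imports.
# ---------------------------------------------------------------------------
class ScaleLayer(base_layers.Layer):
  """Multiplies its input by a single trainable scalar."""

  def __init__(self, **kwargs):
    super(ScaleLayer, self).__init__(**kwargs)
    # Inputs with fewer than one dimension are rejected when the layer is
    # applied, as in the InputSpec tests above.
    self.input_spec = input_spec.InputSpec(min_ndim=1)

  def build(self, input_shape):
    # add_variable registers the weight with the layer (and, in graph mode,
    # with the TRAINABLE_VARIABLES collection); the regularizer's value is
    # exposed through `self.losses`.
    self.scale = self.add_variable(
        'scale', (),
        initializer=init_ops.ones_initializer(),
        regularizer=lambda x: math_ops.reduce_sum(x) * 1e-3)
    self.built = True

  def call(self, inputs):
    return inputs * self.scale
    # Typical usage: layer = ScaleLayer(name='scale'); y = layer.apply(x)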
tensorflow-r1.15.5-nv23.03
tensorflow/python/layers/base_test.py
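# Hedged usage sketch for the v1 convolutional layer wrappers documented in
# the file that follows: each layer exists both as a class (configure once,
# then apply()) and as a deprecated functional alias that builds and applies
# it in one call.  The module alias, the 28x28x3 input shape, and the helper
# function are assumptions for illustration; graph mode is assumed because
# the functional aliases raise ValueError under eager execution.
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops


def _sketch_conv_interfaces():
  with ops.Graph().as_default():
    images = array_ops.placeholder(dtypes.float32, (None, 28, 28, 3))
    # Object-oriented interface: the layer object owns its kernel and bias
    # variables and can be reapplied to share them.
    conv = conv_layers.Conv2D(filters=8, kernel_size=3, padding='same')
    y_class = conv.apply(images)
    # Functional interface: creates the layer under `name` and applies it;
    # passing reuse=True in a later call would share the same variables.
    y_func = conv_layers.conv2d(
        images, filters=8, kernel_size=3, padding='same', name='conv_fn')
    return y_class, y_func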
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Contains the convolutional layer classes and their functional aliases. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras import layers as keras_layers from tensorflow.python.layers import base from tensorflow.python.ops import init_ops from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export @tf_export(v1=['layers.Conv1D']) class Conv1D(keras_layers.Conv1D, base.Layer): """1D convolution layer (e.g. temporal convolution). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the length of the 1D convolution window. strides: An integer or tuple/list of a single integer, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: An integer or tuple/list of a single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. 
bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. """ def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(Conv1D, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use `tf.keras.layers.Conv1D` instead.') @tf_export(v1=['layers.conv1d']) def conv1d(inputs, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for 1D convolution layer (e.g. temporal convolution). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the length of the 1D convolution window. strides: An integer or tuple/list of a single integer, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: An integer or tuple/list of a single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. 
activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. """ layer = Conv1D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @tf_export(v1=['layers.Conv2D']) class Conv2D(keras_layers.Conv2D, base.Layer): """2D convolution layer (e.g. spatial convolution over images). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. 
bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. """ def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(Conv2D, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use `tf.keras.layers.Conv2D` instead.') @tf_export(v1=['layers.conv2d']) def conv2d(inputs, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for the 2D convolution layer. This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. 
`channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. """ layer = Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @tf_export(v1=['layers.Conv3D']) class Conv3D(keras_layers.Conv3D, base.Layer): """3D convolution layer (e.g. spatial convolution over volumes). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). 
data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. dilation_rate: An integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. """ def __init__(self, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(Conv3D, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use `tf.keras.layers.Conv3D` instead.') @tf_export(v1=['layers.conv3d']) def conv3d(inputs, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for the 3D convolution layer. This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. 
Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. dilation_rate: An integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. """ layer = Conv3D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @tf_export(v1=['layers.SeparableConv1D']) class SeparableConv1D(keras_layers.SeparableConv1D, base.Layer): """Depthwise separable 1D convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. 
If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A single integer specifying the spatial dimensions of the filters. strides: A single integer specifying the strides of the convolution. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: A single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. 
""" def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer=None, pointwise_initializer=None, bias_initializer=init_ops.zeros_initializer(), depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(SeparableConv1D, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, pointwise_constraint=pointwise_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @tf_export(v1=['layers.SeparableConv2D']) class SeparableConv2D(keras_layers.SeparableConv2D, base.Layer): """Depthwise separable 2D convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. 
pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. """ def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1), depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer=None, pointwise_initializer=None, bias_initializer=init_ops.zeros_initializer(), depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(SeparableConv2D, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, pointwise_constraint=pointwise_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use `tf.keras.layers.SeparableConv1D` instead.') @tf_export(v1=['layers.separable_conv1d']) def separable_conv1d(inputs, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer=None, pointwise_initializer=None, bias_initializer=init_ops.zeros_initializer(), depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for the depthwise separable 1D convolution layer. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A single integer specifying the spatial dimensions of the filters. strides: A single integer specifying the strides of the convolution. 
Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: A single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. 
""" layer = SeparableConv1D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, pointwise_constraint=pointwise_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @deprecation.deprecated( date=None, instructions='Use `tf.keras.layers.SeparableConv2D` instead.') @tf_export(v1=['layers.separable_conv2d']) def separable_conv2d(inputs, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1), depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer=None, pointwise_initializer=None, bias_initializer=init_ops.zeros_initializer(), depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for the depthwise separable 2D convolution layer. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. 
If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. """ layer = SeparableConv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, pointwise_constraint=pointwise_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @tf_export(v1=['layers.Conv2DTranspose']) class Conv2DTranspose(keras_layers.Conv2DTranspose, base.Layer): """Transposed 2D convolution layer (sometimes called 2D Deconvolution). The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 positive integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. padding: one of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. 
kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. """ def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(Conv2DTranspose, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use `tf.keras.layers.Conv2DTranspose` instead.') @tf_export(v1=['layers.conv2d_transpose']) def conv2d_transpose(inputs, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for transposed 2D convolution layer. The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. Arguments: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 positive integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. padding: one of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. 
`channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. activation: Activation function. Set it to `None` to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If `None`, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. """ layer = Conv2DTranspose( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @tf_export(v1=['layers.Conv3DTranspose']) class Conv3DTranspose(keras_layers.Conv3DTranspose, base.Layer): """Transposed 3D convolution layer (sometimes called 3D Deconvolution). Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. activation: Activation function. Set it to `None` to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If `None`, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. 
activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. """ def __init__(self, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format='channels_last', activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(Conv3DTranspose, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use `tf.keras.layers.Conv3DTranspose` instead.') @tf_export(v1=['layers.conv3d_transpose']) def conv3d_transpose(inputs, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format='channels_last', activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for transposed 3D convolution layer. Arguments: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 3 positive integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 3 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. padding: one of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. 
kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. """ layer = Conv3DTranspose( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) # Aliases Convolution1D = Conv1D Convolution2D = Conv2D Convolution3D = Conv3D SeparableConvolution2D = SeparableConv2D Convolution2DTranspose = Deconvolution2D = Deconv2D = Conv2DTranspose Convolution3DTranspose = Deconvolution3D = Deconv3D = Conv3DTranspose convolution1d = conv1d convolution2d = conv2d convolution3d = conv3d separable_convolution2d = separable_conv2d convolution2d_transpose = deconvolution2d = deconv2d = conv2d_transpose convolution3d_transpose = deconvolution3d = deconv3d = conv3d_transpose
tensorflow-r1.15.5-nv23.03
tensorflow/python/layers/convolutional.py
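The file above defines both class-based layers (e.g. `SeparableConv2D`, `Conv2DTranspose`) and deprecated functional wrappers (`separable_conv2d`, `conv2d_transpose`) that construct the layer and return `layer.apply(inputs)`. A minimal usage sketch follows; it assumes TF 1.x graph mode, and the input shape, filter counts, and scope names are illustrative assumptions, not values taken from the file.

import tensorflow.compat.v1 as tf

# Assumes TF 1.x graph mode; the functional wrappers raise ValueError under
# eager execution (see the docstrings above). Shapes below are example values.
images = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])

# Depthwise-separable convolution: a per-channel depthwise step followed by a
# 1x1 pointwise convolution that mixes channels.
sep = tf.layers.separable_conv2d(
    images, filters=16, kernel_size=3, padding='same',
    depth_multiplier=2, activation=tf.nn.relu, name='sep')

# Transposed convolution ("deconvolution"): upsamples spatially, here by 2x.
up = tf.layers.conv2d_transpose(
    sep, filters=8, kernel_size=3, strides=2, padding='same', name='up')

print(sep.shape)  # (?, 32, 32, 16)
print(up.shape)   # (?, 64, 64, 8)

The class form (instantiating `tf.layers.SeparableConv2D` and calling it on the input) builds the same ops; the functional form only adds the `reuse`/`_scope` plumbing shown in the source above.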
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.layers.utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.layers import utils from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class ConvUtilsTest(test.TestCase): def testConvertDataFormat(self): self.assertEqual('NCDHW', utils.convert_data_format('channels_first', 5)) self.assertEqual('NCHW', utils.convert_data_format('channels_first', 4)) self.assertEqual('NCW', utils.convert_data_format('channels_first', 3)) self.assertEqual('NHWC', utils.convert_data_format('channels_last', 4)) self.assertEqual('NWC', utils.convert_data_format('channels_last', 3)) self.assertEqual('NDHWC', utils.convert_data_format('channels_last', 5)) with self.assertRaises(ValueError): utils.convert_data_format('invalid', 2) def testNormalizeTuple(self): self.assertEqual((2, 2, 2), utils.normalize_tuple(2, n=3, name='strides')) self.assertEqual( (2, 1, 2), utils.normalize_tuple((2, 1, 2), n=3, name='strides')) with self.assertRaises(ValueError): utils.normalize_tuple((2, 1), n=3, name='strides') with self.assertRaises(ValueError): utils.normalize_tuple(None, n=3, name='strides') def testNormalizeDataFormat(self): self.assertEqual( 'channels_last', utils.normalize_data_format('Channels_Last')) self.assertEqual( 'channels_first', utils.normalize_data_format('CHANNELS_FIRST')) with self.assertRaises(ValueError): utils.normalize_data_format('invalid') def testNormalizePadding(self): self.assertEqual('same', utils.normalize_padding('SAME')) self.assertEqual('valid', utils.normalize_padding('VALID')) with self.assertRaises(ValueError): utils.normalize_padding('invalid') def testConvOutputLength(self): self.assertEqual(4, utils.conv_output_length(4, 2, 'same', 1, 1)) self.assertEqual(2, utils.conv_output_length(4, 2, 'same', 2, 1)) self.assertEqual(3, utils.conv_output_length(4, 2, 'valid', 1, 1)) self.assertEqual(2, utils.conv_output_length(4, 2, 'valid', 2, 1)) self.assertEqual(5, utils.conv_output_length(4, 2, 'full', 1, 1)) self.assertEqual(3, utils.conv_output_length(4, 2, 'full', 2, 1)) self.assertEqual(2, utils.conv_output_length(5, 2, 'valid', 2, 2)) def testConvInputLength(self): self.assertEqual(3, utils.conv_input_length(4, 2, 'same', 1)) self.assertEqual(2, utils.conv_input_length(2, 2, 'same', 2)) self.assertEqual(4, utils.conv_input_length(3, 2, 'valid', 1)) self.assertEqual(4, utils.conv_input_length(2, 2, 'valid', 2)) self.assertEqual(3, utils.conv_input_length(4, 2, 'full', 1)) self.assertEqual(4, utils.conv_input_length(3, 2, 'full', 2)) def testDeconvOutputLength(self): self.assertEqual(4, utils.deconv_output_length(4, 2, 'same', 1)) self.assertEqual(8, utils.deconv_output_length(4, 2, 
'same', 2)) self.assertEqual(5, utils.deconv_output_length(4, 2, 'valid', 1)) self.assertEqual(8, utils.deconv_output_length(4, 2, 'valid', 2)) self.assertEqual(3, utils.deconv_output_length(4, 2, 'full', 1)) self.assertEqual(6, utils.deconv_output_length(4, 2, 'full', 2)) class ConstantValueTest(test.TestCase): @test_util.run_deprecated_v1 def testConstantValue(self): f1 = lambda: constant_op.constant(5) f2 = lambda: constant_op.constant(32) # Boolean pred self.assertEqual(5, utils.constant_value(utils.smart_cond(True, f1, f2))) self.assertEqual(32, utils.constant_value(utils.smart_cond(False, f1, f2))) # Integer pred self.assertEqual(5, utils.constant_value(utils.smart_cond(1, f1, f2))) self.assertEqual(32, utils.constant_value(utils.smart_cond(0, f1, f2))) # Unknown pred pred = array_ops.placeholder_with_default(True, shape=()) self.assertIsNone(utils.constant_value(utils.smart_cond(pred, f1, f2))) #Error case with self.assertRaises(TypeError): utils.constant_value(5) class GetReachableFromInputsTest(test.TestCase): @test_util.run_deprecated_v1 def testGetReachableFromInputs(self): pl_1 = array_ops.placeholder(shape=None, dtype='float32') pl_2 = array_ops.placeholder(shape=None, dtype='float32') pl_3 = array_ops.placeholder(shape=None, dtype='float32') x_1 = pl_1 + pl_2 x_2 = pl_2 * 2 x_3 = pl_3 + 1 x_4 = x_1 + x_2 x_5 = x_3 * pl_1 self.assertEqual({pl_1, x_1, x_4, x_5}, utils.get_reachable_from_inputs([pl_1])) self.assertEqual({pl_1, pl_2, x_1, x_2, x_4, x_5}, utils.get_reachable_from_inputs([pl_1, pl_2])) self.assertEqual({pl_3, x_3, x_5}, utils.get_reachable_from_inputs([pl_3])) self.assertEqual({x_3, x_5}, utils.get_reachable_from_inputs([x_3])) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/layers/utils_test.py
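The `ConvUtilsTest` cases above pin down the length arithmetic behind `utils.conv_output_length` and `utils.deconv_output_length`. As an independent cross-check (a standalone sketch consistent with those expected values, not the library implementation itself), the functions below reproduce the formulas the tests imply and assert a few of the same values.

def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
  # Effective filter width once dilation spreads the taps apart.
  dilated = filter_size + (filter_size - 1) * (dilation - 1)
  if padding == 'same':
    length = input_length
  elif padding == 'valid':
    length = input_length - dilated + 1
  elif padding == 'full':
    length = input_length + dilated - 1
  else:
    raise ValueError('unknown padding: %s' % padding)
  # Ceiling division by the stride.
  return (length + stride - 1) // stride


def deconv_output_length(input_length, filter_size, padding, stride):
  length = input_length * stride
  if padding == 'valid':
    length += max(filter_size - stride, 0)
  elif padding == 'full':
    length -= (stride + filter_size - 2)
  return length


# Same values as checked in testConvOutputLength / testDeconvOutputLength.
assert conv_output_length(4, 2, 'valid', 2, 1) == 2
assert conv_output_length(5, 2, 'valid', 2, 2) == 2
assert deconv_output_length(4, 2, 'same', 2) == 8
assert deconv_output_length(4, 2, 'full', 2) == 6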
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.layers.normalization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from tensorflow.core.protobuf import saver_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.layers import convolutional as conv_layers from tensorflow.python.layers import normalization as normalization_layers from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent from tensorflow.python.training import saver as saver_lib @test_util.run_v1_only('b/120545219') class BNTest(test.TestCase): def _simple_model(self, image, fused, freeze_mode): output_channels, kernel_size = 2, 3 conv = conv_layers.conv2d( image, output_channels, kernel_size, use_bias=False, kernel_initializer=init_ops.ones_initializer()) bn_layer = normalization_layers.BatchNormalization(fused=fused) bn_layer._bessels_correction_test_only = False training = not freeze_mode bn = bn_layer.apply(conv, training=training) loss = math_ops.reduce_sum(math_ops.abs(bn)) optimizer = gradient_descent.GradientDescentOptimizer(0.01) if not freeze_mode: update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS) with ops.control_dependencies(update_ops): train_op = optimizer.minimize(loss) else: train_op = optimizer.minimize(loss) saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2) return loss, train_op, saver def _train(self, checkpoint_path, shape, use_gpu, is_fused, restore=False, freeze_mode=False, dtype=dtypes.float32): ops.reset_default_graph() graph = ops.get_default_graph() with self.session(graph=graph, use_gpu=use_gpu) as sess: image = array_ops.placeholder(dtype=dtype, shape=shape) loss, train_op, saver = self._simple_model(image, is_fused, freeze_mode) if restore: saver.restore(sess, checkpoint_path) else: self.evaluate(variables.global_variables_initializer()) np.random.seed(0) for _ in range(2): image_val = np.random.rand(*shape).astype(dtype.as_numpy_dtype) sess.run([loss, train_op], feed_dict={image: image_val}) if restore: all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) all_vars_values = [var.eval() for var in all_vars] return all_vars_values else: saver.save(sess, checkpoint_path) def _infer(self, checkpoint_path, image_val, shape, use_gpu, is_fused): dtype = image_val.dtype ops.reset_default_graph() graph = ops.get_default_graph() with self.session(graph=graph, use_gpu=use_gpu) as sess: image = array_ops.placeholder(dtype=dtype, 
shape=shape) loss, _, saver = self._simple_model(image, is_fused, True) saver.restore(sess, checkpoint_path) loss_val = sess.run(loss, feed_dict={image: image_val}) return loss_val def _trainEvalSequence(self, dtype, train1_use_gpu, train2_use_gpu, infer_use_gpu): batch, height, width, input_channels = 2, 4, 5, 3 shape = [batch, height, width, input_channels] # Not all characters in a dtype string representation are allowed in # filenames in all operating systems. This map will sanitize these. dtype_to_valid_fn = { dtypes.float16: 'float16', dtypes.float32: 'float32', } checkpoint = os.path.join( self.get_temp_dir(), 'cp_%s_%s_%s_%s' % ( dtype_to_valid_fn[dtype], train1_use_gpu, train2_use_gpu, infer_use_gpu)) self._train( checkpoint, shape, use_gpu=train1_use_gpu, is_fused=True, restore=False, freeze_mode=False, dtype=dtype) train_vars = self._train( checkpoint, shape, use_gpu=train2_use_gpu, is_fused=True, restore=True, freeze_mode=False, dtype=dtype) np.random.seed(0) image_val = np.random.rand(batch, height, width, input_channels).astype( dtype.as_numpy_dtype) loss_val = self._infer( checkpoint, image_val, shape, use_gpu=infer_use_gpu, is_fused=True) return train_vars, loss_val def testHalfPrecision(self): ref_vars, ref_loss = self._trainEvalSequence( dtype=dtypes.float32, train1_use_gpu=True, train2_use_gpu=True, infer_use_gpu=True) self.assertEqual(len(ref_vars), 5) for train1_use_gpu in [True, False]: for train2_use_gpu in [True, False]: for infer_use_gpu in [True, False]: test_vars, test_loss = self._trainEvalSequence( dtypes.float16, train1_use_gpu, train2_use_gpu, infer_use_gpu) self.assertEqual(len(test_vars), 5) for test_var, ref_var in zip(test_vars, ref_vars): self.assertAllClose(test_var, ref_var, rtol=1.e-3, atol=1.e-3) self.assertAllClose(test_loss, ref_loss, rtol=1.e-3, atol=1.e-3) def _testCheckpoint(self, is_fused_checkpoint_a, is_fused_checkpoint_b, use_gpu_checkpoint_a, use_gpu_checkpoint_b, use_gpu_test_a, use_gpu_test_b, freeze_mode): batch, height, width, input_channels = 2, 4, 5, 3 shape = [batch, height, width, input_channels] base_path = '%s_%s_%s_%s_%s_%s' % (is_fused_checkpoint_a, is_fused_checkpoint_b, use_gpu_checkpoint_a, use_gpu_checkpoint_b, use_gpu_test_a, use_gpu_test_b) checkpoint_path_a = os.path.join(self.get_temp_dir(), 'checkpoint_a_%s' % base_path) self._train( checkpoint_path_a, shape, use_gpu_checkpoint_a, is_fused_checkpoint_a, restore=False, freeze_mode=freeze_mode) checkpoint_path_b = os.path.join(self.get_temp_dir(), 'checkpoint_b_%s' % base_path) self._train( checkpoint_path_b, shape, use_gpu_checkpoint_b, is_fused_checkpoint_b, restore=False, freeze_mode=freeze_mode) vars_fused = self._train( checkpoint_path_a, shape, use_gpu_test_a, True, restore=True, freeze_mode=freeze_mode) vars_nonfused = self._train( checkpoint_path_b, shape, use_gpu_test_b, False, restore=True, freeze_mode=freeze_mode) self.assertEqual(len(vars_fused), 5) self.assertEqual(len(vars_nonfused), 5) for var_fused, var_nonfused in zip(vars_fused, vars_nonfused): self.assertAllClose(var_fused, var_nonfused, atol=1e-5) image_val = np.random.rand(batch, height, width, input_channels).astype(np.float32) loss_fused_val = self._infer(checkpoint_path_a, image_val, shape, use_gpu_test_a, True) loss_nonfused_val = self._infer(checkpoint_path_b, image_val, shape, use_gpu_test_b, False) self.assertAllClose(loss_fused_val, loss_nonfused_val, atol=1e-6, rtol=3e-4) def _testCheckpointCrossDevice(self, ckpt_a_fused, ckpt_a_use_gpu, ckpt_b_fused, ckpt_b_use_gpu): for use_gpu_test_a in 
[True, False]: for use_gpu_test_b in [True, False]: for freeze_mode in [True, False]: self._testCheckpoint(ckpt_a_fused, ckpt_a_use_gpu, ckpt_b_fused, ckpt_b_use_gpu, use_gpu_test_a, use_gpu_test_b, freeze_mode) def testCheckpointFusedCPUAndFusedGPU(self): self._testCheckpointCrossDevice(True, False, True, True) def testCheckpointFusedCPUAndFusedCPU(self): self._testCheckpointCrossDevice(True, False, True, False) def testCheckpointFusedGPUAndFusedGPU(self): self._testCheckpointCrossDevice(True, True, True, True) def testCheckpointNonFusedCPUAndNonFusedGPU(self): self._testCheckpointCrossDevice(False, False, False, True) def testCheckpointNonFusedCPUAndNonFusedCPU(self): self._testCheckpointCrossDevice(False, False, False, False) def testCheckpointNonFusedGPUAndNonFusedGPU(self): self._testCheckpointCrossDevice(False, True, False, True) def testCheckpointNonFusedGPUAndFusedGPU(self): self._testCheckpointCrossDevice(False, True, True, True) def testCheckpointNonFusedGPUAndFusedCPU(self): self._testCheckpointCrossDevice(False, True, True, False) def testCheckpointNonFusedCPUAndFusedCPU(self): self._testCheckpointCrossDevice(False, False, True, False) def testCreateBN(self): # Call layer. bn = normalization_layers.BatchNormalization(axis=1) inputs = random_ops.random_uniform((5, 4, 3), seed=1) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) # Verify shape. self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3]) # Verify layer attributes. self.assertEqual(len(bn.updates), 2) self.assertEqual(len(bn.variables), 4) self.assertEqual(len(bn.trainable_variables), 2) self.assertEqual(len(bn.non_trainable_variables), 2) # Test that updates were created and added to UPDATE_OPS. self.assertEqual(len(bn.updates), 2) self.assertListEqual( ops.get_collection(ops.GraphKeys.UPDATE_OPS), bn.updates) # Test that weights were created and added to TRAINABLE_VARIABLES. self.assertListEqual( ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES), bn.trainable_variables) def testCreateFusedBNFloat16(self): # Call layer. bn = normalization_layers.BatchNormalization(axis=1, fused=True) inputs = random_ops.random_uniform( (5, 4, 3, 3), seed=1, dtype=dtypes.float16) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) # Verify shape. self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3, 3]) # Verify layer attributes. self.assertEqual(len(bn.updates), 2) self.assertEqual(len(bn.variables), 4) self.assertEqual(len(bn.trainable_variables), 2) self.assertEqual(len(bn.non_trainable_variables), 2) for var in bn.variables: self.assertEqual(var.dtype, dtypes.float32_ref) # Test that updates were created and added to UPDATE_OPS. self.assertEqual(len(bn.updates), 2) self.assertListEqual( ops.get_collection(ops.GraphKeys.UPDATE_OPS), bn.updates) # Test that weights were created and added to TRAINABLE_VARIABLES. self.assertListEqual( ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES), bn.trainable_variables) def test3DInputAxis1(self): if not test.is_gpu_available(cuda_only=True): self.skipTest("Only run on GPU.") epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=1, epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 4, 3)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. 
self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 4, 1)) np_beta = np.reshape(np_beta, (1, 4, 1)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 2)) std = np.std(np_inputs, axis=(0, 2)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test3DInputAxis2(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=2, epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 4, 3)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 3)) np_beta = np.reshape(np_beta, (1, 1, 3)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1)) std = np.std(np_inputs, axis=(0, 1)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test4DInputAxis1(self): if test.is_gpu_available(cuda_only=True): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=1, epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.session(use_gpu=True) as sess: # Test training with placeholder learning phase. 
self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 4, 1, 1)) np_beta = np.reshape(np_beta, (1, 4, 1, 1)) for _ in range(100): np_output, _, _ = sess.run( [outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 2, 3)) std = np.std(np_inputs, axis=(0, 2, 3)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test4DInputAxis2(self): if not test.is_gpu_available(cuda_only=True): self.skipTest("Only run on GPU.") epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=2, epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 3, 1)) np_beta = np.reshape(np_beta, (1, 1, 3, 1)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1, 3)) std = np.std(np_inputs, axis=(0, 1, 3)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test4DInputAxis3(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=3, epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. 
self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1, 2)) std = np.std(np_inputs, axis=(0, 1, 2)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test4DInputAxis3Fused(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=3, epsilon=epsilon, momentum=0.9, fused=True) inputs = variables.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) for _ in range(100): np_output, _, _ = sess.run( [outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1, 2)) std = np.std(np_inputs, axis=(0, 1, 2)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test4DInputAxis1Fused(self): if test.is_gpu_available(cuda_only=True): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=1, epsilon=epsilon, momentum=0.9, fused=True) inputs = variables.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. 
self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 4, 1, 1)) np_beta = np.reshape(np_beta, (1, 4, 1, 1)) for _ in range(100): np_output, _, _ = sess.run( [outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 2, 3)) std = np.std(np_inputs, axis=(0, 2, 3)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testNegativeAxis(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=-1, epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1, 2)) std = np.std(np_inputs, axis=(0, 1, 2)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testBooleanLearningPhase(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=-1, epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32) outputs_training = bn.apply(inputs, training=True) outputs_infer = bn.apply(inputs, training=False) with self.cached_session() as sess: # Test training with placeholder learning phase. 
self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) for _ in range(100): np_output, _, _ = sess.run([outputs_training] + bn.updates) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1, 2)) std = np.std(np_inputs, axis=(0, 1, 2)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = self.evaluate(outputs_infer) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testFunctionalNoReuse(self): inputs = variables.Variable( np.random.random((5, 4, 3, 6)), dtype=dtypes.float32) epsilon = 1e-3 training = array_ops.placeholder(dtype='bool') outputs = normalization_layers.batch_norm( inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training, name='bn') updates = ops.get_collection(ops.GraphKeys.UPDATE_OPS) all_vars = dict([(v.name, v) for v in variables.global_variables()]) moving_mean = all_vars['bn/moving_mean:0'] moving_variance = all_vars['bn/moving_variance:0'] beta = all_vars['bn/beta:0'] gamma = all_vars['bn/gamma:0'] with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([gamma, beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) for _ in range(100): np_output, _, _ = sess.run([outputs] + updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. np_moving_mean, np_moving_var = self.evaluate( [moving_mean, moving_variance]) np_inputs = self.evaluate(inputs) np_mean = np.mean(np_inputs, axis=(0, 1, 2)) np_std = np.std(np_inputs, axis=(0, 1, 2)) np_variance = np.square(np_std) self.assertAllClose(np_mean, np_moving_mean, atol=1e-2) self.assertAllClose(np_variance, np_moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testFunctionalReuse(self): inputs1 = variables.Variable( np.random.random((5, 4, 3, 6)), dtype=dtypes.float32) inputs2 = variables.Variable( np.random.random((5, 4, 3, 6)), dtype=dtypes.float32) epsilon = 1e-3 training = array_ops.placeholder(dtype='bool') _ = normalization_layers.batch_norm( inputs1, axis=-1, momentum=0.9, epsilon=epsilon, training=training, name='bn') outputs2 = normalization_layers.batch_norm( inputs2, axis=-1, momentum=0.9, epsilon=epsilon, training=training, name='bn', reuse=True) # Last 2 update ops updates = ops.get_collection(ops.GraphKeys.UPDATE_OPS)[-2:] all_vars = dict([(v.name, v) for v in variables.global_variables()]) moving_mean = all_vars['bn/moving_mean:0'] moving_variance = all_vars['bn/moving_variance:0'] beta = all_vars['bn/beta:0'] gamma = all_vars['bn/gamma:0'] with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(variables.global_variables_initializer()) for _ in range(100): np_output, _, _ = sess.run([outputs2] + updates, feed_dict={training: True}) # Verify that the statistics are updated during training. np_moving_mean, np_moving_var = self.evaluate( [moving_mean, moving_variance]) np_inputs = self.evaluate(inputs2) np_mean = np.mean(np_inputs, axis=(0, 1, 2)) np_std = np.std(np_inputs, axis=(0, 1, 2)) np_variance = np.square(np_std) self.assertAllClose(np_mean, np_moving_mean, atol=1e-2) self.assertAllClose(np_variance, np_moving_var, atol=1e-2) # Verify that the axis is normalized during training. np_gamma, np_beta = self.evaluate([gamma, beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Test inference with placeholder learning phase. np_output = sess.run(outputs2, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testFunctionalReuseFromScope(self): inputs = variables.Variable( np.random.random((5, 4, 3, 6)), dtype=dtypes.float32) epsilon = 1e-3 training = array_ops.placeholder(dtype='bool') with variable_scope.variable_scope('scope'): _ = normalization_layers.batch_norm( inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training) self.assertEqual(len(variables.global_variables()), 5) with variable_scope.variable_scope('scope', reuse=True): _ = normalization_layers.batch_norm( inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training) self.assertEqual(len(variables.global_variables()), 5) def testNoCenter(self): bn = normalization_layers.BatchNormalization(axis=1, center=False) inputs = random_ops.random_uniform((5, 4, 3), seed=1) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) # Verify shape. self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3]) # Verify layer attributes. 
self.assertEqual(len(bn.updates), 2) self.assertEqual(len(bn.variables), 3) self.assertEqual(len(bn.trainable_variables), 1) self.assertEqual(len(bn.non_trainable_variables), 2) def testNoScale(self): bn = normalization_layers.BatchNormalization(axis=1, scale=False) inputs = random_ops.random_uniform((5, 4, 3), seed=1) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) # Verify shape. self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3]) # Verify layer attributes. self.assertEqual(len(bn.updates), 2) self.assertEqual(len(bn.variables), 3) self.assertEqual(len(bn.trainable_variables), 1) self.assertEqual(len(bn.non_trainable_variables), 2) def testRegularizers(self): reg = lambda x: 0.1 * math_ops.reduce_sum(x) bn = normalization_layers.BatchNormalization(axis=1, beta_regularizer=reg) inputs = random_ops.random_uniform((5, 4, 3), seed=1) training = array_ops.placeholder(dtype='bool') _ = bn.apply(inputs, training=training) self.assertEqual(len(bn.losses), 1) bn = normalization_layers.BatchNormalization(axis=1, gamma_regularizer=reg) inputs = random_ops.random_uniform((5, 4, 3), seed=1) training = array_ops.placeholder(dtype='bool') _ = bn.apply(inputs, training=training) self.assertEqual(len(bn.losses), 1) def testConstraints(self): g_constraint = lambda x: x / math_ops.reduce_sum(x) b_constraint = lambda x: x / math_ops.reduce_max(x) bn = normalization_layers.BatchNormalization(axis=1, gamma_constraint=g_constraint, beta_constraint=b_constraint) inputs = random_ops.random_uniform((5, 4, 3), seed=1) bn(inputs) self.assertEqual(bn.gamma_constraint, g_constraint) self.assertEqual(bn.beta_constraint, b_constraint) def testRenorm(self): shape = (4, 3) xt = array_ops.placeholder(dtypes.float32, shape) momentum = 0.99 renorm_momentum = 0.8 rmax = 1.1 rmin = 0.9 dmax = 0.1 gamma = 2. beta = 3. epsilon = 0.001 bn = normalization_layers.BatchNormalization( axis=1, gamma_initializer=init_ops.constant_initializer(gamma), beta_initializer=init_ops.constant_initializer(beta), epsilon=epsilon, momentum=momentum, renorm=True, renorm_clipping={'rmax': rmax, 'rmin': rmin, 'dmax': dmax}, renorm_momentum=renorm_momentum) training = array_ops.placeholder(dtypes.bool) yt = bn.apply(xt, training=training) moving_mean = 0. moving_stddev = 1. renorm_mean = 0. renorm_stddev = 1. with self.session(use_gpu=True) as sess: self.evaluate(variables.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) mean = x.mean(0) variance = x.var(0) stddev = np.sqrt(variance + epsilon) r = (stddev / renorm_stddev).clip(rmin, rmax) d = ((mean - renorm_mean) / renorm_stddev).clip(-dmax, dmax) y_train = ((x - mean) / stddev * r + d) * gamma + beta renorm_mean += (mean - renorm_mean) * (1. - renorm_momentum) renorm_stddev += (stddev - renorm_stddev) * (1. - renorm_momentum) moving_mean += (mean - moving_mean) * (1. - momentum) moving_stddev += (stddev - moving_stddev) * (1. - momentum) y_test = ((x - moving_mean) / (moving_stddev * moving_stddev)**0.5 * gamma) + beta yt_val_train, _, _ = sess.run([yt] + bn.updates, feed_dict={xt: x, training: True}) yt_val_test, _, _ = sess.run([yt] + bn.updates, feed_dict={xt: x, training: False}) self.assertAllClose(y_train, yt_val_train, atol=1e-5) self.assertAllClose(y_test, yt_val_test, atol=1e-5) def testRenormNoClippingSameMomentumGivesSameTestTrain(self): shape = (4, 3) xt = array_ops.placeholder(dtypes.float32, shape) momentum = 0.9 renorm_momentum = 0.9 gamma = 2. beta = 3. 
epsilon = 0.001 bn = normalization_layers.BatchNormalization( axis=1, gamma_initializer=init_ops.constant_initializer(gamma), beta_initializer=init_ops.constant_initializer(beta), epsilon=epsilon, momentum=momentum, renorm=True, renorm_clipping=None, renorm_momentum=momentum) training = array_ops.placeholder(dtypes.bool) yt = bn.apply(xt, training=training) moving_mean = 0. moving_stddev = 1. renorm_mean = 0. renorm_stddev = 1. with self.session(use_gpu=True) as sess: self.evaluate(variables.global_variables_initializer()) for step in range(6): x = np.random.random(shape) mean = x.mean(0) variance = x.var(0) stddev = np.sqrt(variance + epsilon) r = (stddev / renorm_stddev) d = ((mean - renorm_mean) / renorm_stddev) y_test = ((x - moving_mean) / (moving_stddev * moving_stddev)**0.5 * gamma) + beta y_train = ((x - mean) / stddev * r + d) * gamma + beta renorm_mean += (mean - renorm_mean) * (1. - renorm_momentum) renorm_stddev += (stddev - renorm_stddev) * (1. - renorm_momentum) moving_mean += (mean - moving_mean) * (1. - momentum) moving_stddev += (stddev - moving_stddev) * (1. - momentum) # Compute test values first, before the train mode updates the moving # averages. yt_val_test, _, _ = sess.run([yt] + bn.updates, feed_dict={xt: x, training: False}) yt_val_train, _, _ = sess.run([yt] + bn.updates, feed_dict={xt: x, training: True}) # Due to initialization inconsistencies, values may not be identical # on the first iteration (but shouldn't be different by much more than # epsilon). After the first iteration they should be identical. atol = epsilon * 1.5 if step == 0 else 1e-5 self.assertAllClose(y_train, yt_val_train, atol=atol) self.assertAllClose(y_test, yt_val_test, atol=atol) self.assertAllClose(yt_val_train, yt_val_test, atol=atol) def testAdjustment(self): shape = (4, 3) xt = array_ops.placeholder(dtypes.float32, shape) momentum = 0.99 gamma = 2. beta = 3. epsilon = 0.001 adjust_scale = random_ops.random_uniform(shape[-1:], 0.5, 1.5) adjust_bias = random_ops.random_uniform(shape[-1:], -.2, .2) bn = normalization_layers.BatchNormalization( axis=1, gamma_initializer=init_ops.constant_initializer(gamma), beta_initializer=init_ops.constant_initializer(beta), epsilon=epsilon, momentum=momentum, adjustment=lambda _: (adjust_scale, adjust_bias)) training = array_ops.placeholder(dtypes.bool) yt = bn.apply(xt, training=training) moving_mean = 0. moving_variance = 1. with self.session(use_gpu=True) as sess: self.evaluate(variables.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) yt_val_train, adj_scale_val, adj_bias_val = sess.run( [yt, adjust_scale, adjust_bias] + bn.updates, feed_dict={xt: x, training: True})[:3] yt_val_test = sess.run([yt] + bn.updates, feed_dict={xt: x, training: False})[0] mean = x.mean(0) variance = x.var(0) y_train = (((x - mean) / (variance + epsilon) ** 0.5) * adj_scale_val + adj_bias_val) * gamma + beta moving_mean += (mean - moving_mean) * (1. - momentum) moving_variance += (variance - moving_variance) * (1. - momentum) y_test = ((x - moving_mean) / (moving_variance + epsilon) ** 0.5 * gamma) + beta self.assertAllClose(y_train, yt_val_train, atol=1e-5) self.assertAllClose(y_test, yt_val_test, atol=1e-5) def testRenormWithAdjustment(self): shape = (4, 3) xt = array_ops.placeholder(dtypes.float32, shape) momentum = 0.99 renorm_momentum = 0.8 rmax = 1.1 rmin = 0.9 dmax = 0.1 gamma = 2. beta = 3. 
epsilon = 0.001 adjust_scale = random_ops.random_uniform(shape[-1:], 0.5, 1.5) adjust_bias = random_ops.random_uniform(shape[-1:], -.2, .2) bn = normalization_layers.BatchNormalization( axis=1, gamma_initializer=init_ops.constant_initializer(gamma), beta_initializer=init_ops.constant_initializer(beta), epsilon=epsilon, momentum=momentum, renorm=True, renorm_clipping={'rmax': rmax, 'rmin': rmin, 'dmax': dmax}, renorm_momentum=renorm_momentum, adjustment=lambda _: (adjust_scale, adjust_bias)) training = array_ops.placeholder(dtypes.bool) yt = bn.apply(xt, training=training) moving_mean = 0. moving_stddev = 1. renorm_mean = 0. renorm_stddev = 1. with self.session(use_gpu=True) as sess: self.evaluate(variables.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) yt_val_train, adj_scale_val, adj_bias_val = sess.run( [yt, adjust_scale, adjust_bias] + bn.updates, feed_dict={xt: x, training: True})[:3] yt_val_test = sess.run([yt] + bn.updates, feed_dict={xt: x, training: False})[0] mean = x.mean(0) variance = x.var(0) stddev = np.sqrt(variance + epsilon) r = (stddev / renorm_stddev).clip(rmin, rmax) d = ((mean - renorm_mean) / renorm_stddev).clip(-dmax, dmax) y_train = (((x - mean) / stddev * r + d) * adj_scale_val + adj_bias_val) * gamma + beta renorm_mean += (mean - renorm_mean) * (1. - renorm_momentum) renorm_stddev += (stddev - renorm_stddev) * (1. - renorm_momentum) moving_mean += (mean - moving_mean) * (1. - momentum) moving_stddev += (stddev - moving_stddev) * (1. - momentum) y_test = ((x - moving_mean) / (moving_stddev * moving_stddev)**0.5 * gamma) + beta self.assertAllClose(y_train, yt_val_train, atol=1e-5) self.assertAllClose(y_test, yt_val_test, atol=1e-5) def testGhostBNNegativeVirtualBatch(self): shape = [6, 5, 4, 3] inp = random_ops.random_uniform(shape, seed=1) with self.assertRaises(ValueError): normalization_layers.batch_normalization( inp, virtual_batch_size=-1) def testGhostBNVirtualBatchFull(self): shape = [6, 5, 4, 3] inp = random_ops.random_uniform(shape, seed=1) out1 = normalization_layers.batch_normalization(inp) out2 = normalization_layers.batch_normalization( inp, virtual_batch_size=6) self.assertListEqual( out1.shape.as_list(), out2.shape.as_list()) with self.session(use_gpu=True) as sess: self.evaluate(variables.global_variables_initializer()) x = np.random.random(shape) y1, y2 = sess.run([out1, out2], feed_dict={inp: x}) self.assertAllClose(y1, y2, atol=1e-5) def testGhostBNInputOutputShapesMatch(self): shape = [6, 4, 3] inp = random_ops.random_uniform(shape, seed=1) out = normalization_layers.batch_normalization( inp, virtual_batch_size=3) self.assertListEqual(out.shape.as_list(), shape) def testGhostBNUnknownBatchSize(self): np_shape = [10, 5, 4] tf_shape = [None, 5, 4] inp = array_ops.placeholder(dtypes.float32, tf_shape) out = normalization_layers.batch_normalization( inp, virtual_batch_size=2) with self.session(use_gpu=True) as sess: self.evaluate(variables.global_variables_initializer()) x = np.random.random(np_shape) y = sess.run(out, feed_dict={inp: x}) self.assertListEqual(list(y.shape), np_shape) def testGhostBN2Dims(self): shape = [6, 2] virtual_batch_size = 3 beta = 2. gamma = 3. 
momentum = 0.8 epsilon = 1e-3 moving_means = np.zeros([2, 2], dtype=np.float32) moving_vars = np.ones([2, 2], dtype=np.float32) inp = array_ops.placeholder(dtypes.float32, shape) is_training = array_ops.placeholder(dtypes.bool) bn = normalization_layers.BatchNormalization( momentum=momentum, epsilon=epsilon, beta_initializer=init_ops.constant_initializer(beta), gamma_initializer=init_ops.constant_initializer(gamma), virtual_batch_size=virtual_batch_size) out = bn.apply(inp, training=is_training) ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size, shape[1]]) with self.session(use_gpu=True) as sess: self.evaluate(variables.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) sub_batched = np.reshape(x, ghost_shape) means = np.mean(sub_batched, axis=0, keepdims=True) variances = np.var(sub_batched, axis=0, keepdims=True) avg_means = np.mean(means, axis=1, keepdims=True) avg_variances = np.mean(variances, axis=1, keepdims=True) moving_means = moving_means * momentum + avg_means * (1. - momentum) moving_vars = moving_vars * momentum + avg_variances * (1. - momentum) y_train = ((sub_batched - means) / (variances + epsilon) ** 0.5 * gamma) + beta y_test = ((sub_batched - moving_means) / (moving_vars + epsilon) ** 0.5 * gamma) + beta y_train = np.reshape(y_train, shape) y_test = np.reshape(y_test, shape) y_val_train, _, _ = sess.run([out] + bn.updates, feed_dict={inp: x, is_training: True}) y_val_test = sess.run(out, feed_dict={inp: x, is_training: False}) self.assertAllClose(y_train, y_val_train, atol=1e-5) self.assertAllClose(y_test, y_val_test, atol=1e-5) def testGhostBN4DimsAxis3(self): shape = [6, 10, 10, 3] virtual_batch_size = 2 beta = 2. gamma = 3. momentum = 0.8 epsilon = 1e-3 moving_means = np.zeros([1, 1, 1, 1, 3], dtype=np.float32) moving_vars = np.ones([1, 1, 1, 1, 3], dtype=np.float32) inp = array_ops.placeholder(dtypes.float32, shape) is_training = array_ops.placeholder(dtypes.bool) bn = normalization_layers.BatchNormalization( axis=3, momentum=momentum, epsilon=epsilon, beta_initializer=init_ops.constant_initializer(beta), gamma_initializer=init_ops.constant_initializer(gamma), virtual_batch_size=virtual_batch_size) out = bn.apply(inp, training=is_training) ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] + shape[1:]) with self.session(use_gpu=True) as sess: self.evaluate(variables.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) sub_batched = np.reshape(x, ghost_shape) means = np.mean(sub_batched, axis=(0, 2, 3), keepdims=True) variances = np.var(sub_batched, axis=(0, 2, 3), keepdims=True) avg_means = np.mean(means, axis=1, keepdims=True) avg_variances = np.mean(variances, axis=1, keepdims=True) moving_means = moving_means * momentum + avg_means * (1. - momentum) moving_vars = moving_vars * momentum + avg_variances * (1. - momentum) y_train = ((sub_batched - means) / (variances + epsilon) ** 0.5 * gamma) + beta y_test = ((sub_batched - moving_means) / (moving_vars + epsilon) ** 0.5 * gamma) + beta y_train = np.reshape(y_train, shape) y_test = np.reshape(y_test, shape) y_val_train, _, _ = sess.run([out] + bn.updates, feed_dict={inp: x, is_training: True}) y_val_test = sess.run(out, feed_dict={inp: x, is_training: False}) self.assertAllClose(y_train, y_val_train, atol=1e-2) self.assertAllClose(y_test, y_val_test, atol=1e-2) def testGhostBN4DimsAxis1(self): shape = [6, 3, 10, 10] virtual_batch_size = 2 beta = 2. gamma = 3. 
momentum = 0.8 epsilon = 1e-3 moving_means = np.zeros([1, 1, 3, 1, 1], dtype=np.float32) moving_vars = np.ones([1, 1, 3, 1, 1], dtype=np.float32) inp = array_ops.placeholder(dtypes.float32, shape) is_training = array_ops.placeholder(dtypes.bool) bn = normalization_layers.BatchNormalization( axis=1, momentum=momentum, epsilon=epsilon, beta_initializer=init_ops.constant_initializer(beta), gamma_initializer=init_ops.constant_initializer(gamma), virtual_batch_size=virtual_batch_size, fused=False) # NCHW is unsupported by CPU fused batch norm out = bn.apply(inp, training=is_training) ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] + shape[1:]) with self.session(use_gpu=True) as sess: self.evaluate(variables.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) sub_batched = np.reshape(x, ghost_shape) means = np.mean(sub_batched, axis=(0, 3, 4), keepdims=True) variances = np.var(sub_batched, axis=(0, 3, 4), keepdims=True) avg_means = np.mean(means, axis=1, keepdims=True) avg_variances = np.mean(variances, axis=1, keepdims=True) moving_means = moving_means * momentum + avg_means * (1. - momentum) moving_vars = moving_vars * momentum + avg_variances * (1. - momentum) y_train = ((sub_batched - means) / (variances + epsilon) ** 0.5 * gamma) + beta y_test = ((sub_batched - moving_means) / (moving_vars + epsilon) ** 0.5 * gamma) + beta y_train = np.reshape(y_train, shape) y_test = np.reshape(y_test, shape) y_val_train, _, _ = sess.run([out] + bn.updates, feed_dict={inp: x, is_training: True}) y_val_test = sess.run(out, feed_dict={inp: x, is_training: False}) self.assertAllClose(y_train, y_val_train, atol=1e-2) self.assertAllClose(y_test, y_val_test, atol=1e-2) def testMultiAxisInvalid(self): shape = [6, 5, 4, 3] inp = random_ops.random_uniform(shape, seed=1) with self.assertRaises(ValueError): normalization_layers.batch_normalization( inp, axis=[1, 4]) # out of bounds with self.assertRaises(ValueError): normalization_layers.batch_normalization( inp, axis=[-5, 1]) # out of bounds with self.assertRaises(ValueError): normalization_layers.batch_normalization( inp, axis=[1, 2, 1]) # duplicate def test3DInputMultiAxis12(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=[1, 2], epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 4, 3)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=0, keepdims=True) std = np.std(np_inputs, axis=0, keepdims=True) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. 
np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test5DInputMultiAxis123(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=[1, 2, 3], epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 3, 4, 4, 3)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 4), keepdims=True) std = np.std(np_inputs, axis=(0, 4), keepdims=True) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testGhostBN5DimsMultiAxis14(self): shape = [6, 3, 10, 10, 4] virtual_batch_size = 3 beta = 2. gamma = 3. momentum = 0.8 epsilon = 1e-3 moving_means = np.zeros([1, 1, 3, 1, 1, 4], dtype=np.float32) moving_vars = np.ones([1, 1, 3, 1, 1, 4], dtype=np.float32) inp = array_ops.placeholder(dtypes.float32, shape) is_training = array_ops.placeholder(dtypes.bool) bn = normalization_layers.BatchNormalization( axis=[1, 4], momentum=momentum, epsilon=epsilon, beta_initializer=init_ops.constant_initializer(beta), gamma_initializer=init_ops.constant_initializer(gamma), virtual_batch_size=virtual_batch_size, fused=False) out = bn.apply(inp, training=is_training) ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] + shape[1:]) with self.session(use_gpu=True) as sess: self.evaluate(variables.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) sub_batched = np.reshape(x, ghost_shape) means = np.mean(sub_batched, axis=(0, 3, 4), keepdims=True) variances = np.var(sub_batched, axis=(0, 3, 4), keepdims=True) avg_means = np.mean(means, axis=1, keepdims=True) avg_variances = np.mean(variances, axis=1, keepdims=True) moving_means = moving_means * momentum + avg_means * (1. - momentum) moving_vars = moving_vars * momentum + avg_variances * (1. 
- momentum) y_train = ((sub_batched - means) / (variances + epsilon) ** 0.5 * gamma) + beta y_test = ((sub_batched - moving_means) / (moving_vars + epsilon) ** 0.5 * gamma) + beta y_train = np.reshape(y_train, shape) y_test = np.reshape(y_test, shape) y_val_train, _, _ = sess.run([out] + bn.updates, feed_dict={inp: x, is_training: True}) y_val_test = sess.run(out, feed_dict={inp: x, is_training: False}) self.assertAllClose(y_train, y_val_train, atol=1e-2) self.assertAllClose(y_test, y_val_test, atol=1e-2) if __name__ == '__main__': test.main()
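The tests above repeatedly compare the layer against the same NumPy reference math: normalize with the batch statistics in training mode, update the moving statistics with an exponential moving average, and normalize with the moving statistics at inference. A minimal, self-contained sketch of that reference (illustrative only; the constants are arbitrary, not taken from the tests):

```
import numpy as np

np.random.seed(0)
momentum, epsilon, gamma, beta = 0.9, 1e-3, 2.0, 3.0
moving_mean, moving_var = 0.0, 1.0

x = np.random.random((4, 3))
mean, var = x.mean(axis=0), x.var(axis=0)

# Training: normalize with the batch statistics.
y_train = (x - mean) / np.sqrt(var + epsilon) * gamma + beta

# The update ops (`bn.updates`) apply an exponential moving average.
moving_mean = moving_mean * momentum + mean * (1.0 - momentum)
moving_var = moving_var * momentum + var * (1.0 - momentum)

# Inference: normalize with the moving statistics instead.
y_test = (x - moving_mean) / np.sqrt(moving_var + epsilon) * gamma + beta
```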
tensorflow-r1.15.5-nv23.03
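The testRenorm cases above also exercise batch renormalization. The reference computation they check can be sketched as follows (illustrative only; `rmax`, `rmin`, `dmax` and the other constants are arbitrary): the training output is corrected by a clipped scale `r` and offset `d` derived from separate renorm moving statistics.

```
import numpy as np

np.random.seed(0)
epsilon, renorm_momentum = 1e-3, 0.8
rmax, rmin, dmax = 1.1, 0.9, 0.1
renorm_mean, renorm_stddev = 0.0, 1.0

x = np.random.random((4, 3))
mean, stddev = x.mean(axis=0), np.sqrt(x.var(axis=0) + epsilon)

# Clipped correction terms relative to the renorm moving statistics.
r = np.clip(stddev / renorm_stddev, rmin, rmax)
d = np.clip((mean - renorm_mean) / renorm_stddev, -dmax, dmax)

# Renormalized training output (gamma=1, beta=0 for brevity).
y_train = (x - mean) / stddev * r + d

# The renorm statistics use their own (typically faster) moving average.
renorm_mean += (mean - renorm_mean) * (1.0 - renorm_momentum)
renorm_stddev += (stddev - renorm_stddev) * (1.0 - renorm_momentum)
```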
tensorflow/python/layers/normalization_test.py
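The testGhostBN* cases above verify "ghost" batch normalization, where `virtual_batch_size` splits the batch into sub-batches that are normalized independently while the moving statistics track the average over sub-batches. A rough NumPy sketch of that reference, mirroring the [6, 2] case (illustrative only):

```
import numpy as np

np.random.seed(0)
virtual_batch_size, momentum, epsilon = 3, 0.8, 1e-3
x = np.random.random((6, 2))

# Reshape so axis 0 runs within a virtual batch and axis 1 indexes the
# virtual batch, matching the tests' ghost_shape.
sub_batched = np.reshape(x, (virtual_batch_size, 6 // virtual_batch_size, 2))
means = sub_batched.mean(axis=0, keepdims=True)
variances = sub_batched.var(axis=0, keepdims=True)

# Each virtual batch is normalized with its own statistics.
y_train = (sub_batched - means) / np.sqrt(variances + epsilon)
y_train = np.reshape(y_train, x.shape)

# The moving statistics track the average over virtual batches.
moving_mean = np.zeros((1, 1, 2))
moving_var = np.ones((1, 1, 2))
moving_mean = moving_mean * momentum + means.mean(axis=1, keepdims=True) * (1 - momentum)
moving_var = moving_var * momentum + variances.mean(axis=1, keepdims=True) * (1 - momentum)
```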
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Contains the pooling layer classes and their functional aliases. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras import layers as keras_layers from tensorflow.python.layers import base from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export @tf_export(v1=['layers.AveragePooling1D']) class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer): """Average Pooling layer for 1D inputs. Arguments: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(AveragePooling1D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use keras.layers.AveragePooling1D instead.') @tf_export(v1=['layers.average_pooling1d']) def average_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Average Pooling layer for 1D inputs. Arguments: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. Returns: The output tensor, of rank 3. Raises: ValueError: if eager execution is enabled. """ layer = AveragePooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) @tf_export(v1=['layers.MaxPooling1D']) class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer): """Max Pooling layer for 1D inputs. 
Arguments: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(MaxPooling1D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use keras.layers.MaxPooling1D instead.') @tf_export(v1=['layers.max_pooling1d']) def max_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Max Pooling layer for 1D inputs. Arguments: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. Returns: The output tensor, of rank 3. Raises: ValueError: if eager execution is enabled. """ layer = MaxPooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) @tf_export(v1=['layers.AveragePooling2D']) class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer): """Average pooling layer for 2D inputs (e.g. images). Arguments: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. 
""" def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(AveragePooling2D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use keras.layers.AveragePooling2D instead.') @tf_export(v1=['layers.average_pooling2d']) def average_pooling2d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Average pooling layer for 2D inputs (e.g. images). Arguments: inputs: The tensor over which to pool. Must have rank 4. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. """ layer = AveragePooling2D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) @tf_export(v1=['layers.MaxPooling2D']) class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer): """Max pooling layer for 2D inputs (e.g. images). Arguments: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(MaxPooling2D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use keras.layers.MaxPooling2D instead.') @tf_export(v1=['layers.max_pooling2d']) def max_pooling2d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Max pooling layer for 2D inputs (e.g. images). Arguments: inputs: The tensor over which to pool. Must have rank 4. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. 
Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. """ layer = MaxPooling2D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) @tf_export(v1=['layers.AveragePooling3D']) class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer): """Average pooling layer for 3D inputs (e.g. volumes). Arguments: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(AveragePooling3D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use keras.layers.AveragePooling3D instead.') @tf_export(v1=['layers.average_pooling3d']) def average_pooling3d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Average pooling layer for 3D inputs (e.g. volumes). Arguments: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. 
""" layer = AveragePooling3D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) @tf_export(v1=['layers.MaxPooling3D']) class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer): """Max pooling layer for 3D inputs (e.g. volumes). Arguments: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(MaxPooling3D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use keras.layers.MaxPooling3D instead.') @tf_export(v1=['layers.max_pooling3d']) def max_pooling3d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Max pooling layer for 3D inputs (e.g. volumes). Arguments: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. """ layer = MaxPooling3D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) # Aliases AvgPool2D = AveragePooling2D MaxPool2D = MaxPooling2D max_pool2d = max_pooling2d avg_pool2d = average_pooling2d
tensorflow-r1.15.5-nv23.03
tensorflow/python/layers/pooling.py
tensorflow-r1.15.5-nv23.03
tensorflow/python/layers/__init__.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Contains the core layers: Dense, Dropout. Also contains their functional aliases. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras import layers as keras_layers from tensorflow.python.layers import base from tensorflow.python.ops import init_ops from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export @tf_export(v1=['layers.Dense']) class Dense(keras_layers.Dense, base.Layer): """Densely-connected layer class. This layer implements the operation: `outputs = activation(inputs * kernel + bias)` Where `activation` is the activation function passed as the `activation` argument (if not `None`), `kernel` is a weights matrix created by the layer, and `bias` is a bias vector created by the layer (only if `use_bias` is `True`). Arguments: units: Integer or Long, dimensionality of the output space. activation: Activation function (callable). Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: Initializer function for the weight matrix. If `None` (default), weights are initialized using the default initializer used by `tf.compat.v1.get_variable`. bias_initializer: Initializer function for the bias. kernel_regularizer: Regularizer function for the weight matrix. bias_regularizer: Regularizer function for the bias. activity_regularizer: Regularizer function for the output. kernel_constraint: An optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: An optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. _reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Properties: units: Python integer, dimensionality of the output space. activation: Activation function (callable). use_bias: Boolean, whether the layer uses a bias. kernel_initializer: Initializer instance (or name) for the kernel matrix. bias_initializer: Initializer instance (or name) for the bias. kernel_regularizer: Regularizer instance for the kernel matrix (callable) bias_regularizer: Regularizer instance for the bias (callable). 
activity_regularizer: Regularizer instance for the output (callable) kernel_constraint: Constraint function for the kernel matrix. bias_constraint: Constraint function for the bias. kernel: Weight matrix (TensorFlow variable or tensor). bias: Bias vector, if applicable (TensorFlow variable or tensor). """ def __init__(self, units, activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(Dense, self).__init__(units=units, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @deprecation.deprecated( date=None, instructions='Use keras.layers.Dense instead.') @tf_export(v1=['layers.dense']) def dense( inputs, units, activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for the densely-connected layer. This layer implements the operation: `outputs = activation(inputs * kernel + bias)` where `activation` is the activation function passed as the `activation` argument (if not `None`), `kernel` is a weights matrix created by the layer, and `bias` is a bias vector created by the layer (only if `use_bias` is `True`). Arguments: inputs: Tensor input. units: Integer or Long, dimensionality of the output space. activation: Activation function (callable). Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: Initializer function for the weight matrix. If `None` (default), weights are initialized using the default initializer used by `tf.compat.v1.get_variable`. bias_initializer: Initializer function for the bias. kernel_regularizer: Regularizer function for the weight matrix. bias_regularizer: Regularizer function for the bias. activity_regularizer: Regularizer function for the output. kernel_constraint: An optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: An optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: String, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor the same shape as `inputs` except the last dimension is of size `units`. Raises: ValueError: if eager execution is enabled. 
""" layer = Dense(units, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _scope=name, _reuse=reuse) return layer.apply(inputs) @tf_export(v1=['layers.Dropout']) class Dropout(keras_layers.Dropout, base.Layer): """Applies Dropout to the input. Dropout consists in randomly setting a fraction `rate` of input units to 0 at each update during training time, which helps prevent overfitting. The units that are kept are scaled by `1 / (1 - rate)`, so that their sum is unchanged at training time and inference time. Arguments: rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out 10% of input units. noise_shape: 1D tensor of type `int32` representing the shape of the binary dropout mask that will be multiplied with the input. For instance, if your inputs have shape `(batch_size, timesteps, features)`, and you want the dropout mask to be the same for all timesteps, you can use `noise_shape=[batch_size, 1, features]`. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed`. for behavior. name: The name of the layer (string). """ def __init__(self, rate=0.5, noise_shape=None, seed=None, name=None, **kwargs): super(Dropout, self).__init__(rate=rate, noise_shape=noise_shape, seed=seed, name=name, **kwargs) def call(self, inputs, training=False): return super(Dropout, self).call(inputs, training=training) @deprecation.deprecated( date=None, instructions='Use keras.layers.dropout instead.') @tf_export(v1=['layers.dropout']) def dropout(inputs, rate=0.5, noise_shape=None, seed=None, training=False, name=None): """Applies Dropout to the input. Dropout consists in randomly setting a fraction `rate` of input units to 0 at each update during training time, which helps prevent overfitting. The units that are kept are scaled by `1 / (1 - rate)`, so that their sum is unchanged at training time and inference time. Arguments: inputs: Tensor input. rate: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop out 10% of input units. noise_shape: 1D tensor of type `int32` representing the shape of the binary dropout mask that will be multiplied with the input. For instance, if your inputs have shape `(batch_size, timesteps, features)`, and you want the dropout mask to be the same for all timesteps, you can use `noise_shape=[batch_size, 1, features]`. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. training: Either a Python boolean, or a TensorFlow boolean scalar tensor (e.g. a placeholder). Whether to return the output in training mode (apply dropout) or in inference mode (return the input untouched). name: The name of the layer (string). Returns: Output tensor. Raises: ValueError: if eager execution is enabled. """ layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name) return layer.apply(inputs, training=training) @tf_export(v1=['layers.Flatten']) class Flatten(keras_layers.Flatten, base.Layer): """Flattens an input tensor while preserving the batch axis (axis 0). Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. 
`channels_last` corresponds to inputs with shape `(batch, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, ...)`. Examples: ``` x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32') y = Flatten()(x) # now `y` has shape `(None, 16)` x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32') y = Flatten()(x) # now `y` has shape `(None, None)` ``` """ pass @deprecation.deprecated( date=None, instructions='Use keras.layers.flatten instead.') @tf_export(v1=['layers.flatten']) def flatten(inputs, name=None, data_format='channels_last'): """Flattens an input tensor while preserving the batch axis (axis 0). Arguments: inputs: Tensor input. name: The name of the layer (string). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. Returns: Reshaped tensor. Examples: ``` x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32') y = flatten(x) # now `y` has shape `(None, 16)` x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32') y = flatten(x) # now `y` has shape `(None, None)` ``` """ layer = Flatten(name=name, data_format=data_format) return layer.apply(inputs) # Aliases FullyConnected = Dense fully_connected = dense
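The Dense docstring above describes the operation `outputs = activation(inputs * kernel + bias)`. In plain NumPy terms (illustrative only; the shapes and ReLU activation are arbitrary choices):

```
import numpy as np

inputs = np.random.random((5, 3))   # (batch, in_features)
kernel = np.random.random((3, 2))   # (in_features, units)
bias = np.zeros(2)

outputs = np.maximum(inputs @ kernel + bias, 0.0)  # relu activation
print(outputs.shape)  # (5, 2)
```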
tensorflow-r1.15.5-nv23.03
tensorflow/python/layers/core.py
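A minimal graph-mode usage sketch for the functional aliases in this file, assuming the TF 1.x API (`tf.compat.v1.layers.dense` / `dropout`); per the dropout docstring, kept units are scaled by `1 / (1 - rate)` in training mode so the expected activation is unchanged:

```
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # no-op under TF 1.x; needed if run under TF 2

x = tf.placeholder(tf.float32, [None, 8])
training = tf.placeholder(tf.bool)

hidden = tf.layers.dense(x, 16, activation=tf.nn.relu, name='hidden')
hidden = tf.layers.dropout(hidden, rate=0.5, training=training)  # kept units scaled by 1/(1 - 0.5) when training
logits = tf.layers.dense(hidden, 2, name='logits')

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  out = sess.run(logits, feed_dict={x: np.ones((4, 8), np.float32), training: False})
  print(out.shape)  # (4, 2)
```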
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.layers.core.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.layers import core as core_layers from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test class DenseTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def testDenseProperties(self): dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense') self.assertEqual(dense.units, 2) self.assertEqual(dense.activation, nn_ops.relu) self.assertEqual(dense.kernel_regularizer, None) self.assertEqual(dense.bias_regularizer, None) self.assertEqual(dense.activity_regularizer, None) self.assertEqual(dense.use_bias, True) # Test auto-naming dense = core_layers.Dense(2, activation=nn_ops.relu) dense.apply(random_ops.random_uniform((5, 2))) self.assertEqual(dense.name, 'dense_1') dense = core_layers.Dense(2, activation=nn_ops.relu) dense.apply(random_ops.random_uniform((5, 2))) self.assertEqual(dense.name, 'dense_2') @test_util.run_deprecated_v1 def testVariableInput(self): with self.cached_session(): v = variable_scope.get_variable( 'X', initializer=init_ops.zeros_initializer(), shape=(1, 1)) x = core_layers.Dense(1)(v) variables.global_variables_initializer().run() self.assertAllEqual(x.eval(), [[0.0]]) @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testCall(self): dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense') inputs = random_ops.random_uniform((5, 4), seed=1) outputs = dense(inputs) self.assertListEqual([5, 2], outputs.get_shape().as_list()) self.assertListEqual(dense.variables, [dense.kernel, dense.bias]) self.assertListEqual(dense.trainable_variables, [dense.kernel, dense.bias]) self.assertListEqual(dense.non_trainable_variables, []) if not context.executing_eagerly(): self.assertEqual( len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2) self.assertEqual(dense.kernel.name, 'my_dense/kernel:0') self.assertEqual(dense.bias.name, 'my_dense/bias:0') @test_util.assert_no_new_pyobjects_executing_eagerly def testNoEagerLeak(self): # Tests that repeatedly constructing and building a Layer does not leak # Python objects. 
    inputs = random_ops.random_uniform((5, 4), seed=1)
    core_layers.Dense(5)(inputs)
    core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')(inputs)

  @test_util.run_in_graph_and_eager_modes
  def testCallTensorDot(self):
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
    inputs = random_ops.random_uniform((5, 4, 3), seed=1)
    outputs = dense(inputs)
    self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())

  @test_util.run_in_graph_and_eager_modes
  def testNoBias(self):
    dense = core_layers.Dense(2, use_bias=False, name='my_dense')
    inputs = random_ops.random_uniform((5, 2), seed=1)
    _ = dense(inputs)
    self.assertListEqual(dense.variables, [dense.kernel])
    self.assertListEqual(dense.trainable_variables, [dense.kernel])
    self.assertListEqual(dense.non_trainable_variables, [])
    if not context.executing_eagerly():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
    self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
    self.assertEqual(dense.bias, None)

  @test_util.run_in_graph_and_eager_modes
  def testNonTrainable(self):
    dense = core_layers.Dense(2, trainable=False, name='my_dense')
    inputs = random_ops.random_uniform((5, 2), seed=1)
    _ = dense(inputs)
    self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
    self.assertListEqual(dense.non_trainable_variables,
                         [dense.kernel, dense.bias])
    self.assertListEqual(dense.trainable_variables, [])
    if not context.executing_eagerly():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)

  @test_util.run_in_graph_and_eager_modes
  def testOutputShape(self):
    dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense.apply(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [5, 7])

    inputs = random_ops.random_uniform((5, 2, 3), seed=1)
    outputs = dense(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])

    inputs = random_ops.random_uniform((1, 2, 4, 3), seed=1)
    outputs = dense.apply(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])

  @test_util.run_deprecated_v1
  def testCallOnPlaceHolder(self):
    inputs = array_ops.placeholder(dtype=dtypes.float32)
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)

    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)

    inputs = array_ops.placeholder(
        dtype=dtypes.float32, shape=[None, None, None])
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)

    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
    dense = core_layers.Dense(4, name='my_dense')
    dense(inputs)

    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None, 3])
    dense = core_layers.Dense(4, name='my_dense')
    dense(inputs)

  @test_util.run_in_graph_and_eager_modes
  def testActivation(self):
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense(inputs)
    if not context.executing_eagerly():
      self.assertEqual(outputs.op.name, 'dense1/Relu')

    dense = core_layers.Dense(2, name='dense2')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense(inputs)
    if not context.executing_eagerly():
      self.assertEqual(outputs.op.name, 'dense2/BiasAdd')

  @test_util.run_deprecated_v1
  def testActivityRegularizer(self):
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    dense = core_layers.Dense(
        2, name='my_dense', activity_regularizer=regularizer)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = dense(inputs)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(dense.losses, loss_keys)

  @test_util.run_deprecated_v1
  def testKernelRegularizer(self):
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    dense = core_layers.Dense(
        2, name='my_dense', kernel_regularizer=regularizer)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = dense(inputs)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.evaluate([v.initializer for v in dense.variables])
    self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys))

  @test_util.run_deprecated_v1
  def testKernelRegularizerWithReuse(self):
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = core_layers.dense(
        inputs, 2, name='my_dense', kernel_regularizer=regularizer)
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
    _ = core_layers.dense(
        inputs, 2, name='my_dense', kernel_regularizer=regularizer, reuse=True)
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)

  @test_util.run_deprecated_v1
  def testBiasRegularizer(self):
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    dense = core_layers.Dense(2, name='my_dense', bias_regularizer=regularizer)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = dense(inputs)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.evaluate([v.initializer for v in dense.variables])
    self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys))

  @test_util.run_deprecated_v1
  def testFunctionalDense(self):
    with self.cached_session():
      inputs = random_ops.random_uniform((5, 3), seed=1)
      outputs = core_layers.dense(
          inputs, 2, activation=nn_ops.relu, name='my_dense')
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
      self.assertEqual(outputs.op.name, 'my_dense/Relu')

  @test_util.run_deprecated_v1
  def testFunctionalDenseTwice(self):
    inputs = random_ops.random_uniform((5, 3), seed=1)
    core_layers.dense(inputs, 2)
    vars1 = _get_variable_dict_from_varstore().values()
    core_layers.dense(inputs, 2)
    vars2 = _get_variable_dict_from_varstore().values()
    self.assertEqual(len(vars1), 2)
    self.assertEqual(len(vars2), 4)

  # TODO(alive): get this to work in eager mode.
  def testFunctionalDenseTwiceReuse(self):
    with self.cached_session():
      inputs = random_ops.random_uniform((5, 3), seed=1)
      core_layers.dense(inputs, 2, name='my_dense')
      vars1 = variables.trainable_variables()
      core_layers.dense(inputs, 2, name='my_dense', reuse=True)
      vars2 = variables.trainable_variables()
      self.assertEqual(vars1, vars2)

  # TODO(alive): get this to work in eager mode.
  def testFunctionalDenseTwiceReuseFromScope(self):
    with self.cached_session():
      with variable_scope.variable_scope('scope'):
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2, name='my_dense')
        vars1 = variables.trainable_variables()
      with variable_scope.variable_scope('scope', reuse=True):
        core_layers.dense(inputs, 2, name='my_dense')
        vars2 = variables.trainable_variables()
      self.assertEqual(vars1, vars2)

  @test_util.run_deprecated_v1
  def testFunctionalDenseInitializerFromScope(self):
    with variable_scope.variable_scope(
        'scope',
        initializer=init_ops.ones_initializer()), self.cached_session():
      inputs = random_ops.random_uniform((5, 3), seed=1)
      core_layers.dense(inputs, 2)
      variables.global_variables_initializer().run()
      weights = _get_variable_dict_from_varstore()
      self.assertEqual(len(weights), 2)
      # Check that the matrix weights got initialized to ones (from scope).
      self.assertAllClose(weights['scope/dense/kernel'].read_value().eval(),
                          np.ones((3, 2)))
      # Check that the bias still got initialized to zeros.
      self.assertAllClose(weights['scope/dense/bias'].read_value().eval(),
                          np.zeros((2)))

  def testEagerExecution(self):
    with context.eager_mode():
      container = variable_scope.EagerVariableStore()
      x = constant_op.constant([[2.0]])
      with container.as_default():
        y = core_layers.dense(
            x, 1, name='my_dense',
            kernel_initializer=init_ops.ones_initializer())
      self.assertAllEqual(y, [[2.0]])
      self.assertEqual(len(container.variables()), 2)
      # Recreate the layer to test reuse.
      with container.as_default():
        core_layers.dense(
            x, 1, name='my_dense',
            kernel_initializer=init_ops.ones_initializer())
      self.assertEqual(len(container.variables()), 2)

  def testFunctionalDenseWithCustomGetter(self):
    called = [0]

    def custom_getter(getter, *args, **kwargs):
      called[0] += 1
      return getter(*args, **kwargs)

    with variable_scope.variable_scope('test', custom_getter=custom_getter):
      inputs = random_ops.random_uniform((5, 3), seed=1)
      core_layers.dense(inputs, 2)
    self.assertEqual(called[0], 2)

  @test_util.run_deprecated_v1
  def testFunctionalDenseInScope(self):
    with self.cached_session():
      with variable_scope.variable_scope('test'):
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2, name='my_dense')
        var_dict = _get_variable_dict_from_varstore()
        var_key = 'test/my_dense/kernel'
        self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
      with variable_scope.variable_scope('test1') as scope:
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2, name=scope)
        var_dict = _get_variable_dict_from_varstore()
        var_key = 'test1/kernel'
        self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
      with variable_scope.variable_scope('test2'):
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2)
        var_dict = _get_variable_dict_from_varstore()
        var_key = 'test2/dense/kernel'
        self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)

  @test_util.run_in_graph_and_eager_modes
  def testComputeOutputShape(self):
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
    ts = tensor_shape.TensorShape
    # pylint: disable=protected-access
    with self.assertRaises(ValueError):
      dense.compute_output_shape(ts(None))
    with self.assertRaises(ValueError):
      dense.compute_output_shape(ts([]))
    with self.assertRaises(ValueError):
      dense.compute_output_shape(ts([1]))
    self.assertEqual(
        [None, 2],
        dense.compute_output_shape((None, 3)).as_list())
    self.assertEqual(
        [None, 2],
        dense.compute_output_shape(ts([None, 3])).as_list())
    self.assertEqual(
        [None, 4, 2],
        dense.compute_output_shape(ts([None, 4, 3])).as_list())
    # pylint: enable=protected-access

  @test_util.run_in_graph_and_eager_modes
  def testConstraints(self):
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    dense = core_layers.Dense(2,
                              kernel_constraint=k_constraint,
                              bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    dense(inputs)
    self.assertEqual(dense.kernel_constraint, k_constraint)
    self.assertEqual(dense.bias_constraint, b_constraint)


def _get_variable_dict_from_varstore():
  var_dict = variable_scope._get_default_variable_store()._vars  # pylint: disable=protected-access
  sorted_var_dict = collections.OrderedDict(
      sorted(var_dict.items(), key=lambda t: t[0]))
  return sorted_var_dict


class DropoutTest(test.TestCase):

  @test_util.run_in_graph_and_eager_modes
  def testDropoutProperties(self):
    dp = core_layers.Dropout(0.5, name='dropout')
    self.assertEqual(dp.rate, 0.5)
    self.assertEqual(dp.noise_shape, None)
    dp.apply(array_ops.ones(()))
    self.assertEqual(dp.name, 'dropout')

  @test_util.run_in_graph_and_eager_modes
  def testBooleanLearningPhase(self):
    dp = core_layers.Dropout(0.5)
    inputs = array_ops.ones((5, 3))
    dropped = dp.apply(inputs, training=True)
    if not context.executing_eagerly():
      self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    dropped = dp.apply(inputs, training=False)
    np_output = self.evaluate(dropped)
    self.assertAllClose(np.ones((5, 3)), np_output)

  @test_util.run_deprecated_v1
  def testDynamicLearningPhase(self):
    with self.cached_session() as sess:
      dp = core_layers.Dropout(0.5, seed=1)
      inputs = array_ops.ones((5, 5))
      training = array_ops.placeholder(dtype='bool')
      dropped = dp.apply(inputs, training=training)
      self.evaluate(variables.global_variables_initializer())
      np_output = sess.run(dropped, feed_dict={training: True})
      self.assertAlmostEqual(0., np_output.min())
      np_output = sess.run(dropped, feed_dict={training: False})
      self.assertAllClose(np.ones((5, 5)), np_output)

  @test_util.run_in_graph_and_eager_modes
  def testDynamicNoiseShape(self):
    inputs = array_ops.ones((5, 3, 2))
    noise_shape = [None, 1, None]
    dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
    dropped = dp.apply(inputs, training=True)
    self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])

  def testCustomNoiseShape(self):
    inputs = array_ops.ones((5, 3, 2))
    noise_shape = [5, 1, 2]
    dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
    dropped = dp.apply(inputs, training=True)
    self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])

  @test_util.run_deprecated_v1
  def testFunctionalDropout(self):
    with self.cached_session():
      inputs = array_ops.ones((5, 5))
      dropped = core_layers.dropout(inputs, 0.5, training=True, seed=1)
      variables.global_variables_initializer().run()
      np_output = self.evaluate(dropped)
      self.assertAlmostEqual(0., np_output.min())
      dropped = core_layers.dropout(inputs, 0.5, training=False, seed=1)
      np_output = self.evaluate(dropped)
      self.assertAllClose(np.ones((5, 5)), np_output)

  @test_util.run_deprecated_v1
  def testDynamicRate(self):
    with self.cached_session() as sess:
      rate = array_ops.placeholder(dtype='float32', name='rate')
      dp = core_layers.Dropout(rate, name='dropout')
      inputs = array_ops.ones((5, 5))
      dropped = dp.apply(inputs, training=True)
      self.evaluate(variables.global_variables_initializer())
      np_output = sess.run(dropped, feed_dict={rate: 0.5})
      self.assertAlmostEqual(0., np_output.min())
      np_output = sess.run(dropped, feed_dict={rate: 0.0})
      self.assertAllClose(np.ones((5, 5)), np_output)


class FlattenTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testCreateFlatten(self):
    with self.cached_session() as sess:
      x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((3, 2, 3))})
      self.assertEqual(list(np_output.shape), [3, 6])
      self.assertEqual(y.get_shape().as_list(), [None, 6])

      x = array_ops.placeholder(shape=(1, 2, 3, 2), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((1, 2, 3, 2))})
      self.assertEqual(list(np_output.shape), [1, 12])
      self.assertEqual(y.get_shape().as_list(), [1, 12])

  def testComputeShape(self):
    shape = core_layers.Flatten().compute_output_shape((1, 2, 3, 2))
    self.assertEqual(shape.as_list(), [1, 12])

    shape = core_layers.Flatten().compute_output_shape((None, 3, 2))
    self.assertEqual(shape.as_list(), [None, 6])

    shape = core_layers.Flatten().compute_output_shape((None, 3, None))
    self.assertEqual(shape.as_list(), [None, None])

  @test_util.run_deprecated_v1
  def testDataFormat5d(self):
    np_input_channels_last = np.arange(
        120, dtype='float32').reshape([1, 5, 4, 3, 2])

    with self.test_session() as sess:
      x = array_ops.placeholder(shape=(1, 5, 4, 3, 2), dtype='float32')
      y = core_layers.Flatten(data_format='channels_last')(x)
      np_output_cl = sess.run(y, feed_dict={x: np_input_channels_last})

      x = array_ops.placeholder(shape=(1, 2, 5, 4, 3), dtype='float32')
      y = core_layers.Flatten(data_format='channels_first')(x)
      np_input_channels_first = np.transpose(np_input_channels_last,
                                             [0, 4, 1, 2, 3])
      np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first})

      self.assertAllEqual(np_output_cl, np_output_cf)

  @test_util.run_deprecated_v1
  def testDataFormat4d(self):
    np_input_channels_last = np.arange(
        24, dtype='float32').reshape([1, 4, 3, 2])

    with self.test_session() as sess:
      x = array_ops.placeholder(shape=(1, 4, 3, 2), dtype='float32')
      y = core_layers.Flatten(data_format='channels_last')(x)
      np_output_cl = sess.run(y, feed_dict={x: np_input_channels_last})

      x = array_ops.placeholder(shape=(1, 2, 4, 3), dtype='float32')
      y = core_layers.Flatten(data_format='channels_first')(x)
      np_input_channels_first = np.transpose(np_input_channels_last,
                                             [0, 3, 1, 2])
      np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first})

      self.assertAllEqual(np_output_cl, np_output_cf)

  @test_util.run_deprecated_v1
  def testFunctionalFlatten(self):
    x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
    y = core_layers.flatten(x, name='flatten')
    self.assertEqual(y.get_shape().as_list(), [None, 6])

  @test_util.run_deprecated_v1
  def testFlatten0D(self):
    x = array_ops.placeholder(shape=(None,), dtype='float32')
    y = core_layers.Flatten()(x)
    with self.cached_session() as sess:
      np_output = sess.run(y, feed_dict={x: np.zeros((5,))})
    self.assertEqual(list(np_output.shape), [5, 1])
    self.assertEqual(y.shape.as_list(), [None, 1])

  @test_util.run_deprecated_v1
  def testFlattenUnknownAxes(self):
    with self.cached_session() as sess:
      x = array_ops.placeholder(shape=(5, None, None), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((5, 2, 3))})
      self.assertEqual(list(np_output.shape), [5, 6])
      self.assertEqual(y.get_shape().as_list(), [5, None])

      x = array_ops.placeholder(shape=(5, None, 2), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((5, 3, 2))})
      self.assertEqual(list(np_output.shape), [5, 6])
      self.assertEqual(y.get_shape().as_list(), [5, None])


if __name__ == '__main__':
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/layers/core_test.py