# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """MaskedAutoregressiveFlow bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.layers import core as layers from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import template as template_ops from tensorflow.python.ops import variable_scope as variable_scope_lib from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "MaskedAutoregressiveFlow", "masked_autoregressive_default_template", "masked_dense", ] class MaskedAutoregressiveFlow(bijector.Bijector): """Affine MaskedAutoregressiveFlow bijector for vector-valued events. The affine autoregressive flow [(Papamakarios et al., 2016)][3] provides a relatively simple framework for user-specified (deep) architectures to learn a distribution over vector-valued events. Regarding terminology, "Autoregressive models decompose the joint density as a product of conditionals, and model each conditional in turn. Normalizing flows transform a base density (e.g. a standard Gaussian) into the target density by an invertible transformation with tractable Jacobian." [(Papamakarios et al., 2016)][3] In other words, the "autoregressive property" is equivalent to the decomposition, `p(x) = prod{ p(x[i] | x[0:i]) : i=0, ..., d }`. The provided `shift_and_log_scale_fn`, `masked_autoregressive_default_template`, achieves this property by zeroing out weights in its `masked_dense` layers. In the `tfp` framework, a "normalizing flow" is implemented as a `tfp.bijectors.Bijector`. The `forward` "autoregression" is implemented using a `tf.while_loop` and a deep neural network (DNN) with masked weights such that the autoregressive property is automatically met in the `inverse`. A `TransformedDistribution` using `MaskedAutoregressiveFlow(...)` uses the (expensive) forward-mode calculation to draw samples and the (cheap) reverse-mode calculation to compute log-probabilities. Conversely, a `TransformedDistribution` using `Invert(MaskedAutoregressiveFlow(...))` uses the (expensive) forward-mode calculation to compute log-probabilities and the (cheap) reverse-mode calculation to compute samples. See "Example Use" [below] for more details. Given a `shift_and_log_scale_fn`, the forward and inverse transformations are (a sequence of) affine transformations. 
A "valid" `shift_and_log_scale_fn` must compute each `shift` (aka `loc` or "mu" in [Germain et al. (2015)][1]) and `log(scale)` (aka "alpha" in [Germain et al. (2015)][1]) such that each are broadcastable with the arguments to `forward` and `inverse`, i.e., such that the calculations in `forward`, `inverse` [below] are possible. For convenience, `masked_autoregressive_default_template` is offered as a possible `shift_and_log_scale_fn` function. It implements the MADE architecture [(Germain et al., 2015)][1]. MADE is a feed-forward network that computes a `shift` and `log(scale)` using `masked_dense` layers in a deep neural network. Weights are masked to ensure the autoregressive property. It is possible that this architecture is suboptimal for your task. To build alternative networks, either change the arguments to `masked_autoregressive_default_template`, use the `masked_dense` function to roll-out your own, or use some other architecture, e.g., using `tf.layers`. Warning: no attempt is made to validate that the `shift_and_log_scale_fn` enforces the "autoregressive property". Assuming `shift_and_log_scale_fn` has valid shape and autoregressive semantics, the forward transformation is ```python def forward(x): y = zeros_like(x) event_size = x.shape[-1] for _ in range(event_size): shift, log_scale = shift_and_log_scale_fn(y) y = x * math_ops.exp(log_scale) + shift return y ``` and the inverse transformation is ```python def inverse(y): shift, log_scale = shift_and_log_scale_fn(y) return (y - shift) / math_ops.exp(log_scale) ``` Notice that the `inverse` does not need a for-loop. This is because in the forward pass each calculation of `shift` and `log_scale` is based on the `y` calculated so far (not `x`). In the `inverse`, the `y` is fully known, thus is equivalent to the scaling used in `forward` after `event_size` passes, i.e., the "last" `y` used to compute `shift`, `log_scale`. (Roughly speaking, this also proves the transform is bijective.) #### Examples ```python import tensorflow_probability as tfp tfd = tfp.distributions tfb = tfp.bijectors dims = 5 # A common choice for a normalizing flow is to use a Gaussian for the base # distribution. (However, any continuous distribution would work.) E.g., maf = tfd.TransformedDistribution( distribution=tfd.Normal(loc=0., scale=1.), bijector=tfb.MaskedAutoregressiveFlow( shift_and_log_scale_fn=tfb.masked_autoregressive_default_template( hidden_layers=[512, 512])), event_shape=[dims]) x = maf.sample() # Expensive; uses `tf.while_loop`, no Bijector caching. maf.log_prob(x) # Almost free; uses Bijector caching. maf.log_prob(0.) # Cheap; no `tf.while_loop` despite no Bijector caching. # [Papamakarios et al. (2016)][3] also describe an Inverse Autoregressive # Flow [(Kingma et al., 2016)][2]: iaf = tfd.TransformedDistribution( distribution=tfd.Normal(loc=0., scale=1.), bijector=tfb.Invert(tfb.MaskedAutoregressiveFlow( shift_and_log_scale_fn=tfb.masked_autoregressive_default_template( hidden_layers=[512, 512]))), event_shape=[dims]) x = iaf.sample() # Cheap; no `tf.while_loop` despite no Bijector caching. iaf.log_prob(x) # Almost free; uses Bijector caching. iaf.log_prob(0.) # Expensive; uses `tf.while_loop`, no Bijector caching. # In many (if not most) cases the default `shift_and_log_scale_fn` will be a # poor choice. Here's an example of using a "shift only" version and with a # different number/depth of hidden layers. 
shift_only = True maf_no_scale_hidden2 = tfd.TransformedDistribution( distribution=tfd.Normal(loc=0., scale=1.), bijector=tfb.MaskedAutoregressiveFlow( tfb.masked_autoregressive_default_template( hidden_layers=[32], shift_only=shift_only), is_constant_jacobian=shift_only), event_shape=[dims]) ``` #### References [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: Masked Autoencoder for Distribution Estimation. In _International Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509 [2]: Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, and Max Welling. Improving Variational Inference with Inverse Autoregressive Flow. In _Neural Information Processing Systems_, 2016. https://arxiv.org/abs/1606.04934 [3]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked Autoregressive Flow for Density Estimation. In _Neural Information Processing Systems_, 2017. https://arxiv.org/abs/1705.07057 """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, shift_and_log_scale_fn, is_constant_jacobian=False, validate_args=False, unroll_loop=False, name=None): """Creates the MaskedAutoregressiveFlow bijector. Args: shift_and_log_scale_fn: Python `callable` which computes `shift` and `log_scale` from both the forward domain (`x`) and the inverse domain (`y`). Calculation must respect the "autoregressive property" (see class docstring). Suggested default `masked_autoregressive_default_template(hidden_layers=...)`. Typically the function contains `tf.Variables` and is wrapped using `tf.compat.v1.make_template`. Returning `None` for either (both) `shift`, `log_scale` is equivalent to (but more efficient than) returning zero. is_constant_jacobian: Python `bool`. Default: `False`. When `True` the implementation assumes `log_scale` does not depend on the forward domain (`x`) or inverse domain (`y`) values. (No validation is made; `is_constant_jacobian=False` is always safe but possibly computationally inefficient.) validate_args: Python `bool` indicating whether arguments should be checked for correctness. unroll_loop: Python `bool` indicating whether the `tf.while_loop` in `_forward` should be replaced with a static for loop. Requires that the final dimension of `x` be known at graph construction time. Defaults to `False`. name: Python `str`, name given to ops managed by this object. """ name = name or "masked_autoregressive_flow" self._shift_and_log_scale_fn = shift_and_log_scale_fn self._unroll_loop = unroll_loop super(MaskedAutoregressiveFlow, self).__init__( forward_min_event_ndims=1, is_constant_jacobian=is_constant_jacobian, validate_args=validate_args, name=name) def _forward(self, x): if self._unroll_loop: event_size = tensor_shape.dimension_value( x.shape.with_rank_at_least(1)[-1]) if event_size is None: raise ValueError( "The final dimension of `x` must be known at graph construction " "time if `unroll_loop=True`. 
`x.shape: %r`" % x.shape) y = array_ops.zeros_like(x, name="y0") for _ in range(event_size): shift, log_scale = self._shift_and_log_scale_fn(y) # next_y = scale * x + shift next_y = x if log_scale is not None: next_y *= math_ops.exp(log_scale) if shift is not None: next_y += shift y = next_y return y event_size = array_ops.shape(x)[-1] # If the event size is available at graph construction time, we can inform # the graph compiler of the maximum number of steps. If not, # static_event_size will be None, and the maximum_iterations argument will # have no effect. static_event_size = tensor_shape.dimension_value( x.shape.with_rank_at_least(1)[-1]) y0 = array_ops.zeros_like(x, name="y0") # call the template once to ensure creation _ = self._shift_and_log_scale_fn(y0) def _loop_body(index, y0): """While-loop body for autoregression calculation.""" # Set caching device to avoid re-getting the tf.Variable for every while # loop iteration. with variable_scope_lib.variable_scope( variable_scope_lib.get_variable_scope()) as vs: if vs.caching_device is None: vs.set_caching_device(lambda op: op.device) shift, log_scale = self._shift_and_log_scale_fn(y0) y = x if log_scale is not None: y *= math_ops.exp(log_scale) if shift is not None: y += shift return index + 1, y _, y = control_flow_ops.while_loop( cond=lambda index, _: index < event_size, body=_loop_body, loop_vars=(0, y0), maximum_iterations=static_event_size) return y def _inverse(self, y): shift, log_scale = self._shift_and_log_scale_fn(y) x = y if shift is not None: x -= shift if log_scale is not None: x *= math_ops.exp(-log_scale) return x def _inverse_log_det_jacobian(self, y): _, log_scale = self._shift_and_log_scale_fn(y) if log_scale is None: return constant_op.constant(0., dtype=y.dtype, name="ildj") return -math_ops.reduce_sum(log_scale, axis=-1) MASK_INCLUSIVE = "inclusive" MASK_EXCLUSIVE = "exclusive" @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _gen_slices(num_blocks, n_in, n_out, mask_type=MASK_EXCLUSIVE): """Generate the slices for building an autoregressive mask.""" # TODO(b/67594795): Better support of dynamic shape. slices = [] col = 0 d_in = n_in // num_blocks d_out = n_out // num_blocks row = d_out if mask_type == MASK_EXCLUSIVE else 0 for _ in range(num_blocks): row_slice = slice(row, None) col_slice = slice(col, col + d_in) slices.append([row_slice, col_slice]) col += d_in row += d_out return slices @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _gen_mask(num_blocks, n_in, n_out, mask_type=MASK_EXCLUSIVE, dtype=dtypes.float32): """Generate the mask for building an autoregressive dense layer.""" # TODO(b/67594795): Better support of dynamic shape. mask = np.zeros([n_out, n_in], dtype=dtype.as_numpy_dtype()) slices = _gen_slices(num_blocks, n_in, n_out, mask_type=mask_type) for [row_slice, col_slice] in slices: mask[row_slice, col_slice] = 1 return mask @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). 
You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def masked_dense(inputs, units, num_blocks=None, exclusive=False, kernel_initializer=None, reuse=None, name=None, *args, **kwargs): """A autoregressively masked dense layer. Analogous to `tf.compat.v1.layers.dense`. See [Germain et al. (2015)][1] for detailed explanation. Arguments: inputs: Tensor input. units: Python `int` scalar representing the dimensionality of the output space. num_blocks: Python `int` scalar representing the number of blocks for the MADE masks. exclusive: Python `bool` scalar representing whether to zero the diagonal of the mask, used for the first layer of a MADE. kernel_initializer: Initializer function for the weight matrix. If `None` (default), weights are initialized using the `tf.glorot_random_initializer`. reuse: Python `bool` scalar representing whether to reuse the weights of a previous layer by the same name. name: Python `str` used to describe ops managed by this function. *args: `tf.compat.v1.layers.dense` arguments. **kwargs: `tf.compat.v1.layers.dense` keyword arguments. Returns: Output tensor. Raises: NotImplementedError: if rightmost dimension of `inputs` is unknown prior to graph execution. #### References [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: Masked Autoencoder for Distribution Estimation. In _International Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509 """ # TODO(b/67594795): Better support of dynamic shape. input_depth = tensor_shape.dimension_value( inputs.shape.with_rank_at_least(1)[-1]) if input_depth is None: raise NotImplementedError( "Rightmost dimension must be known prior to graph execution.") mask = _gen_mask(num_blocks, input_depth, units, MASK_EXCLUSIVE if exclusive else MASK_INCLUSIVE).T if kernel_initializer is None: kernel_initializer = init_ops.glorot_normal_initializer() def masked_initializer(shape, dtype=None, partition_info=None): return mask * kernel_initializer(shape, dtype, partition_info) with ops.name_scope(name, "masked_dense", [inputs, units, num_blocks]): layer = layers.Dense( units, kernel_initializer=masked_initializer, kernel_constraint=lambda x: mask * x, name=name, dtype=inputs.dtype.base_dtype, _scope=name, _reuse=reuse, *args, **kwargs) return layer.apply(inputs) @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def masked_autoregressive_default_template( hidden_layers, shift_only=False, activation=nn_ops.relu, log_scale_min_clip=-5., log_scale_max_clip=3., log_scale_clip_gradient=False, name=None, *args, **kwargs): """Build the Masked Autoregressive Density Estimator (Germain et al., 2015). This will be wrapped in a make_template to ensure the variables are only created once. It takes the input and returns the `loc` ("mu" in [Germain et al. (2015)][1]) and `log_scale` ("alpha" in [Germain et al. (2015)][1]) from the MADE network. Warning: This function uses `masked_dense` to create randomly initialized `tf.Variables`. It is presumed that these will be fit, just as you would any other neural architecture which uses `tf.compat.v1.layers.dense`. 
#### About Hidden Layers Each element of `hidden_layers` should be greater than the `input_depth` (i.e., `input_depth = tf.shape(input)[-1]` where `input` is the input to the neural network). This is necessary to ensure the autoregressivity property. #### About Clipping This function also optionally clips the `log_scale` (but possibly not its gradient). This is useful because if `log_scale` is too small/large it might underflow/overflow making it impossible for the `MaskedAutoregressiveFlow` bijector to implement a bijection. Additionally, the `log_scale_clip_gradient` `bool` indicates whether the gradient should also be clipped. The default does not clip the gradient; this is useful because it still provides gradient information (for fitting) yet solves the numerical stability problem. I.e., `log_scale_clip_gradient = False` means `grad[exp(clip(x))] = grad[x] exp(clip(x))` rather than the usual `grad[clip(x)] exp(clip(x))`. Args: hidden_layers: Python `list`-like of non-negative integer, scalars indicating the number of units in each hidden layer. Default: `[512, 512]. shift_only: Python `bool` indicating if only the `shift` term shall be computed. Default: `False`. activation: Activation function (callable). Explicitly setting to `None` implies a linear activation. log_scale_min_clip: `float`-like scalar `Tensor`, or a `Tensor` with the same shape as `log_scale`. The minimum value to clip by. Default: -5. log_scale_max_clip: `float`-like scalar `Tensor`, or a `Tensor` with the same shape as `log_scale`. The maximum value to clip by. Default: 3. log_scale_clip_gradient: Python `bool` indicating that the gradient of `tf.clip_by_value` should be preserved. Default: `False`. name: A name for ops managed by this function. Default: "masked_autoregressive_default_template". *args: `tf.compat.v1.layers.dense` arguments. **kwargs: `tf.compat.v1.layers.dense` keyword arguments. Returns: shift: `Float`-like `Tensor` of shift terms (the "mu" in [Germain et al. (2015)][1]). log_scale: `Float`-like `Tensor` of log(scale) terms (the "alpha" in [Germain et al. (2015)][1]). Raises: NotImplementedError: if rightmost dimension of `inputs` is unknown prior to graph execution. #### References [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: Masked Autoencoder for Distribution Estimation. In _International Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509 """ name = name or "masked_autoregressive_default_template" with ops.name_scope(name, values=[log_scale_min_clip, log_scale_max_clip]): def _fn(x): """MADE parameterized via `masked_autoregressive_default_template`.""" # TODO(b/67594795): Better support of dynamic shape. 
input_depth = tensor_shape.dimension_value( x.shape.with_rank_at_least(1)[-1]) if input_depth is None: raise NotImplementedError( "Rightmost dimension must be known prior to graph execution.") input_shape = ( np.int32(x.shape.as_list()) if x.shape.is_fully_defined() else array_ops.shape(x)) for i, units in enumerate(hidden_layers): x = masked_dense( inputs=x, units=units, num_blocks=input_depth, exclusive=True if i == 0 else False, activation=activation, *args, **kwargs) x = masked_dense( inputs=x, units=(1 if shift_only else 2) * input_depth, num_blocks=input_depth, activation=None, *args, **kwargs) if shift_only: x = array_ops.reshape(x, shape=input_shape) return x, None x = array_ops.reshape( x, shape=array_ops.concat([input_shape, [2]], axis=0)) shift, log_scale = array_ops.unstack(x, num=2, axis=-1) which_clip = ( math_ops.clip_by_value if log_scale_clip_gradient else _clip_by_value_preserve_grad) log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip) return shift, log_scale return template_ops.make_template(name, _fn) @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _clip_by_value_preserve_grad(x, clip_value_min, clip_value_max, name=None): """Clips input while leaving gradient unaltered.""" with ops.name_scope(name, "clip_by_value_preserve_grad", [x, clip_value_min, clip_value_max]): clip_x = clip_ops.clip_by_value(x, clip_value_min, clip_value_max) return x + array_ops.stop_gradient(clip_x - x)
Source file: tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py (repo: tensorflow-r1.15.5-nv23.03)
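To make the forward/inverse pseudocode in the `MaskedAutoregressiveFlow` docstring above concrete, here is a minimal NumPy sketch, independent of the TF API in that file. The toy `shift_and_log_scale_fn` is hypothetical: strictly lower-triangular weight matrices give it the autoregressive property that the real bijector assumes but does not validate.

```python
# NumPy sketch of the MAF forward/inverse recursion (toy, not the TF code).
import numpy as np

rng = np.random.RandomState(0)
event_size = 5

# Strictly lower-triangular weights => output i depends only on inputs 0..i-1.
w_shift = np.tril(rng.randn(event_size, event_size), k=-1)
w_log_scale = np.tril(rng.randn(event_size, event_size), k=-1) * 0.1


def shift_and_log_scale_fn(y):
  return w_shift @ y, w_log_scale @ y


def forward(x):
  # Each pass fixes one more coordinate of y, so `event_size` passes suffice.
  y = np.zeros_like(x)
  for _ in range(event_size):
    shift, log_scale = shift_and_log_scale_fn(y)
    y = x * np.exp(log_scale) + shift
  return y


def inverse(y):
  # No loop needed: shift/log_scale depend only on y, which is fully known.
  shift, log_scale = shift_and_log_scale_fn(y)
  return (y - shift) * np.exp(-log_scale)


x = rng.randn(event_size)
print(np.allclose(inverse(forward(x)), x))  # True
```

Coordinate `i` of `y` becomes correct on pass `i + 1`, which is why the forward pass needs `event_size` iterations while the inverse needs a single evaluation, exactly as the docstring argues.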
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Batch Norm bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import ops from tensorflow.python.layers import normalization from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "BatchNormalization", ] @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _undo_batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None): r"""Inverse of tf.nn.batch_normalization. Args: x: Input `Tensor` of arbitrary dimensionality. mean: A mean `Tensor`. variance: A variance `Tensor`. offset: An offset `Tensor`, often denoted `beta` in equations, or None. If present, will be added to the normalized tensor. scale: A scale `Tensor`, often denoted `gamma` in equations, or `None`. If present, the scale is applied to the normalized tensor. variance_epsilon: A small `float` added to the minibatch `variance` to prevent dividing by zero. name: A name for this operation (optional). Returns: batch_unnormalized: The de-normalized, de-scaled, de-offset `Tensor`. """ with ops.name_scope(name, "undo_batchnorm", [x, mean, variance, scale, offset]): # inv = math_ops.rsqrt(variance + variance_epsilon) # if scale is not None: # inv *= scale # return x * inv + ( # offset - mean * inv if offset is not None else -mean * inv) rescale = math_ops.sqrt(variance + variance_epsilon) if scale is not None: rescale /= scale batch_unnormalized = x * rescale + ( mean - offset * rescale if offset is not None else mean) return batch_unnormalized class BatchNormalization(bijector.Bijector): """Compute `Y = g(X) s.t. X = g^-1(Y) = (Y - mean(Y)) / std(Y)`. Applies Batch Normalization [(Ioffe and Szegedy, 2015)][1] to samples from a data distribution. This can be used to stabilize training of normalizing flows ([Papamakarios et al., 2016][3]; [Dinh et al., 2017][2]) When training Deep Neural Networks (DNNs), it is common practice to normalize or whiten features by shifting them to have zero mean and scaling them to have unit variance. The `inverse()` method of the `BatchNormalization` bijector, which is used in the log-likelihood computation of data samples, implements the normalization procedure (shift-and-scale) using the mean and standard deviation of the current minibatch. Conversely, the `forward()` method of the bijector de-normalizes samples (e.g. 
`X*std(Y) + mean(Y)` with the running-average mean and standard deviation computed at training-time. De-normalization is useful for sampling. ```python dist = tfd.TransformedDistribution( distribution=tfd.Normal()), bijector=tfb.BatchNorm()) y = tfd.MultivariateNormalDiag(loc=1., scale=2.).sample(100) # ~ N(1, 2) x = dist.bijector.inverse(y) # ~ N(0, 1) y = dist.sample() # ~ N(1, 2) ``` During training time, `BatchNorm.inverse` and `BatchNorm.forward` are not guaranteed to be inverses of each other because `inverse(y)` uses statistics of the current minibatch, while `forward(x)` uses running-average statistics accumulated from training. In other words, `BatchNorm.inverse(BatchNorm.forward(...))` and `BatchNorm.forward(BatchNorm.inverse(...))` will be identical when `training=False` but may be different when `training=True`. #### References [1]: Sergey Ioffe and Christian Szegedy. Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. In _International Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03167 [2]: Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density Estimation using Real NVP. In _International Conference on Learning Representations_, 2017. https://arxiv.org/abs/1605.08803 [3]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked Autoregressive Flow for Density Estimation. In _Neural Information Processing Systems_, 2017. https://arxiv.org/abs/1705.07057 """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, batchnorm_layer=None, training=True, validate_args=False, name="batch_normalization"): """Instantiates the `BatchNorm` bijector. Args: batchnorm_layer: `tf.compat.v1.layers.BatchNormalization` layer object. If `None`, defaults to `tf.compat.v1.layers.BatchNormalization(gamma_constraint=nn_ops.relu(x) + 1e-6)`. This ensures positivity of the scale variable. training: If True, updates running-average statistics during call to `inverse()`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. Raises: ValueError: If bn_layer is not an instance of `tf.compat.v1.layers.BatchNormalization`, or if it is specified with `renorm=True` or a virtual batch size. """ # Scale must be positive. g_constraint = lambda x: nn.relu(x) + 1e-6 self.batchnorm = batchnorm_layer or normalization.BatchNormalization( gamma_constraint=g_constraint) self._validate_bn_layer(self.batchnorm) self._training = training if isinstance(self.batchnorm.axis, int): forward_min_event_ndims = 1 else: forward_min_event_ndims = len(self.batchnorm.axis) super(BatchNormalization, self).__init__( forward_min_event_ndims=forward_min_event_ndims, validate_args=validate_args, name=name) def _validate_bn_layer(self, layer): """Check for valid BatchNormalization layer. Args: layer: Instance of `tf.compat.v1.layers.BatchNormalization`. Raises: ValueError: If batchnorm_layer argument is not an instance of `tf.compat.v1.layers.BatchNormalization`, or if `batchnorm_layer.renorm=True` or if `batchnorm_layer.virtual_batch_size` is specified. 
""" if not isinstance(layer, normalization.BatchNormalization): raise ValueError( "batchnorm_layer must be an instance of BatchNormalization layer.") if layer.renorm: raise ValueError("BatchNorm Bijector does not support renormalization.") if layer.virtual_batch_size: raise ValueError( "BatchNorm Bijector does not support virtual batch sizes.") def _get_broadcast_fn(self, x): # Compute shape to broadcast scale/shift parameters to. if not x.shape.is_fully_defined(): raise ValueError("Input must have shape known at graph construction.") input_shape = np.int32(x.shape.as_list()) ndims = len(input_shape) reduction_axes = [i for i in range(ndims) if i not in self.batchnorm.axis] # Broadcasting only necessary for single-axis batch norm where the axis is # not the last dimension broadcast_shape = [1] * ndims broadcast_shape[self.batchnorm.axis[0]] = ( input_shape[self.batchnorm.axis[0]]) def _broadcast(v): if (v is not None and len(v.get_shape()) != ndims and reduction_axes != list(range(ndims - 1))): return array_ops.reshape(v, broadcast_shape) return v return _broadcast def _normalize(self, y): return self.batchnorm.apply(y, training=self._training) def _de_normalize(self, x): # Uses the saved statistics. if not self.batchnorm.built: input_shape = x.get_shape() self.batchnorm.build(input_shape) broadcast_fn = self._get_broadcast_fn(x) mean = broadcast_fn(self.batchnorm.moving_mean) variance = broadcast_fn(self.batchnorm.moving_variance) beta = broadcast_fn(self.batchnorm.beta) if self.batchnorm.center else None gamma = broadcast_fn(self.batchnorm.gamma) if self.batchnorm.scale else None return _undo_batch_normalization(x, mean, variance, beta, gamma, self.batchnorm.epsilon) def _forward(self, x): return self._de_normalize(x) def _inverse(self, y): return self._normalize(y) def _forward_log_det_jacobian(self, x): # Uses saved statistics to compute volume distortion. return -self._inverse_log_det_jacobian(x, use_saved_statistics=True) def _inverse_log_det_jacobian(self, y, use_saved_statistics=False): if not y.shape.is_fully_defined(): raise ValueError("Input must have shape known at graph construction.") input_shape = np.int32(y.shape.as_list()) if not self.batchnorm.built: # Create variables. self.batchnorm.build(input_shape) event_dims = self.batchnorm.axis reduction_axes = [i for i in range(len(input_shape)) if i not in event_dims] if use_saved_statistics or not self._training: log_variance = math_ops.log(self.batchnorm.moving_variance + self.batchnorm.epsilon) else: # At training-time, ildj is computed from the mean and log-variance across # the current minibatch. _, v = nn.moments(y, axes=reduction_axes, keepdims=True) log_variance = math_ops.log(v + self.batchnorm.epsilon) # `gamma` and `log Var(y)` reductions over event_dims. # Log(total change in area from gamma term). log_total_gamma = math_ops.reduce_sum(math_ops.log(self.batchnorm.gamma)) # Log(total change in area from log-variance term). log_total_variance = math_ops.reduce_sum(log_variance) # The ildj is scalar, as it does not depend on the values of x and are # constant across minibatch elements. return log_total_gamma - 0.5 * log_total_variance
Source file: tensorflow/contrib/distributions/python/ops/bijectors/batch_normalization.py (repo: tensorflow-r1.15.5-nv23.03)
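A minimal NumPy sketch, independent of the TF layer above, of the normalize / de-normalize pair that `_undo_batch_normalization` implements. With fixed (e.g. moving-average) statistics the two maps are exact inverses; during training they generally are not, as the docstring notes. The helper names are illustrative only.

```python
import numpy as np

def batch_normalize(y, mean, variance, beta, gamma, eps):
  # Standard batch-norm "inverse()" direction: shift-and-scale to zero mean, unit variance.
  return gamma * (y - mean) / np.sqrt(variance + eps) + beta

def undo_batch_normalization(x, mean, variance, beta, gamma, eps):
  # Mirrors the TF code: rescale = sqrt(var + eps) / gamma, then x * rescale + (mean - beta * rescale).
  rescale = np.sqrt(variance + eps) / gamma
  return x * rescale + (mean - beta * rescale)

rng = np.random.RandomState(0)
y = rng.randn(100, 4) * 2.0 + 1.0
mean, variance = y.mean(0), y.var(0)
beta, gamma, eps = 0.3, 1.5, 1e-3

x = batch_normalize(y, mean, variance, beta, gamma, eps)
print(np.allclose(undo_batch_normalization(x, mean, variance, beta, gamma, eps), y))  # True
```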
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """CholeskyOuterProduct bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.ops.distributions import util as distribution_util from tensorflow.python.util import deprecation __all__ = [ "CholeskyOuterProduct", ] class CholeskyOuterProduct(bijector.Bijector): """Compute `g(X) = X @ X.T`; X is lower-triangular, positive-diagonal matrix. Note: the upper-triangular part of X is ignored (whether or not its zero). The surjectivity of g as a map from the set of n x n positive-diagonal lower-triangular matrices to the set of SPD matrices follows immediately from executing the Cholesky factorization algorithm on an SPD matrix A to produce a positive-diagonal lower-triangular matrix L such that `A = L @ L.T`. To prove the injectivity of g, suppose that L_1 and L_2 are lower-triangular with positive diagonals and satisfy `A = L_1 @ L_1.T = L_2 @ L_2.T`. Then `inv(L_1) @ A @ inv(L_1).T = [inv(L_1) @ L_2] @ [inv(L_1) @ L_2].T = I`. Setting `L_3 := inv(L_1) @ L_2`, that L_3 is a positive-diagonal lower-triangular matrix follows from `inv(L_1)` being positive-diagonal lower-triangular (which follows from the diagonal of a triangular matrix being its spectrum), and that the product of two positive-diagonal lower-triangular matrices is another positive-diagonal lower-triangular matrix. A simple inductive argument (proceeding one column of L_3 at a time) shows that, if `I = L_3 @ L_3.T`, with L_3 being lower-triangular with positive- diagonal, then `L_3 = I`. Thus, `L_1 = L_2`, proving injectivity of g. #### Examples ```python bijector.CholeskyOuterProduct().forward(x=[[1., 0], [2, 1]]) # Result: [[1., 2], [2, 5]], i.e., x @ x.T bijector.CholeskyOuterProduct().inverse(y=[[1., 2], [2, 5]]) # Result: [[1., 0], [2, 1]], i.e., cholesky(y). ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, validate_args=False, name="cholesky_outer_product"): """Instantiates the `CholeskyOuterProduct` bijector. Args: validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. 
""" self._graph_parents = [] self._name = name super(CholeskyOuterProduct, self).__init__( forward_min_event_ndims=2, validate_args=validate_args, name=name) def _forward(self, x): if self.validate_args: is_matrix = check_ops.assert_rank_at_least(x, 2) shape = array_ops.shape(x) is_square = check_ops.assert_equal(shape[-2], shape[-1]) x = control_flow_ops.with_dependencies([is_matrix, is_square], x) # For safety, explicitly zero-out the upper triangular part. x = array_ops.matrix_band_part(x, -1, 0) return math_ops.matmul(x, x, adjoint_b=True) def _inverse(self, y): return linalg_ops.cholesky(y) def _forward_log_det_jacobian(self, x): # Let Y be a symmetric, positive definite matrix and write: # Y = X X.T # where X is lower-triangular. # # Observe that, # dY[i,j]/dX[a,b] # = d/dX[a,b] { X[i,:] X[j,:] } # = sum_{d=1}^p { I[i=a] I[d=b] X[j,d] + I[j=a] I[d=b] X[i,d] } # # To compute the Jacobian dX/dY we must represent X,Y as vectors. Since Y is # symmetric and X is lower-triangular, we need vectors of dimension: # d = p (p + 1) / 2 # where X, Y are p x p matrices, p > 0. We use a row-major mapping, i.e., # k = { i (i + 1) / 2 + j i>=j # { undef i<j # and assume zero-based indexes. When k is undef, the element is dropped. # Example: # j k # 0 1 2 3 / # 0 [ 0 . . . ] # i 1 [ 1 2 . . ] # 2 [ 3 4 5 . ] # 3 [ 6 7 8 9 ] # Write vec[.] to indicate transforming a matrix to vector via k(i,j). (With # slight abuse: k(i,j)=undef means the element is dropped.) # # We now show d vec[Y] / d vec[X] is lower triangular. Assuming both are # defined, observe that k(i,j) < k(a,b) iff (1) i<a or (2) i=a and j<b. # In both cases dvec[Y]/dvec[X]@[k(i,j),k(a,b)] = 0 since: # (1) j<=i<a thus i,j!=a. # (2) i=a>j thus i,j!=a. # # Since the Jacobian is lower-triangular, we need only compute the product # of diagonal elements: # d vec[Y] / d vec[X] @[k(i,j), k(i,j)] # = X[j,j] + I[i=j] X[i,j] # = 2 X[j,j]. # Since there is a 2 X[j,j] term for every lower-triangular element of X we # conclude: # |Jac(d vec[Y]/d vec[X])| = 2^p prod_{j=0}^{p-1} X[j,j]^{p-j}. diag = array_ops.matrix_diag_part(x) # We now ensure diag is columnar. Eg, if `diag = [1, 2, 3]` then the output # is `[[1], [2], [3]]` and if `diag = [[1, 2, 3], [4, 5, 6]]` then the # output is unchanged. diag = self._make_columnar(diag) if self.validate_args: is_matrix = check_ops.assert_rank_at_least( x, 2, message="Input must be a (batch of) matrix.") shape = array_ops.shape(x) is_square = check_ops.assert_equal( shape[-2], shape[-1], message="Input must be a (batch of) square matrix.") # Assuming lower-triangular means we only need check diag>0. is_positive_definite = check_ops.assert_positive( diag, message="Input must be positive definite.") x = control_flow_ops.with_dependencies( [is_matrix, is_square, is_positive_definite], x) # Create a vector equal to: [p, p-1, ..., 2, 1]. if x.get_shape().ndims is None or x.get_shape().dims[-1].value is None: p_int = array_ops.shape(x)[-1] p_float = math_ops.cast(p_int, dtype=x.dtype) else: p_int = x.get_shape().dims[-1].value p_float = np.array(p_int, dtype=x.dtype.as_numpy_dtype) exponents = math_ops.linspace(p_float, 1., p_int) sum_weighted_log_diag = array_ops.squeeze( math_ops.matmul(math_ops.log(diag), exponents[..., array_ops.newaxis]), axis=-1) fldj = p_float * np.log(2.) + sum_weighted_log_diag # We finally need to undo adding an extra column in non-scalar cases # where there is a single matrix as input. 
if x.get_shape().ndims is not None: if x.get_shape().ndims == 2: fldj = array_ops.squeeze(fldj, axis=-1) return fldj shape = array_ops.shape(fldj) maybe_squeeze_shape = array_ops.concat([ shape[:-1], distribution_util.pick_vector( math_ops.equal(array_ops.rank(x), 2), np.array([], dtype=np.int32), shape[-1:])], 0) return array_ops.reshape(fldj, maybe_squeeze_shape) def _make_columnar(self, x): """Ensures non-scalar input has at least one column. Example: If `x = [1, 2, 3]` then the output is `[[1], [2], [3]]`. If `x = [[1, 2, 3], [4, 5, 6]]` then the output is unchanged. If `x = 1` then the output is unchanged. Args: x: `Tensor`. Returns: columnar_x: `Tensor` with at least two dimensions. """ if x.get_shape().ndims is not None: if x.get_shape().ndims == 1: x = x[array_ops.newaxis, :] return x shape = array_ops.shape(x) maybe_expanded_shape = array_ops.concat([ shape[:-1], distribution_util.pick_vector( math_ops.equal(array_ops.rank(x), 1), [1], np.array([], dtype=np.int32)), shape[-1:], ], 0) return array_ops.reshape(x, maybe_expanded_shape)
Source file: tensorflow/contrib/distributions/python/ops/bijectors/cholesky_outer_product.py (repo: tensorflow-r1.15.5-nv23.03)
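The Jacobian derivation in `_forward_log_det_jacobian` above reduces to the closed form `|Jac| = 2^p * prod_j X[j, j]^(p - j)`. Below is a minimal NumPy sketch, not the TF bijector itself, of the forward/inverse maps and that formula, exercised on the docstring's 2x2 example.

```python
import numpy as np

def forward(l):
  l = np.tril(l)                      # upper triangle is ignored, as in the TF code
  return l @ l.T

def inverse(y):
  return np.linalg.cholesky(y)

def forward_log_det_jacobian(l):
  p = l.shape[-1]
  diag = np.diag(np.tril(l))
  exponents = np.arange(p, 0, -1)     # [p, p-1, ..., 1], i.e. exponent p - j for column j
  return p * np.log(2.) + np.sum(exponents * np.log(diag))

l = np.array([[1., 0.], [2., 1.]])
y = forward(l)                        # [[1., 2.], [2., 5.]]
print(np.allclose(inverse(y), l))     # True
print(forward_log_det_jacobian(l))    # 2 * log(2) ~= 1.3863
```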
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Permutation bijectors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Permute", ] class Permute(bijector.Bijector): """Permutes the rightmost dimension of a `Tensor`. ```python import tensorflow_probability as tfp tfb = tfp.bijectors reverse = tfb.Permute(permutation=[2, 1, 0]) reverse.forward([-1., 0., 1.]) # ==> [1., 0., -1] reverse.inverse([1., 0., -1]) # ==> [-1., 0., 1.] reverse.forward_log_det_jacobian(any_value) # ==> 0. reverse.inverse_log_det_jacobian(any_value) # ==> 0. ``` Warning: `tf.estimator` may repeatedly build the graph thus `Permute(np.random.permutation(event_size)).astype("int32"))` is not a reliable parameterization (nor would it be even if using `tf.constant`). A safe alternative is to use `tf.compat.v1.get_variable` to achieve "init once" behavior, i.e., ```python def init_once(x, name): return tf.compat.v1.get_variable(name, initializer=x, trainable=False) Permute(permutation=init_once( np.random.permutation(event_size).astype("int32"), name="permutation")) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, permutation, validate_args=False, name=None): """Creates the `Permute` bijector. Args: permutation: An `int`-like vector-shaped `Tensor` representing the permutation to apply to the rightmost dimension of the transformed `Tensor`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str`, name given to ops managed by this object. Raises: TypeError: if `not permutation.dtype.is_integer`. ValueError: if `permutation` does not contain exactly one of each of `{0, 1, ..., d}`. 
""" with ops.name_scope(name, "permute", values=[permutation]): permutation = ops.convert_to_tensor(permutation, name="permutation") if not permutation.dtype.is_integer: raise TypeError("permutation.dtype ({}) should be `int`-like.".format( permutation.dtype.name)) p = tensor_util.constant_value(permutation) if p is not None: if set(p) != set(np.arange(p.size)): raise ValueError("Permutation over `d` must contain exactly one of " "each of `{0, 1, ..., d}`.") elif validate_args: p, _ = nn_ops.top_k( -permutation, k=array_ops.shape(permutation)[-1], sorted=True) permutation = control_flow_ops.with_dependencies([ check_ops.assert_equal( -p, math_ops.range(array_ops.size(p)), message=("Permutation over `d` must contain exactly one of " "each of `{0, 1, ..., d}`.")), ], permutation) self._permutation = permutation super(Permute, self).__init__( forward_min_event_ndims=1, is_constant_jacobian=True, validate_args=validate_args, name=name or "permute") @property def permutation(self): return self._permutation def _forward(self, x): return array_ops.gather(x, self.permutation, axis=-1) def _inverse(self, y): return array_ops.gather( y, array_ops.invert_permutation(self.permutation), axis=-1) def _inverse_log_det_jacobian(self, y): # is_constant_jacobian = True for this bijector, hence the # `log_det_jacobian` need only be specified for a single input, as this will # be tiled to match `event_ndims`. return constant_op.constant(0., dtype=y.dtype.base_dtype) def _forward_log_det_jacobian(self, x): return constant_op.constant(0., dtype=x.dtype.base_dtype)
Source file: tensorflow/contrib/distributions/python/ops/bijectors/permute.py (repo: tensorflow-r1.15.5-nv23.03)
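A minimal NumPy sketch, not the TF bijector itself, of permuting and un-permuting the rightmost dimension. `np.argsort(permutation)` plays the role of `invert_permutation`, and the log-det-Jacobian is zero because a permutation only reorders coordinates.

```python
import numpy as np

permutation = np.array([2, 1, 0])

def forward(x):
  return np.take(x, permutation, axis=-1)

def inverse(y):
  return np.take(y, np.argsort(permutation), axis=-1)

x = np.array([-1., 0., 1.])
print(forward(x))              # [ 1.  0. -1.]
print(inverse(forward(x)))     # [-1.  0.  1.]
```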
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SinhArcsinh bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "SinhArcsinh", ] @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _sqrtx2p1(x): """Implementation of `sqrt(1 + x**2)` which is stable despite large `x`.""" return array_ops.where_v2( math_ops.abs(x) * np.sqrt(np.finfo(x.dtype.as_numpy_dtype).eps) <= 1., math_ops.sqrt(x**2. + 1.), # For large x, calculating x**2 can overflow. This can be alleviated by # considering: # sqrt(1 + x**2) # = exp(0.5 log(1 + x**2)) # = exp(0.5 log(x**2 * (1 + x**-2))) # = exp(log(x) + 0.5 * log(1 + x**-2)) # = |x| * exp(0.5 log(1 + x**-2)) # = |x| * sqrt(1 + x**-2) # We omit the last term in this approximation. # When |x| > 1 / sqrt(machineepsilon), the second term will be 1, # due to sqrt(1 + x**-2) = 1. This is also true with the gradient term, # and higher order gradients, since the first order derivative of # sqrt(1 + x**-2) is -2 * x**-3 / (1 + x**-2) = -2 / (x**3 + x), # and all nth-order derivatives will be O(x**-(n + 2)). This makes any # gradient terms that contain any derivatives of sqrt(1 + x**-2) vanish. math_ops.abs(x)) class SinhArcsinh(bijector.Bijector): """Compute `Y = g(X) = Sinh( (Arcsinh(X) + skewness) * tailweight )`. For `skewness in (-inf, inf)` and `tailweight in (0, inf)`, this transformation is a diffeomorphism of the real line `(-inf, inf)`. The inverse transform is `X = g^{-1}(Y) = Sinh( ArcSinh(Y) / tailweight - skewness )`. The `SinhArcsinh` transformation of the Normal is described in [Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865) This Bijector allows a similar transformation of any distribution supported on `(-inf, inf)`. #### Meaning of the parameters * If `skewness = 0` and `tailweight = 1`, this transform is the identity. * Positive (negative) `skewness` leads to positive (negative) skew. * positive skew means, for unimodal `X` centered at zero, the mode of `Y` is "tilted" to the right. * positive skew means positive values of `Y` become more likely, and negative values become less likely. * Larger (smaller) `tailweight` leads to fatter (thinner) tails. * Fatter tails mean larger values of `|Y|` become more likely. 
* If `X` is a unit Normal, `tailweight < 1` leads to a distribution that is "flat" around `Y = 0`, and a very steep drop-off in the tails. * If `X` is a unit Normal, `tailweight > 1` leads to a distribution more peaked at the mode with heavier tails. To see the argument about the tails, note that for `|X| >> 1` and `|X| >> (|skewness| * tailweight)**tailweight`, we have `Y approx 0.5 X**tailweight e**(sign(X) skewness * tailweight)`. """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, skewness=None, tailweight=None, validate_args=False, name="SinhArcsinh"): """Instantiates the `SinhArcsinh` bijector. Args: skewness: Skewness parameter. Float-type `Tensor`. Default is `0` of type `float32`. tailweight: Tailweight parameter. Positive `Tensor` of same `dtype` as `skewness` and broadcastable `shape`. Default is `1` of type `float32`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. """ self._graph_parents = [] self._name = name self._validate_args = validate_args with self._name_scope("init", values=[skewness, tailweight]): tailweight = 1. if tailweight is None else tailweight skewness = 0. if skewness is None else skewness self._skewness = ops.convert_to_tensor( skewness, name="skewness") self._tailweight = ops.convert_to_tensor( tailweight, name="tailweight", dtype=self._skewness.dtype) check_ops.assert_same_float_dtype([self._skewness, self._tailweight]) if validate_args: self._tailweight = control_flow_ops.with_dependencies([ check_ops.assert_positive( self._tailweight, message="Argument tailweight was not positive") ], self._tailweight) super(SinhArcsinh, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name) @property def skewness(self): """The `skewness` in: `Y = Sinh((Arcsinh(X) + skewness) * tailweight)`.""" return self._skewness @property def tailweight(self): """The `tailweight` in: `Y = Sinh((Arcsinh(X) + skewness) * tailweight)`.""" return self._tailweight def _forward(self, x): return math_ops.sinh((math_ops.asinh(x) + self.skewness) * self.tailweight) def _inverse(self, y): return math_ops.sinh(math_ops.asinh(y) / self.tailweight - self.skewness) def _inverse_log_det_jacobian(self, y): # x = sinh(arcsinh(y) / tailweight - skewness) # Using sinh' = cosh, arcsinh'(y) = 1 / sqrt(y**2 + 1), # dx/dy # = cosh(arcsinh(y) / tailweight - skewness) # / (tailweight * sqrt(y**2 + 1)) # This is computed inside the log to avoid catastrophic cancellations # from cosh((arcsinh(y) / tailweight) - skewness) and sqrt(x**2 + 1). return ( math_ops.log(math_ops.cosh( math_ops.asinh(y) / self.tailweight - self.skewness) # TODO(srvasude): Consider using cosh(arcsinh(x)) in cases # where (arcsinh(x) / tailweight) - skewness ~= arcsinh(x). / _sqrtx2p1(y)) - math_ops.log(self.tailweight)) def _forward_log_det_jacobian(self, x): # y = sinh((arcsinh(x) + skewness) * tailweight) # Using sinh' = cosh, arcsinh'(x) = 1 / sqrt(x**2 + 1), # dy/dx # = cosh((arcsinh(x) + skewness) * tailweight) * tailweight / sqrt(x**2 + 1) # This is computed inside the log to avoid catastrophic cancellations # from cosh((arcsinh(x) + skewness) * tailweight) and sqrt(x**2 + 1). 
return ( math_ops.log(math_ops.cosh( (math_ops.asinh(x) + self.skewness) * self.tailweight) # TODO(srvasude): Consider using cosh(arcsinh(x)) in cases # where (arcsinh(x) + skewness) * tailweight ~= arcsinh(x). / _sqrtx2p1(x)) + math_ops.log(self.tailweight))
Source file: tensorflow/contrib/distributions/python/ops/bijectors/sinh_arcsinh.py (repo: tensorflow-r1.15.5-nv23.03)
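A minimal NumPy sketch, not the TF bijector itself, of the SinhArcsinh transform, its inverse, and the forward log-det-Jacobian from the comments above. The `_sqrtx2p1` stabilization is omitted; plain `sqrt(x**2 + 1)` is fine for moderate inputs. The parameter values are arbitrary examples.

```python
import numpy as np

skewness, tailweight = 0.5, 1.5

def forward(x):
  return np.sinh((np.arcsinh(x) + skewness) * tailweight)

def inverse(y):
  return np.sinh(np.arcsinh(y) / tailweight - skewness)

def forward_log_det_jacobian(x):
  # dy/dx = cosh((arcsinh(x) + skewness) * tailweight) * tailweight / sqrt(x**2 + 1)
  return (np.log(np.cosh((np.arcsinh(x) + skewness) * tailweight))
          - 0.5 * np.log(x**2 + 1.) + np.log(tailweight))

x = np.linspace(-3., 3., 7)
print(np.allclose(inverse(forward(x)), x))   # True

# Finite-difference check of the Jacobian at one point.
x0, eps = 0.7, 1e-6
num = (forward(x0 + eps) - forward(x0 - eps)) / (2. * eps)
print(np.allclose(forward_log_det_jacobian(x0), np.log(num), atol=1e-5))  # True
```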
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Weibull bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Weibull", ] class Weibull(bijector.Bijector): """Compute `Y = g(X) = 1 - exp((-X / scale) ** concentration), X >= 0`. This bijector maps inputs from `[0, inf]` to [0, 1]`. The inverse of the bijector applied to a uniform random variable `X ~ U(0, 1) gives back a random variable with the [Weibull distribution](https://en.wikipedia.org/wiki/Weibull_distribution): ```none Y ~ Weibull(scale, concentration) pdf(y; scale, concentration, y >= 0) = (scale / concentration) * ( scale / concentration) ** (concentration - 1) * exp( -(y / scale) ** concentration) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, scale=1., concentration=1., validate_args=False, name="weibull"): """Instantiates the `Weibull` bijector. Args: scale: Positive Float-type `Tensor` that is the same dtype and is broadcastable with `concentration`. This is `l` in `Y = g(X) = 1 - exp((-x / l) ** k)`. concentration: Positive Float-type `Tensor` that is the same dtype and is broadcastable with `scale`. This is `k` in `Y = g(X) = 1 - exp((-x / l) ** k)`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. 
""" self._graph_parents = [] self._name = name self._validate_args = validate_args with self._name_scope("init", values=[scale, concentration]): self._scale = ops.convert_to_tensor(scale, name="scale") self._concentration = ops.convert_to_tensor( concentration, name="concentration") check_ops.assert_same_float_dtype([self._scale, self._concentration]) if validate_args: self._scale = control_flow_ops.with_dependencies([ check_ops.assert_positive( self._scale, message="Argument scale was not positive") ], self._scale) self._concentration = control_flow_ops.with_dependencies([ check_ops.assert_positive( self._concentration, message="Argument concentration was not positive") ], self._concentration) super(Weibull, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name) @property def scale(self): """The `l` in `Y = g(X) = 1 - exp((-x / l) ** k)`.""" return self._scale @property def concentration(self): """The `k` in `Y = g(X) = 1 - exp((-x / l) ** k)`.""" return self._concentration def _forward(self, x): x = self._maybe_assert_valid_x(x) return -math_ops.expm1(-((x / self.scale) ** self.concentration)) def _inverse(self, y): y = self._maybe_assert_valid_y(y) return self.scale * (-math_ops.log1p(-y)) ** (1 / self.concentration) def _inverse_log_det_jacobian(self, y): y = self._maybe_assert_valid_y(y) return ( -math_ops.log1p(-y) + (1 / self.concentration - 1) * math_ops.log(-math_ops.log1p(-y)) + math_ops.log(self.scale / self.concentration)) def _forward_log_det_jacobian(self, x): x = self._maybe_assert_valid_x(x) return ( -(x / self.scale) ** self.concentration + (self.concentration - 1) * math_ops.log(x) + math_ops.log(self.concentration) + -self.concentration * math_ops.log(self.scale)) def _maybe_assert_valid_x(self, x): if not self.validate_args: return x is_valid = check_ops.assert_non_negative( x, message="Forward transformation input must be at least 0.") return control_flow_ops.with_dependencies([is_valid], x) def _maybe_assert_valid_y(self, y): if not self.validate_args: return y is_positive = check_ops.assert_non_negative( y, message="Inverse transformation input must be greater than 0.") less_than_one = check_ops.assert_less_equal( y, constant_op.constant(1., y.dtype), message="Inverse transformation input must be less than or equal to 1.") return control_flow_ops.with_dependencies([is_positive, less_than_one], y)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/weibull.py
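A minimal NumPy sketch (not part of `weibull.py`) that checks the forward/inverse formulas used by the `Weibull` bijector above; the parameter values are arbitrary:

```python
import numpy as np

scale, concentration = 2.0, 1.5   # arbitrary positive parameters
x = np.array([0.5, 1.0, 3.0])

# Forward: the Weibull CDF, Y = 1 - exp(-(x / scale) ** concentration).
y = -np.expm1(-(x / scale) ** concentration)

# Inverse: X = scale * (-log(1 - y)) ** (1 / concentration).
x_back = scale * (-np.log1p(-y)) ** (1.0 / concentration)
np.testing.assert_allclose(x_back, x, rtol=1e-12)

# The density of Y ~ Weibull(scale, concentration) referenced in the class
# docstring is
#   pdf(y) = (concentration / scale) * (y / scale) ** (concentration - 1)
#            * exp(-(y / scale) ** concentration),  y >= 0.
```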
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Softsign bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Softsign", ] class Softsign(bijector.Bijector): """Bijector which computes `Y = g(X) = X / (1 + |X|)`. The softsign `Bijector` has the following two useful properties: * The domain is all real numbers * `softsign(x) approx sgn(x)`, for large `|x|`. #### Examples ```python # Create the Y = softsign(X) transform. softsign = Softsign() x = [[[1., 2], [3, 4]], [[5, 6], [7, 8]]] x / (1 + abs(x)) == softsign.forward(x) x / (1 - abs(x)) == softsign.inverse(x) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, validate_args=False, name="softsign"): super(Softsign, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name) def _forward(self, x): return x / (1. + math_ops.abs(x)) def _inverse(self, y): y = self._maybe_assert_valid_y(y) return y / (1. - math_ops.abs(y)) def _forward_log_det_jacobian(self, x): return -2. * math_ops.log1p(math_ops.abs(x)) def _inverse_log_det_jacobian(self, y): y = self._maybe_assert_valid_y(y) return -2. * math_ops.log1p(-math_ops.abs(y)) def _maybe_assert_valid_y(self, y): if not self.validate_args: return y is_valid = [ check_ops.assert_greater( y, math_ops.cast(-1., dtype=y.dtype.base_dtype), message="Inverse transformation input must be greater than -1."), check_ops.assert_less( y, math_ops.cast(1., dtype=y.dtype.base_dtype), message="Inverse transformation input must be less than 1.") ] return control_flow_ops.with_dependencies(is_valid, y)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/softsign.py
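A small NumPy sketch (not from `softsign.py`) verifying the softsign formulas at a few arbitrary points:

```python
import numpy as np

x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])

# Forward: y = x / (1 + |x|) maps all reals into (-1, 1).
y = x / (1.0 + np.abs(x))

# Inverse: x = y / (1 - |y|), valid only for y in (-1, 1).
x_back = y / (1.0 - np.abs(y))
np.testing.assert_allclose(x_back, x, rtol=1e-12)

# d/dx [x / (1 + |x|)] = 1 / (1 + |x|) ** 2, so the forward log-det-Jacobian
# is -2 * log1p(|x|), matching _forward_log_det_jacobian above.
fldj = -2.0 * np.log1p(np.abs(x))
np.testing.assert_allclose(np.exp(fldj), 1.0 / (1.0 + np.abs(x)) ** 2)
```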
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Chain bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Chain", ] @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _use_static_shape(input_tensor, ndims): return input_tensor.shape.is_fully_defined() and isinstance(ndims, int) @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _compute_min_event_ndims(bijector_list, compute_forward=True): """Computes the min_event_ndims associated with the give list of bijectors. Given a list `bijector_list` of bijectors, compute the min_event_ndims that is associated with the composition of bijectors in that list. min_event_ndims is the # of right most dimensions for which the bijector has done necessary computation on (i.e. the non-broadcastable part of the computation). We can derive the min_event_ndims for a chain of bijectors as follows: In the case where there are no rank changing bijectors, this will simply be `max(b.forward_min_event_ndims for b in bijector_list)`. This is because the bijector with the most forward_min_event_ndims requires the most dimensions, and hence the chain also requires operating on those dimensions. However in the case of rank changing, more care is needed in determining the exact amount of dimensions. Padding dimensions causes subsequent bijectors to operate on the padded dimensions, and Removing dimensions causes bijectors to operate more left. Args: bijector_list: List of bijectors to be composed by chain. compute_forward: Boolean. If True, computes the min_event_ndims associated with a forward call to Chain, and otherwise computes the min_event_ndims associated with an inverse call to Chain. The latter is the same as the min_event_ndims associated with a forward call to Invert(Chain(....)). Returns: min_event_ndims """ min_event_ndims = 0 # This is a mouthful, but what this encapsulates is that if not for rank # changing bijectors, we'd only need to compute the largest of the min # required ndims. Hence "max_min". 
Due to rank changing bijectors, we need to # account for synthetic rank growth / synthetic rank decrease from a rank # changing bijector. rank_changed_adjusted_max_min_event_ndims = 0 if compute_forward: bijector_list = reversed(bijector_list) for b in bijector_list: if compute_forward: current_min_event_ndims = b.forward_min_event_ndims current_inverse_min_event_ndims = b.inverse_min_event_ndims else: current_min_event_ndims = b.inverse_min_event_ndims current_inverse_min_event_ndims = b.forward_min_event_ndims # New dimensions were touched. if rank_changed_adjusted_max_min_event_ndims < current_min_event_ndims: min_event_ndims += ( current_min_event_ndims - rank_changed_adjusted_max_min_event_ndims) rank_changed_adjusted_max_min_event_ndims = max( current_min_event_ndims, rank_changed_adjusted_max_min_event_ndims) # If the number of dimensions has increased via forward, then # inverse_min_event_ndims > forward_min_event_ndims, and hence the # dimensions we computed on, have moved left (so we have operated # on additional dimensions). # Conversely, if the number of dimensions has decreased via forward, # then we have inverse_min_event_ndims < forward_min_event_ndims, # and so we will have operated on fewer right most dimensions. number_of_changed_dimensions = ( current_min_event_ndims - current_inverse_min_event_ndims) rank_changed_adjusted_max_min_event_ndims -= number_of_changed_dimensions return min_event_ndims class Chain(bijector.Bijector): """Bijector which applies a sequence of bijectors. Example Use: ```python chain = Chain([Exp(), Softplus()], name="one_plus_exp") ``` Results in: * Forward: ```python exp = Exp() softplus = Softplus() Chain([exp, softplus]).forward(x) = exp.forward(softplus.forward(x)) = tf.exp(tf.math.log(1. + tf.exp(x))) = 1. + tf.exp(x) ``` * Inverse: ```python exp = Exp() softplus = Softplus() Chain([exp, softplus]).inverse(y) = softplus.inverse(exp.inverse(y)) = tf.math.log(tf.exp(tf.math.log(y)) - 1.) = tf.math.log(y - 1.) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, bijectors=None, validate_args=False, name=None): """Instantiates `Chain` bijector. Args: bijectors: Python `list` of bijector instances. An empty list makes this bijector equivalent to the `Identity` bijector. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str`, name given to ops managed by this object. Default: E.g., `Chain([Exp(), Softplus()]).name == "chain_of_exp_of_softplus"`. Raises: ValueError: if bijectors have different dtypes. 
""" if bijectors is None: bijectors = () self._bijectors = bijectors for a_bijector in bijectors: if not a_bijector._is_injective: # pylint: disable=protected-access raise NotImplementedError( "Invert is not implemented for non-injective bijector ({})".format( a_bijector.name)) dtype = list(set([b.dtype for b in bijectors])) if len(dtype) > 2: raise ValueError("incompatible dtypes: %s" % dtype) elif len(dtype) == 2: dtype = dtype[1] if dtype[0] is None else dtype[0] elif len(dtype) == 1: dtype = dtype[0] else: dtype = None inverse_min_event_ndims = _compute_min_event_ndims( bijectors, compute_forward=False) forward_min_event_ndims = _compute_min_event_ndims( bijectors, compute_forward=True) super(Chain, self).__init__( graph_parents=list(itertools.chain.from_iterable( b.graph_parents for b in bijectors)), forward_min_event_ndims=forward_min_event_ndims, inverse_min_event_ndims=inverse_min_event_ndims, is_constant_jacobian=all(b.is_constant_jacobian for b in bijectors), validate_args=validate_args, dtype=dtype, name=name or ("identity" if not bijectors else "_of_".join(["chain"] + [b.name for b in bijectors]))) @property def bijectors(self): return self._bijectors def _shape_helper(self, func_name, input_shape, reverse): new_shape = input_shape for b in reversed(self.bijectors) if reverse else self.bijectors: func = getattr(b, func_name, None) if func is None: raise ValueError("unable to call %s on bijector %s (%s)" % (func_name, b.name, func)) new_shape = func(new_shape) return new_shape def _forward_event_shape(self, input_shape): return self._shape_helper("forward_event_shape", input_shape, reverse=True) def _forward_event_shape_tensor(self, input_shape): return self._shape_helper( "forward_event_shape_tensor", input_shape, reverse=True) def _inverse_event_shape(self, output_shape): return self._shape_helper("inverse_event_shape", output_shape, reverse=False) def _inverse_event_shape_tensor(self, output_shape): return self._shape_helper("inverse_event_shape_tensor", output_shape, reverse=False) def _inverse(self, y, **kwargs): for b in self.bijectors: y = b.inverse(y, **kwargs.get(b.name, {})) return y def _inverse_log_det_jacobian(self, y, **kwargs): y = ops.convert_to_tensor(y, name="y") ildj = math_ops.cast(0., dtype=y.dtype.base_dtype) if not self.bijectors: return ildj event_ndims = self._maybe_get_static_event_ndims( self.inverse_min_event_ndims) if _use_static_shape(y, event_ndims): event_shape = y.shape[y.shape.ndims - event_ndims:] else: event_shape = array_ops.shape(y)[array_ops.rank(y) - event_ndims:] for b in self.bijectors: ildj += b.inverse_log_det_jacobian( y, event_ndims=event_ndims, **kwargs.get(b.name, {})) if _use_static_shape(y, event_ndims): event_shape = b.inverse_event_shape(event_shape) event_ndims = self._maybe_get_static_event_ndims( event_shape.ndims) else: event_shape = b.inverse_event_shape_tensor(event_shape) event_ndims = array_ops.size(event_shape) event_ndims_ = self._maybe_get_static_event_ndims(event_ndims) if event_ndims_ is not None: event_ndims = event_ndims_ y = b.inverse(y, **kwargs.get(b.name, {})) return ildj def _forward(self, x, **kwargs): for b in reversed(self.bijectors): x = b.forward(x, **kwargs.get(b.name, {})) return x def _forward_log_det_jacobian(self, x, **kwargs): x = ops.convert_to_tensor(x, name="x") fldj = math_ops.cast(0., dtype=x.dtype.base_dtype) if not self.bijectors: return fldj event_ndims = self._maybe_get_static_event_ndims( self.forward_min_event_ndims) if _use_static_shape(x, event_ndims): event_shape = 
x.shape[x.shape.ndims - event_ndims:] else: event_shape = array_ops.shape(x)[array_ops.rank(x) - event_ndims:] for b in reversed(self.bijectors): fldj += b.forward_log_det_jacobian( x, event_ndims=event_ndims, **kwargs.get(b.name, {})) if _use_static_shape(x, event_ndims): event_shape = b.forward_event_shape(event_shape) event_ndims = self._maybe_get_static_event_ndims(event_shape.ndims) else: event_shape = b.forward_event_shape_tensor(event_shape) event_ndims = array_ops.size(event_shape) event_ndims_ = self._maybe_get_static_event_ndims(event_ndims) if event_ndims_ is not None: event_ndims = event_ndims_ x = b.forward(x, **kwargs.get(b.name, {})) return fldj
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/chain.py
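A usage sketch for `Chain`, mirroring the class docstring; it assumes a TF 1.x graph-mode environment where `tf.contrib` is importable:

```python
import numpy as np
import tensorflow as tf

tfb = tf.contrib.distributions.bijectors

# Forward applies the list right-to-left, inverse left-to-right, so
# Chain([Exp(), Softplus()]) maps x -> exp(softplus(x)) = 1 + exp(x).
chain = tfb.Chain([tfb.Exp(), tfb.Softplus()], name="one_plus_exp")

x = np.array([0.0, 1.0, 2.0], dtype=np.float32)
y = chain.forward(x)
x_back = chain.inverse(y)

with tf.Session() as sess:
  y_, x_back_ = sess.run([y, x_back])

np.testing.assert_allclose(y_, 1.0 + np.exp(x), rtol=1e-5)
np.testing.assert_allclose(x_back_, x, atol=1e-5)
```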
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Kumaraswamy bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Kumaraswamy", ] class Kumaraswamy(bijector.Bijector): """Compute `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a), X in [0, 1]`. This bijector maps inputs from `[0, 1]` to [0, 1]`. The inverse of the bijector applied to a uniform random variable `X ~ U(0, 1) gives back a random variable with the [Kumaraswamy distribution]( https://en.wikipedia.org/wiki/Kumaraswamy_distribution): ```none Y ~ Kumaraswamy(a, b) pdf(y; a, b, 0 <= y <= 1) = a * b * y ** (a - 1) * (1 - y**a) ** (b - 1) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, concentration1=None, concentration0=None, validate_args=False, name="kumaraswamy"): """Instantiates the `Kumaraswamy` bijector. Args: concentration1: Python `float` scalar indicating the transform power, i.e., `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)` where `a` is `concentration1`. concentration0: Python `float` scalar indicating the transform power, i.e., `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)` where `b` is `concentration0`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. 
""" self._graph_parents = [] self._name = name self._validate_args = validate_args with self._name_scope("init", values=[concentration1, concentration0]): concentration1 = self._maybe_assert_valid_concentration( ops.convert_to_tensor(concentration1, name="concentration1"), validate_args=validate_args) concentration0 = self._maybe_assert_valid_concentration( ops.convert_to_tensor(concentration0, name="concentration0"), validate_args=validate_args) self._concentration1 = concentration1 self._concentration0 = concentration0 super(Kumaraswamy, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name) @property def concentration1(self): """The `a` in: `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)`.""" return self._concentration1 @property def concentration0(self): """The `b` in: `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)`.""" return self._concentration0 def _forward(self, x): x = self._maybe_assert_valid(x) return math_ops.exp( math_ops.log1p(-math_ops.exp(math_ops.log1p(-x) / self.concentration0)) / self.concentration1) def _inverse(self, y): y = self._maybe_assert_valid(y) return math_ops.exp(math_ops.log1p( -(1 - y**self.concentration1)**self.concentration0)) def _inverse_log_det_jacobian(self, y): y = self._maybe_assert_valid(y) return ( math_ops.log(self.concentration1) + math_ops.log(self.concentration0) + (self.concentration1 - 1) * math_ops.log(y) + (self.concentration0 - 1) * math_ops.log1p(-y**self.concentration1)) def _maybe_assert_valid_concentration(self, concentration, validate_args): """Checks the validity of a concentration parameter.""" if not validate_args: return concentration return control_flow_ops.with_dependencies([ check_ops.assert_positive( concentration, message="Concentration parameter must be positive."), ], concentration) def _maybe_assert_valid(self, x): if not self.validate_args: return x return control_flow_ops.with_dependencies([ check_ops.assert_non_negative( x, message="sample must be non-negative"), check_ops.assert_less_equal( x, array_ops.ones([], self.concentration0.dtype), message="sample must be no larger than `1`."), ], x)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/kumaraswamy.py
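A NumPy sketch (not part of `kumaraswamy.py`) of the forward/inverse pair implemented above, with arbitrary concentrations:

```python
import numpy as np

a, b = 2.0, 3.0                 # concentration1, concentration0
x = np.array([0.1, 0.5, 0.9])

# Forward: y = (1 - (1 - x) ** (1 / b)) ** (1 / a).
y = (1.0 - (1.0 - x) ** (1.0 / b)) ** (1.0 / a)

# Inverse: x = 1 - (1 - y ** a) ** b, which is the Kumaraswamy(a, b) CDF, so
# the forward map is the corresponding quantile function.
x_back = 1.0 - (1.0 - y ** a) ** b
np.testing.assert_allclose(x_back, x, rtol=1e-10)

# pdf(y; a, b) = a * b * y ** (a - 1) * (1 - y ** a) ** (b - 1),  0 <= y <= 1.
```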
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Reshape bijectors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Reshape", ] @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _static_ndims_from_shape(shape): return tensor_shape.dimension_value(shape.shape.with_rank_at_least(1)[0]) @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _ndims_from_shape(shape): return array_ops.shape(shape)[0] class Reshape(bijector.Bijector): """Reshapes the `event_shape` of a `Tensor`. The semantics generally follow that of `tf.reshape()`, with a few differences: * The user must provide both the input and output shape, so that the transformation can be inverted. If an input shape is not specified, the default assumes a vector-shaped input, i.e., event_shape_in = (-1,). * The `Reshape` bijector automatically broadcasts over the leftmost dimensions of its input (`sample_shape` and `batch_shape`); only the rightmost `event_ndims_in` dimensions are reshaped. The number of dimensions to reshape is inferred from the provided `event_shape_in` (`event_ndims_in = len(event_shape_in)`). Example usage: ```python import tensorflow_probability as tfp tfb = tfp.bijectors r = tfb.Reshape(event_shape_out=[1, -1]) r.forward([3., 4.]) # shape [2] # ==> [[3., 4.]] # shape [1, 2] r.forward([[1., 2.], [3., 4.]]) # shape [2, 2] # ==> [[[1., 2.]], # [[3., 4.]]] # shape [2, 1, 2] r.inverse([[3., 4.]]) # shape [1,2] # ==> [3., 4.] # shape [2] r.forward_log_det_jacobian(any_value) # ==> 0. r.inverse_log_det_jacobian(any_value) # ==> 0. ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). 
You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, event_shape_out, event_shape_in=(-1,), validate_args=False, name=None): """Creates a `Reshape` bijector. Args: event_shape_out: An `int`-like vector-shaped `Tensor` representing the event shape of the transformed output. event_shape_in: An optional `int`-like vector-shape `Tensor` representing the event shape of the input. This is required in order to define inverse operations; the default of (-1,) assumes a vector-shaped input. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str`, name given to ops managed by this object. Raises: TypeError: if either `event_shape_in` or `event_shape_out` has non-integer `dtype`. ValueError: if either of `event_shape_in` or `event_shape_out` has non-vector shape (`rank > 1`), or if their sizes do not match. """ with ops.name_scope(name, "reshape", values=[event_shape_out, event_shape_in]): event_shape_out = ops.convert_to_tensor(event_shape_out, name="event_shape_out", preferred_dtype=dtypes.int32) event_shape_in = ops.convert_to_tensor(event_shape_in, name="event_shape_in", preferred_dtype=dtypes.int32) assertions = [] assertions.extend(self._maybe_check_valid_shape( event_shape_out, validate_args)) assertions.extend(self._maybe_check_valid_shape( event_shape_in, validate_args)) self._assertions = assertions self._event_shape_in = event_shape_in self._event_shape_out = event_shape_out super(Reshape, self).__init__( forward_min_event_ndims=0, is_constant_jacobian=True, validate_args=validate_args, name=name or "reshape") def _maybe_check_valid_shape(self, shape, validate_args): """Check that a shape Tensor is int-type and otherwise sane.""" if not shape.dtype.is_integer: raise TypeError("{} dtype ({}) should be `int`-like.".format( shape, shape.dtype.name)) assertions = [] ndims = array_ops.rank(shape) ndims_ = tensor_util.constant_value(ndims) if ndims_ is not None and ndims_ > 1: raise ValueError("`{}` rank ({}) should be <= 1.".format( shape, ndims_)) elif validate_args: assertions.append(check_ops.assert_less_equal( ndims, 1, message="`{}` rank should be <= 1.".format(shape))) shape_ = tensor_util.constant_value_as_shape(shape) if shape_.is_fully_defined(): es = np.int32(shape_.as_list()) if sum(es == -1) > 1: raise ValueError( "`{}` must have at most one `-1` (given {})" .format(shape, es)) if np.any(es < -1): raise ValueError( "`{}` elements must be either positive integers or `-1`" "(given {})." .format(shape, es)) elif validate_args: assertions.extend([ check_ops.assert_less_equal( math_ops.reduce_sum( math_ops.cast(math_ops.equal(shape, -1), dtypes.int32)), 1, message="`{}` elements must have at most one `-1`." .format(shape)), check_ops.assert_greater_equal( shape, -1, message="`{}` elements must be either positive integers or `-1`." .format(shape)), ]) return assertions def _reshape_helper(self, x, event_shape_in, event_shape_out): """Reshape only the event_shape of an input `Tensor`.""" event_ndims_in_ = _static_ndims_from_shape(event_shape_in) event_ndims_in = _ndims_from_shape(event_shape_in) x_ndims_, x_ndims = x.shape.ndims, array_ops.rank(x) assertions = [] # Ensure x.event_shape is compatible with event_shape_in. 
if (event_ndims_in_ is not None and x_ndims_ is not None and x.shape.with_rank_at_least(event_ndims_in_)[ x_ndims_-event_ndims_in_:].is_fully_defined()): x_event_shape_, x_event_shape = [ # pylint: disable=unbalanced-tuple-unpacking np.int32(x.shape[x_ndims_-event_ndims_in_:])]*2 else: x_event_shape_, x_event_shape = ( None, array_ops.shape(x)[x_ndims-event_ndims_in:]) event_shape_in_ = tensor_util.constant_value(event_shape_in) if x_event_shape_ is not None and event_shape_in_ is not None: # Compare the shape dimensions that are fully specified in the # input (i.e., for which event_shape_in is not -1). If x_event_shape # matches along all of these dimensions, it is compatible with # the desired input shape and any further mismatches (i.e., # imcompatibility with the desired *output* shape) will be # caught inside of array_ops.reshape() below. x_event_shape_specified_ = x_event_shape_[event_shape_in_ >= 0] event_shape_in_specified_ = event_shape_in_[event_shape_in_ >= 0] if not np.equal(x_event_shape_specified_, event_shape_in_specified_).all(): raise ValueError( "Input `event_shape` does not match `event_shape_in` ({} vs {}).". format(x_event_shape_, event_shape_in_)) elif self.validate_args: # Similarly to the static case, we compare the shape dimensions # that are fully specified in the input. We extract these # dimensions using boolean_mask(), which requires that the mask # have known ndims. We can assume that shape Tensors always have # ndims==1 (this assumption is verified inside of # _maybe_check_valid_shape), so the reshape operation is just a # no-op that formally encodes this fact to make boolean_mask() # happy. event_shape_mask = array_ops.reshape(event_shape_in >= 0, [-1]) x_event_shape_specified = array_ops.boolean_mask(x_event_shape, event_shape_mask) event_shape_in_specified = array_ops.boolean_mask(event_shape_in, event_shape_mask) assertions.append(check_ops.assert_equal( x_event_shape_specified, event_shape_in_specified, message="Input `event_shape` does not match `event_shape_in`.")) if assertions: x = control_flow_ops.with_dependencies(assertions, x) # get the parts of shape(x) that will not change sample_and_batch_shape = array_ops.shape(x) ndims = (x.shape.ndims if x.shape.ndims is not None else array_ops.rank(x)) sample_and_batch_shape = sample_and_batch_shape[ :(ndims - math_ops.abs(event_ndims_in))] if (event_ndims_in_ is not None and x_ndims_ is not None and event_ndims_in_ == x_ndims_): # Hack to allow forward/inverse_event_shape to do shape # inference by calling this helper method with a dummy Tensor of # shape event_shape_in. In this special case, # sample_and_batch_shape will be empty so we can preserve static # shape information by avoiding the concat operation below # (which would be a no-op). 
new_shape = event_shape_out else: new_shape = array_ops.concat( [sample_and_batch_shape, event_shape_out], axis=0) return array_ops.reshape(x, new_shape) def _forward(self, x): with ops.control_dependencies(self._assertions): return self._reshape_helper(x, self._event_shape_in, self._event_shape_out) def _inverse(self, y): with ops.control_dependencies(self._assertions): return self._reshape_helper(y, self._event_shape_out, self._event_shape_in) def _inverse_log_det_jacobian(self, y): with ops.control_dependencies(self._assertions): return constant_op.constant(0., dtype=y.dtype) def _forward_log_det_jacobian(self, x): with ops.control_dependencies(self._assertions): return constant_op.constant(0., dtype=x.dtype) def _forward_event_shape(self, input_shape): # NOTE: this method and the other *_event_shape* methods # compute shape by explicit transformation of a dummy # variable. This approach is not generally recommended because it # bloats the graph and could in general trigger side effects. # # In this particular case of the Reshape bijector, the # forward and inverse transforms have no side effects, and we # believe the reduction in code complexity from delegating the # heavy lifting to tf.reshape() is worth the added graph ops. # However, you should think hard before implementing this approach # in other Bijectors; it is strongly preferred to compute # shapes explicitly whenever it's feasible to do so. with ops.control_dependencies(self._assertions): dummy = array_ops.zeros(dtype=dtypes.float32, shape=input_shape) dummy_reshaped = self.forward(dummy) return dummy_reshaped.shape def _inverse_event_shape(self, output_shape): with ops.control_dependencies(self._assertions): dummy = array_ops.zeros(dtype=dtypes.float32, shape=output_shape) dummy_reshaped = self.inverse(dummy) return dummy_reshaped.shape def _forward_event_shape_tensor(self, input_shape): with ops.control_dependencies(self._assertions): dummy = array_ops.zeros(dtype=dtypes.float32, shape=input_shape) dummy_reshaped = self.forward(dummy) return array_ops.shape(dummy_reshaped) def _inverse_event_shape_tensor(self, output_shape): with ops.control_dependencies(self._assertions): dummy = array_ops.zeros(dtype=dtypes.float32, shape=output_shape) dummy_reshaped = self.inverse(dummy) return array_ops.shape(dummy_reshaped)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/reshape.py
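A usage sketch for `Reshape`, following the docstring example; it assumes a TF 1.x session environment with `tf.contrib` available:

```python
import numpy as np
import tensorflow as tf

tfb = tf.contrib.distributions.bijectors

# Only the rightmost event dimensions are reshaped; leading sample/batch
# dimensions pass through unchanged.
r = tfb.Reshape(event_shape_out=[1, -1])     # default event_shape_in=(-1,)

x = np.array([[1., 2.], [3., 4.]], dtype=np.float32)   # shape [2, 2]
y = r.forward(x)                                        # shape [2, 1, 2]
fldj = r.forward_log_det_jacobian(x, event_ndims=1)

with tf.Session() as sess:
  y_, fldj_ = sess.run([y, fldj])

print(y_.shape)   # (2, 1, 2)
print(fldj_)      # 0 everywhere: reshaping never changes volume.
```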
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """FillTriangular bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.ops.distributions import util as dist_util from tensorflow.python.util import deprecation __all__ = [ "FillTriangular", ] class FillTriangular(bijector.Bijector): """Transforms vectors to triangular. Triangular matrix elements are filled in a clockwise spiral. Given input with shape `batch_shape + [d]`, produces output with shape `batch_shape + [n, n]`, where `n = (-1 + sqrt(1 + 8 * d))/2`. This follows by solving the quadratic equation `d = 1 + 2 + ... + n = n * (n + 1)/2`. #### Example ```python b = tfb.FillTriangular(upper=False) b.forward([1, 2, 3, 4, 5, 6]) # ==> [[4, 0, 0], # [6, 5, 0], # [3, 2, 1]] b = tfb.FillTriangular(upper=True) b.forward([1, 2, 3, 4, 5, 6]) # ==> [[1, 2, 3], # [0, 5, 6], # [0, 0, 4]] ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, upper=False, validate_args=False, name="fill_triangular"): """Instantiates the `FillTriangular` bijector. Args: upper: Python `bool` representing whether output matrix should be upper triangular (`True`) or lower triangular (`False`, default). validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. 
""" self._upper = upper super(FillTriangular, self).__init__( forward_min_event_ndims=1, inverse_min_event_ndims=2, validate_args=validate_args, name=name) def _forward(self, x): return dist_util.fill_triangular(x, upper=self._upper) def _inverse(self, y): return dist_util.fill_triangular_inverse(y, upper=self._upper) def _forward_log_det_jacobian(self, x): return array_ops.zeros_like(x[..., 0]) def _inverse_log_det_jacobian(self, y): return array_ops.zeros_like(y[..., 0, 0]) def _forward_event_shape(self, input_shape): batch_shape, d = (input_shape[:-1], tensor_shape.dimension_value(input_shape[-1])) if d is None: n = None else: n = vector_size_to_square_matrix_size(d, self.validate_args) return batch_shape.concatenate([n, n]) def _inverse_event_shape(self, output_shape): batch_shape, n1, n2 = (output_shape[:-2], tensor_shape.dimension_value(output_shape[-2]), tensor_shape.dimension_value(output_shape[-1])) if n1 is None or n2 is None: m = None elif n1 != n2: raise ValueError("Matrix must be square. (saw [{}, {}])".format(n1, n2)) else: m = n1 * (n1 + 1) / 2 return batch_shape.concatenate([m]) def _forward_event_shape_tensor(self, input_shape_tensor): batch_shape, d = input_shape_tensor[:-1], input_shape_tensor[-1] n = vector_size_to_square_matrix_size(d, self.validate_args) return array_ops.concat([batch_shape, [n, n]], axis=0) def _inverse_event_shape_tensor(self, output_shape_tensor): batch_shape, n = output_shape_tensor[:-2], output_shape_tensor[-1] if self.validate_args: is_square_matrix = check_ops.assert_equal( n, output_shape_tensor[-2], message="Matrix must be square.") with ops.control_dependencies([is_square_matrix]): n = array_ops.identity(n) d = math_ops.cast(n * (n + 1) / 2, output_shape_tensor.dtype) return array_ops.concat([batch_shape, [d]], axis=0) @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def vector_size_to_square_matrix_size(d, validate_args, name=None): """Convert a vector size to a matrix size.""" if isinstance(d, (float, int, np.generic, np.ndarray)): n = (-1 + np.sqrt(1 + 8 * d)) / 2. if float(int(n)) != n: raise ValueError("Vector length is not a triangular number.") return int(n) else: with ops.name_scope(name, "vector_size_to_square_matrix_size", [d]) as name: n = (-1. + math_ops.sqrt(1 + 8. * math_ops.cast(d, dtypes.float32))) / 2. if validate_args: with ops.control_dependencies([ check_ops.assert_equal( math_ops.cast(math_ops.cast(n, dtypes.int32), dtypes.float32), n, message="Vector length is not a triangular number") ]): n = array_ops.identity(n) return math_ops.cast(n, d.dtype)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/fill_triangular.py
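A NumPy sketch (the helper name is mine, not from the file) of the triangular-number relation `d = n * (n + 1) / 2` that `FillTriangular` relies on:

```python
import numpy as np

def matrix_size_from_vector_size(d):
  # Invert d = n * (n + 1) / 2, as vector_size_to_square_matrix_size does above.
  n = (-1.0 + np.sqrt(1.0 + 8.0 * d)) / 2.0
  if n != np.floor(n):
    raise ValueError("Vector length %d is not a triangular number." % d)
  return int(n)

print(matrix_size_from_vector_size(6))    # ==> 3; a length-6 vector fills 3 x 3
print(matrix_size_from_vector_size(10))   # ==> 4
# Non-triangular lengths (e.g. 7) raise, since no square triangular matrix fits.
```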
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ConditionalBijector base.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops.distributions import bijector from tensorflow.python.ops.distributions import util as distribution_util __all__ = ["ConditionalBijector"] class ConditionalBijector(bijector.Bijector): """Conditional Bijector is a Bijector that allows intrinsic conditioning.""" @distribution_util.AppendDocstring(kwargs_dict={ "**condition_kwargs": "Named arguments forwarded to subclass implementation."}) def forward(self, x, name="forward", **condition_kwargs): return self._call_forward(x, name, **condition_kwargs) @distribution_util.AppendDocstring(kwargs_dict={ "**condition_kwargs": "Named arguments forwarded to subclass implementation."}) def inverse(self, y, name="inverse", **condition_kwargs): return self._call_inverse(y, name, **condition_kwargs) @distribution_util.AppendDocstring(kwargs_dict={ "**condition_kwargs": "Named arguments forwarded to subclass implementation."}) def inverse_log_det_jacobian( self, y, event_ndims, name="inverse_log_det_jacobian", **condition_kwargs): return self._call_inverse_log_det_jacobian( y, event_ndims, name, **condition_kwargs) @distribution_util.AppendDocstring(kwargs_dict={ "**condition_kwargs": "Named arguments forwarded to subclass implementation."}) def forward_log_det_jacobian( self, x, event_ndims, name="forward_log_det_jacobian", **condition_kwargs): return self._call_forward_log_det_jacobian( x, event_ndims, name, **condition_kwargs)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/conditional_bijector.py
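A hypothetical subclass (`ShiftByCondition` is not part of the library) sketching how the `**condition_kwargs` accepted above are routed to the underscored implementations; it assumes a TF 1.x session environment:

```python
import tensorflow as tf

tfd = tf.contrib.distributions

class ShiftByCondition(tfd.bijectors.ConditionalBijector):
  """Toy conditional bijector: Y = X + shift, with `shift` given at call time."""

  def __init__(self, name="shift_by_condition"):
    super(ShiftByCondition, self).__init__(
        forward_min_event_ndims=0, is_constant_jacobian=True, name=name)

  def _forward(self, x, shift=0.):
    return x + shift

  def _inverse(self, y, shift=0.):
    return y - shift

  def _forward_log_det_jacobian(self, x, shift=0.):
    del shift  # shifting is volume preserving for any conditioning value
    return tf.zeros([], dtype=x.dtype)

b = ShiftByCondition()
y = b.forward([1., 2.], shift=10.)   # the condition kwarg reaches _forward
x = b.inverse(y, shift=10.)

with tf.Session() as sess:
  print(sess.run([y, x]))            # shifted [11., 12.] and recovered [1., 2.]
```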
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """PowerTransform bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "PowerTransform", ] class PowerTransform(bijector.Bijector): """Compute `Y = g(X) = (1 + X * c)**(1 / c), X >= -1 / c`. The [power transform](https://en.wikipedia.org/wiki/Power_transform) maps inputs from `[0, inf]` to `[-1/c, inf]`; this is equivalent to the `inverse` of this bijector. This bijector is equivalent to the `Exp` bijector when `c=0`. """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, power=0., validate_args=False, name="power_transform"): """Instantiates the `PowerTransform` bijector. Args: power: Python `float` scalar indicating the transform power, i.e., `Y = g(X) = (1 + X * c)**(1 / c)` where `c` is the `power`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. Raises: ValueError: if `power < 0` or is not known statically. """ self._graph_parents = [] self._name = name self._validate_args = validate_args with self._name_scope("init", values=[power]): power = tensor_util.constant_value( ops.convert_to_tensor(power, name="power")) if power is None or power < 0: raise ValueError("`power` must be a non-negative TF constant.") self._power = power super(PowerTransform, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name) @property def power(self): """The `c` in: `Y = g(X) = (1 + X * c)**(1 / c)`.""" return self._power def _forward(self, x): x = self._maybe_assert_valid_x(x) if self.power == 0.: return math_ops.exp(x) # If large x accuracy is an issue, consider using: # (1. + x * self.power)**(1. / self.power) when x >> 1. return math_ops.exp(math_ops.log1p(x * self.power) / self.power) def _inverse(self, y): y = self._maybe_assert_valid_y(y) if self.power == 0.: return math_ops.log(y) # If large y accuracy is an issue, consider using: # (y**self.power - 1.) / self.power when y >> 1. return math_ops.expm1(math_ops.log(y) * self.power) / self.power def _inverse_log_det_jacobian(self, y): y = self._maybe_assert_valid_y(y) return (self.power - 1.) 
* math_ops.log(y) def _forward_log_det_jacobian(self, x): x = self._maybe_assert_valid_x(x) if self.power == 0.: return x return (1. / self.power - 1.) * math_ops.log1p(x * self.power) def _maybe_assert_valid_x(self, x): if not self.validate_args or self.power == 0.: return x is_valid = check_ops.assert_non_negative( 1. + self.power * x, message="Forward transformation input must be at least {}.".format( -1. / self.power)) return control_flow_ops.with_dependencies([is_valid], x) def _maybe_assert_valid_y(self, y): if not self.validate_args: return y is_valid = check_ops.assert_positive( y, message="Inverse transformation input must be greater than 0.") return control_flow_ops.with_dependencies([is_valid], y)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/power_transform.py
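A NumPy sketch (the function names are mine) of the power-transform formulas, including the `c = 0` limit where the bijector reduces to `Exp`:

```python
import numpy as np

def power_forward(x, c):
  # Y = (1 + c * X) ** (1 / c); the c -> 0 limit is exp(x).
  return np.exp(x) if c == 0. else np.exp(np.log1p(x * c) / c)

def power_inverse(y, c):
  # X = (Y ** c - 1) / c; the c -> 0 limit is log(y).
  return np.log(y) if c == 0. else np.expm1(np.log(y) * c) / c

x = np.array([0.5, 1.0, 2.0])
for c in (0., 0.5, 2.0):
  np.testing.assert_allclose(power_inverse(power_forward(x, c), c), x, rtol=1e-12)

# A tiny power is already close to the Exp bijector.
np.testing.assert_allclose(power_forward(x, 1e-8), np.exp(x), rtol=1e-6)
```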
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Affine bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.distributions.python.ops import distribution_util from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.ops.linalg import linalg from tensorflow.python.util import deprecation __all__ = [ "Affine", ] @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _as_tensor(x, name): """Convenience to convert to `Tensor` or leave as `None`.""" return None if x is None else ops.convert_to_tensor(x, name=name) class Affine(bijector.Bijector): """Compute `Y = g(X; shift, scale) = scale @ X + shift`. Here `scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V.T`. In TF parlance, the `scale` term is logically equivalent to: ```python scale = ( scale_identity_multiplier * tf.linalg.tensor_diag(tf.ones(d)) + tf.linalg.tensor_diag(scale_diag) + scale_tril + scale_perturb_factor @ diag(scale_perturb_diag) @ tf.transpose([scale_perturb_factor]) ) ``` The `scale` term is applied without necessarily materializing constituent matrices, i.e., the matmul is [matrix-free]( https://en.wikipedia.org/wiki/Matrix-free_methods) when possible. #### Examples ```python # Y = X b = Affine() # Y = X + shift b = Affine(shift=[1., 2, 3]) # Y = 2 * I @ X.T + shift b = Affine(shift=[1., 2, 3], scale_identity_multiplier=2.) # Y = tf.linalg.tensor_diag(d1) @ X.T + shift b = Affine(shift=[1., 2, 3], scale_diag=[-1., 2, 1]) # Implicitly 3x3. # Y = (I + v * v.T) @ X.T + shift b = Affine(shift=[1., 2, 3], scale_perturb_factor=[[1., 0], [0, 1], [1, 1]]) # Y = (diag(d1) + v * diag(d2) * v.T) @ X.T + shift b = Affine(shift=[1., 2, 3], scale_diag=[1., 3, 3], # Implicitly 3x3. scale_perturb_diag=[2., 1], # Implicitly 2x2. scale_perturb_factor=[[1., 0], [0, 1], [1, 1]]) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). 
You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, shift=None, scale_identity_multiplier=None, scale_diag=None, scale_tril=None, scale_perturb_factor=None, scale_perturb_diag=None, validate_args=False, name="affine"): """Instantiates the `Affine` bijector. This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments, giving the forward operation: ```none Y = g(X) = scale @ X + shift ``` where the `scale` term is logically equivalent to: ```python scale = ( scale_identity_multiplier * tf.linalg.tensor_diag(tf.ones(d)) + tf.linalg.tensor_diag(scale_diag) + scale_tril + scale_perturb_factor @ diag(scale_perturb_diag) @ tf.transpose([scale_perturb_factor]) ) ``` If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are specified then `scale += IdentityMatrix`. Otherwise specifying a `scale` argument has the semantics of `scale += Expand(arg)`, i.e., `scale_diag != None` means `scale += tf.linalg.tensor_diag(scale_diag)`. Args: shift: Floating-point `Tensor`. If this is set to `None`, no shift is applied. scale_identity_multiplier: floating point rank 0 `Tensor` representing a scaling done to the identity matrix. When `scale_identity_multiplier = scale_diag = scale_tril = None` then `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added to `scale`. scale_diag: Floating-point `Tensor` representing the diagonal matrix. `scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal matrix. When `None` no diagonal term is added to `scale`. scale_tril: Floating-point `Tensor` representing the diagonal matrix. `scale_diag` has shape [N1, N2, ... k, k], which represents a k x k lower triangular matrix. When `None` no `scale_tril` term is added to `scale`. The upper triangular elements above the diagonal are ignored. scale_perturb_factor: Floating-point `Tensor` representing factor matrix with last two dimensions of shape `(k, r)`. When `None`, no rank-r update is added to `scale`. scale_perturb_diag: Floating-point `Tensor` representing the diagonal matrix. `scale_perturb_diag` has shape [N1, N2, ... r], which represents an `r x r` diagonal matrix. When `None` low rank updates will take the form `scale_perturb_factor * scale_perturb_factor.T`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. Raises: ValueError: if `perturb_diag` is specified but not `perturb_factor`. TypeError: if `shift` has different `dtype` from `scale` arguments. """ self._graph_parents = [] self._name = name self._validate_args = validate_args # Ambiguous definition of low rank update. if scale_perturb_diag is not None and scale_perturb_factor is None: raise ValueError("When scale_perturb_diag is specified, " "scale_perturb_factor must be specified.") # Special case, only handling a scaled identity matrix. We don't know its # dimensions, so this is special cased. # We don't check identity_multiplier, since below we set it to 1. if all # other scale args are None. self._is_only_identity_multiplier = (scale_tril is None and scale_diag is None and scale_perturb_factor is None) with self._name_scope("init", values=[ shift, scale_identity_multiplier, scale_diag, scale_tril, scale_perturb_diag, scale_perturb_factor]): # In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`. 
dtype = dtypes.float32 if shift is not None: shift = ops.convert_to_tensor(shift, name="shift") dtype = shift.dtype.base_dtype self._shift = shift # When no args are specified, pretend the scale matrix is the identity # matrix. if (self._is_only_identity_multiplier and scale_identity_multiplier is None): scale_identity_multiplier = ops.convert_to_tensor(1., dtype=dtype) # self._create_scale_operator returns a LinearOperator in all cases # except if self._is_only_identity_multiplier; in which case it # returns a scalar Tensor. scale = self._create_scale_operator( identity_multiplier=scale_identity_multiplier, diag=scale_diag, tril=scale_tril, perturb_diag=scale_perturb_diag, perturb_factor=scale_perturb_factor, shift=shift, validate_args=validate_args) if scale.dtype is not None: dtype = scale.dtype.base_dtype if scale is not None and not self._is_only_identity_multiplier: if (shift is not None and shift.dtype.base_dtype != scale.dtype.base_dtype): raise TypeError( "shift.dtype({}) is incompatible with scale.dtype({}).".format( shift.dtype, scale.dtype)) if scale.tensor_rank is not None: batch_ndims = scale.tensor_rank - 2 else: batch_ndims = scale.tensor_rank_tensor() - 2 else: # We won't need shape inference when scale is None or when scale is a # scalar. batch_ndims = 0 self._scale = scale self._shaper = _DistributionShape( batch_ndims=batch_ndims, event_ndims=1, validate_args=validate_args) super(Affine, self).__init__( forward_min_event_ndims=1, graph_parents=( [self._scale] if tensor_util.is_tensor(self._scale) else self._scale.graph_parents + [self._shift] if self._shift is not None else []), is_constant_jacobian=True, dtype=dtype, validate_args=validate_args, name=name) def _create_scale_operator(self, identity_multiplier, diag, tril, perturb_diag, perturb_factor, shift, validate_args): """Construct `scale` from various components. Args: identity_multiplier: floating point rank 0 `Tensor` representing a scaling done to the identity matrix. diag: Floating-point `Tensor` representing the diagonal matrix. `scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal matrix. tril: Floating-point `Tensor` representing the diagonal matrix. `scale_tril` has shape [N1, N2, ... k], which represents a k x k lower triangular matrix. perturb_diag: Floating-point `Tensor` representing the diagonal matrix of the low rank update. perturb_factor: Floating-point `Tensor` representing factor matrix. shift: Floating-point `Tensor` representing `shift in `scale @ X + shift`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Returns: scale. In the case of scaling by a constant, scale is a floating point `Tensor`. Otherwise, scale is a `LinearOperator`. Raises: ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`. """ identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier") diag = _as_tensor(diag, "diag") tril = _as_tensor(tril, "tril") perturb_diag = _as_tensor(perturb_diag, "perturb_diag") perturb_factor = _as_tensor(perturb_factor, "perturb_factor") # If possible, use the low rank update to infer the shape of # the identity matrix, when scale represents a scaled identity matrix # with a low rank update. 
shape_hint = None if perturb_factor is not None: shape_hint = distribution_util.dimension_size(perturb_factor, axis=-2) if self._is_only_identity_multiplier: if validate_args: return control_flow_ops.with_dependencies( [check_ops.assert_none_equal( identity_multiplier, array_ops.zeros([], identity_multiplier.dtype), ["identity_multiplier should be non-zero."])], identity_multiplier) return identity_multiplier scale = distribution_util.make_tril_scale( loc=shift, scale_tril=tril, scale_diag=diag, scale_identity_multiplier=identity_multiplier, validate_args=validate_args, assert_positive=False, shape_hint=shape_hint) if perturb_factor is not None: return linalg.LinearOperatorLowRankUpdate( scale, u=perturb_factor, diag_update=perturb_diag, is_diag_update_positive=perturb_diag is None, is_non_singular=True, # Implied by is_positive_definite=True. is_self_adjoint=True, is_positive_definite=True, is_square=True) return scale @property def shift(self): """The `shift` `Tensor` in `Y = scale @ X + shift`.""" return self._shift @property def scale(self): """The `scale` `LinearOperator` in `Y = scale @ X + shift`.""" return self._scale def _forward(self, x): y = x if self._is_only_identity_multiplier: y *= self._scale if self.shift is not None: return y + self.shift return y y, sample_shape = self._shaper.make_batch_of_event_sample_matrices( y, expand_batch_dim=False) with ops.control_dependencies(self._maybe_check_scale() if self.validate_args else []): y = self.scale.matmul(y) y = self._shaper.undo_make_batch_of_event_sample_matrices( y, sample_shape, expand_batch_dim=False) if self.shift is not None: y += self.shift return y def _inverse(self, y): x = y if self.shift is not None: x -= self.shift if self._is_only_identity_multiplier: return x / self._scale x, sample_shape = self._shaper.make_batch_of_event_sample_matrices( x, expand_batch_dim=False) # Solve fails if the op is singular so we may safely skip this assertion. x = self.scale.solve(x) x = self._shaper.undo_make_batch_of_event_sample_matrices( x, sample_shape, expand_batch_dim=False) return x def _forward_log_det_jacobian(self, x): # is_constant_jacobian = True for this bijector, hence the # `log_det_jacobian` need only be specified for a single input, as this will # be tiled to match `event_ndims`. if self._is_only_identity_multiplier: # We don't pad in this case and instead let the fldj be applied # via broadcast. event_size = array_ops.shape(x)[-1] event_size = math_ops.cast(event_size, dtype=self._scale.dtype) return math_ops.log(math_ops.abs(self._scale)) * event_size return self.scale.log_abs_determinant() def _maybe_check_scale(self): try: return [self.scale.assert_non_singular()] except NotImplementedError: pass return []
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/affine.py
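A usage sketch for `Affine` with a diagonal scale, checked against a direct NumPy computation; it assumes a TF 1.x session environment with `tf.contrib` available:

```python
import numpy as np
import tensorflow as tf

tfb = tf.contrib.distributions.bijectors

shift = np.array([1., 2., 3.], dtype=np.float32)
diag = np.array([-1., 2., 1.], dtype=np.float32)

# Y = diag(scale_diag) @ X + shift, acting on the rightmost (event) dimension.
b = tfb.Affine(shift=shift, scale_diag=diag)

x = np.array([10., 20., 30.], dtype=np.float32)
y = b.forward(x)
fldj = b.forward_log_det_jacobian(x, event_ndims=1)

with tf.Session() as sess:
  y_, fldj_ = sess.run([y, fldj])

np.testing.assert_allclose(y_, diag * x + shift, rtol=1e-6)
# For a diagonal scale, log|det scale| = sum(log|scale_diag|).
np.testing.assert_allclose(fldj_, np.sum(np.log(np.abs(diag))), rtol=1e-6)
```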
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Bijector Ops. Use [tfp.bijectors](/probability/api_docs/python/tfp/bijectors) instead. @@AbsoluteValue @@Affine @@AffineLinearOperator @@AffineScalar @@Bijector @@BatchNormalization @@Chain @@CholeskyOuterProduct @@ConditionalBijector @@Exp @@FillTriangular @@Gumbel @@Identity @@Inline @@Invert @@Kumaraswamy @@MaskedAutoregressiveFlow @@MatrixInverseTriL @@Ordered @@Permute @@PowerTransform @@RealNVP @@Reshape @@ScaleTriL @@Sigmoid @@SinhArcsinh @@SoftmaxCentered @@Softplus @@Softsign @@Square @@TransformDiagonal @@Weibull @@masked_autoregressive_default_template @@masked_dense @@real_nvp_default_template """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import,wildcard-import,line-too-long,g-importing-member from tensorflow.contrib.distributions.python.ops.bijectors.absolute_value import * from tensorflow.contrib.distributions.python.ops.bijectors.affine import * from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import * from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import * from tensorflow.contrib.distributions.python.ops.bijectors.batch_normalization import * from tensorflow.contrib.distributions.python.ops.bijectors.chain import * from tensorflow.contrib.distributions.python.ops.bijectors.cholesky_outer_product import * from tensorflow.contrib.distributions.python.ops.bijectors.conditional_bijector import * from tensorflow.contrib.distributions.python.ops.bijectors.exp import * from tensorflow.contrib.distributions.python.ops.bijectors.fill_triangular import * from tensorflow.contrib.distributions.python.ops.bijectors.gumbel import * from tensorflow.contrib.distributions.python.ops.bijectors.inline import * from tensorflow.contrib.distributions.python.ops.bijectors.invert import * from tensorflow.contrib.distributions.python.ops.bijectors.kumaraswamy import * from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import * from tensorflow.contrib.distributions.python.ops.bijectors.matrix_inverse_tril import * from tensorflow.contrib.distributions.python.ops.bijectors.ordered import * from tensorflow.contrib.distributions.python.ops.bijectors.permute import * from tensorflow.contrib.distributions.python.ops.bijectors.power_transform import * from tensorflow.contrib.distributions.python.ops.bijectors.real_nvp import * from tensorflow.contrib.distributions.python.ops.bijectors.reshape import * from tensorflow.contrib.distributions.python.ops.bijectors.scale_tril import * from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import * from tensorflow.contrib.distributions.python.ops.bijectors.sinh_arcsinh import * from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import * from 
tensorflow.contrib.distributions.python.ops.bijectors.softplus import * from tensorflow.contrib.distributions.python.ops.bijectors.softsign import * from tensorflow.contrib.distributions.python.ops.bijectors.square import * from tensorflow.contrib.distributions.python.ops.bijectors.transform_diagonal import * from tensorflow.python.ops.distributions.bijector import * from tensorflow.python.ops.distributions.identity_bijector import Identity # pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member from tensorflow.python.util.all_util import remove_undocumented remove_undocumented(__name__)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ScaleTriL bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.distributions.python.ops.bijectors import affine_scalar from tensorflow.contrib.distributions.python.ops.bijectors import chain from tensorflow.contrib.distributions.python.ops.bijectors import fill_triangular from tensorflow.contrib.distributions.python.ops.bijectors import softplus from tensorflow.contrib.distributions.python.ops.bijectors import transform_diagonal from tensorflow.python.util import deprecation __all__ = [ "ScaleTriL", ] class ScaleTriL(chain.Chain): """Transforms unconstrained vectors to TriL matrices with positive diagonal. This is implemented as a simple `tfb.Chain` of `tfb.FillTriangular` followed by `tfb.TransformDiagonal`, and provided mostly as a convenience. The default setup is somewhat opinionated, using a Softplus transformation followed by a small shift (`1e-5`) which attempts to avoid numerical issues from zeros on the diagonal. #### Examples ```python import tensorflow_probability as tfp tfd = tfp.distributions tfb = tfp.bijectors b = tfb.ScaleTriL( diag_bijector=tfb.Exp(), diag_shift=None) b.forward(x=[0., 0., 0.]) # Result: [[1., 0.], # [0., 1.]] b.inverse(y=[[1., 0], [.5, 2]]) # Result: [log(2), .5, log(1)] # Define a distribution over PSD matrices of shape `[3, 3]`, # with `1 + 2 + 3 = 6` degrees of freedom. dist = tfd.TransformedDistribution( tfd.Normal(tf.zeros(6), tf.ones(6)), tfb.Chain([tfb.CholeskyOuterProduct(), tfb.ScaleTriL()])) # Using an identity transformation, ScaleTriL is equivalent to # tfb.FillTriangular. b = tfb.ScaleTriL( diag_bijector=tfb.Identity(), diag_shift=None) # For greater control over initialization, one can manually encode # pre- and post- shifts inside of `diag_bijector`. b = tfb.ScaleTriL( diag_bijector=tfb.Chain([ tfb.AffineScalar(shift=1e-3), tfb.Softplus(), tfb.AffineScalar(shift=0.5413)]), # softplus_inverse(1.) # = log(expm1(1.)) = 0.5413 diag_shift=None) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, diag_bijector=None, diag_shift=1e-5, validate_args=False, name="scale_tril"): """Instantiates the `ScaleTriL` bijector. Args: diag_bijector: `Bijector` instance, used to transform the output diagonal to be positive. Default value: `None` (i.e., `tfb.Softplus()`). diag_shift: Float value broadcastable and added to all diagonal entries after applying the `diag_bijector`. 
Setting a positive value forces the output diagonal entries to be positive, but prevents inverting the transformation for matrices with diagonal entries less than this value. Default value: `1e-5` (i.e., a small positive shift is applied by default). validate_args: Python `bool` indicating whether arguments should be checked for correctness. Default value: `False` (i.e., arguments are not validated). name: Python `str` name given to ops managed by this object. Default value: `scale_tril`. """ if diag_bijector is None: diag_bijector = softplus.Softplus(validate_args=validate_args) if diag_shift is not None: diag_bijector = chain.Chain([affine_scalar.AffineScalar(shift=diag_shift), diag_bijector]) super(ScaleTriL, self).__init__( [transform_diagonal.TransformDiagonal(diag_bijector=diag_bijector), fill_triangular.FillTriangular()], validate_args=validate_args, name=name)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/scale_tril.py
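The `ScaleTriL` record above chains a triangular fill with a diagonal transform. Below is a minimal NumPy sketch of that idea, independent of TensorFlow; the function names (`softplus`, `scale_tril_forward`) and the row-major fill order are illustrative assumptions and may differ from `tfb.FillTriangular`'s exact packing.

```python
import numpy as np

def softplus(x):
    # Numerically stable log(1 + exp(x)).
    return np.log1p(np.exp(-np.abs(x))) + np.maximum(x, 0.0)

def scale_tril_forward(v, diag_shift=1e-5):
    # Unconstrained vector of length n*(n+1)/2 -> lower-triangular matrix
    # with a positive diagonal (softplus + small shift on the diagonal).
    n = int((np.sqrt(8 * v.size + 1) - 1) // 2)
    m = np.zeros((n, n))
    m[np.tril_indices(n)] = v                      # simplified row-major fill
    m[np.diag_indices(n)] = softplus(np.diag(m)) + diag_shift
    return m

print(scale_tril_forward(np.zeros(3)))
# [[0.69316... 0.       ]
#  [0.        0.69316...]]   (softplus(0) = log 2, plus the 1e-5 shift)
```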
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gumbel bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Gumbel", ] class Gumbel(bijector.Bijector): """Compute `Y = g(X) = exp(-exp(-(X - loc) / scale))`. This bijector maps inputs from `[-inf, inf]` to `[0, 1]`. The inverse of the bijector applied to a uniform random variable `X ~ U(0, 1)` gives back a random variable with the [Gumbel distribution](https://en.wikipedia.org/wiki/Gumbel_distribution): ```none Y ~ Gumbel(loc, scale) pdf(y; loc, scale) = exp( -( (y - loc) / scale + exp(- (y - loc) / scale) ) ) / scale ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, loc=0., scale=1., validate_args=False, name="gumbel"): """Instantiates the `Gumbel` bijector. Args: loc: Float-like `Tensor` that is the same dtype and is broadcastable with `scale`. This is `loc` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`. scale: Positive Float-like `Tensor` that is the same dtype and is broadcastable with `loc`. This is `scale` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. 
""" self._graph_parents = [] self._name = name self._validate_args = validate_args with self._name_scope("init", values=[loc, scale]): self._loc = ops.convert_to_tensor(loc, name="loc") self._scale = ops.convert_to_tensor(scale, name="scale") check_ops.assert_same_float_dtype([self._loc, self._scale]) if validate_args: self._scale = control_flow_ops.with_dependencies([ check_ops.assert_positive( self._scale, message="Argument scale was not positive") ], self._scale) super(Gumbel, self).__init__( validate_args=validate_args, forward_min_event_ndims=0, name=name) @property def loc(self): """The `loc` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`.""" return self._loc @property def scale(self): """This is `scale` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`.""" return self._scale def _forward(self, x): z = (x - self.loc) / self.scale return math_ops.exp(-math_ops.exp(-z)) def _inverse(self, y): y = self._maybe_assert_valid_y(y) return self.loc - self.scale * math_ops.log(-math_ops.log(y)) def _inverse_log_det_jacobian(self, y): y = self._maybe_assert_valid_y(y) return math_ops.log(self.scale / (-math_ops.log(y) * y)) def _forward_log_det_jacobian(self, x): z = (x - self.loc) / self.scale return -z - math_ops.exp(-z) - math_ops.log(self.scale) def _maybe_assert_valid_y(self, y): if not self.validate_args: return y is_positive = check_ops.assert_non_negative( y, message="Inverse transformation input must be greater than 0.") less_than_one = check_ops.assert_less_equal( y, constant_op.constant(1., y.dtype), message="Inverse transformation input must be less than or equal to 1.") return control_flow_ops.with_dependencies([is_positive, less_than_one], y)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/gumbel.py
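As a quick check of the forward/inverse formulas and the forward log-det-Jacobian in the `Gumbel` record above, here is a small NumPy sketch; the `loc` and `scale` values and the function names are arbitrary illustrations.

```python
import numpy as np

loc, scale = 0.5, 2.0

def gumbel_forward(x):
    z = (x - loc) / scale
    return np.exp(-np.exp(-z))                 # maps (-inf, inf) -> (0, 1)

def gumbel_inverse(y):
    return loc - scale * np.log(-np.log(y))

x = np.array([-1.0, 0.0, 3.0])
y = gumbel_forward(x)
print(np.allclose(gumbel_inverse(y), x))       # True: round trip recovers x

# dy/dx = exp(-z - exp(-z)) / scale, so log|dy/dx| = -z - exp(-z) - log(scale),
# matching _forward_log_det_jacobian in the record above.
z = (x - loc) / scale
print(np.allclose(np.log(np.exp(-z - np.exp(-z)) / scale),
                  -z - np.exp(-z) - np.log(scale)))   # True
```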
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SoftmaxCentered bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.distributions.python.ops import distribution_util from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "SoftmaxCentered", ] class SoftmaxCentered(bijector.Bijector): """Bijector which computes `Y = g(X) = exp([X 0]) / sum(exp([X 0]))`. To implement [softmax](https://en.wikipedia.org/wiki/Softmax_function) as a bijection, the forward transformation appends a value to the input and the inverse removes this coordinate. The appended coordinate represents a pivot, e.g., `softmax(x) = exp(x-c) / sum(exp(x-c))` where `c` is the implicit last coordinate. Example Use: ```python bijector.SoftmaxCentered().forward(tf.math.log([2, 3, 4])) # Result: [0.2, 0.3, 0.4, 0.1] # Extra result: 0.1 bijector.SoftmaxCentered().inverse([0.2, 0.3, 0.4, 0.1]) # Result: tf.math.log([2, 3, 4]) # Extra coordinate removed. ``` At first blush it may seem like the [Invariance of domain]( https://en.wikipedia.org/wiki/Invariance_of_domain) theorem implies this implementation is not a bijection. However, the appended dimension makes the (forward) image non-open and the theorem does not directly apply. """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, validate_args=False, name="softmax_centered"): self._graph_parents = [] self._name = name super(SoftmaxCentered, self).__init__( forward_min_event_ndims=1, validate_args=validate_args, name=name) def _forward_event_shape(self, input_shape): if input_shape.ndims is None or input_shape[-1] is None: return input_shape return tensor_shape.TensorShape([input_shape[-1] + 1]) def _forward_event_shape_tensor(self, input_shape): return (input_shape[-1] + 1)[..., array_ops.newaxis] def _inverse_event_shape(self, output_shape): if output_shape.ndims is None or output_shape[-1] is None: return output_shape if output_shape[-1] <= 1: raise ValueError("output_shape[-1] = %d <= 1" % output_shape[-1]) return tensor_shape.TensorShape([output_shape[-1] - 1]) def _inverse_event_shape_tensor(self, output_shape): if self.validate_args: # It is not possible for a negative shape so we need only check <= 1. 
is_greater_one = check_ops.assert_greater( output_shape[-1], 1, message="Need last dimension greater than 1.") output_shape = control_flow_ops.with_dependencies( [is_greater_one], output_shape) return (output_shape[-1] - 1)[..., array_ops.newaxis] def _forward(self, x): # Pad the last dim with a zeros vector. We need this because it lets us # infer the scale in the inverse function. y = distribution_util.pad(x, axis=-1, back=True) # Set shape hints. if x.shape.ndims is not None: shape = x.shape[:-1].concatenate(x.shape.dims[-1] + 1) y.shape.assert_is_compatible_with(shape) y.set_shape(shape) return nn_ops.softmax(y) def _inverse(self, y): # To derive the inverse mapping note that: # y[i] = exp(x[i]) / normalization # and # y[end] = 1 / normalization. # Thus: # x[i] = log(exp(x[i])) - log(y[end]) - log(normalization) # = log(exp(x[i])/normalization) - log(y[end]) # = log(y[i]) - log(y[end]) # Do this first to make sure CSE catches that it'll happen again in # _inverse_log_det_jacobian. x = math_ops.log(y) log_normalization = (-x[..., -1])[..., array_ops.newaxis] x = x[..., :-1] + log_normalization # Set shape hints. if y.shape.ndims is not None: shape = y.shape[:-1].concatenate(y.shape.dims[-1] - 1) x.shape.assert_is_compatible_with(shape) x.set_shape(shape) return x def _inverse_log_det_jacobian(self, y): # WLOG, consider the vector case: # x = log(y[:-1]) - log(y[-1]) # where, # y[-1] = 1 - sum(y[:-1]). # We have: # det{ dX/dY } = det{ diag(1 ./ y[:-1]) + 1 / y[-1] } # = det{ inv{ diag(y[:-1]) - y[:-1]' y[:-1] } } (1) # = 1 / det{ diag(y[:-1]) - y[:-1]' y[:-1] } # = 1 / { (1 + y[:-1]' inv(diag(y[:-1])) y[:-1]) * # det(diag(y[:-1])) } (2) # = 1 / { y[-1] prod(y[:-1]) } # = 1 / prod(y) # (1) - https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula # or by noting that det{ dX/dY } = 1 / det{ dY/dX } from Bijector # docstring "Tip". # (2) - https://en.wikipedia.org/wiki/Matrix_determinant_lemma return -math_ops.reduce_sum(math_ops.log(y), axis=-1) def _forward_log_det_jacobian(self, x): # This code is similar to nn_ops.log_softmax but different because we have # an implicit zero column to handle. I.e., instead of: # reduce_sum(logits - reduce_sum(exp(logits), dim)) # we must do: # log_normalization = 1 + reduce_sum(exp(logits)) # -log_normalization + reduce_sum(logits - log_normalization) log_normalization = nn_ops.softplus( math_ops.reduce_logsumexp(x, axis=-1, keepdims=True)) return array_ops.squeeze( (-log_normalization + math_ops.reduce_sum( x - log_normalization, axis=-1, keepdims=True)), axis=-1)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/softmax_centered.py
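A minimal NumPy sketch of the `SoftmaxCentered` math from the record above: the forward appends an implicit zero pivot before the softmax, the inverse is `log(y[:-1]) - log(y[-1])`, and the inverse log-det-Jacobian reduces to `-sum(log(y))`. Function names are illustrative.

```python
import numpy as np

def softmax_centered_forward(x):
    z = np.concatenate([x, [0.0]])      # append the implicit pivot coordinate
    e = np.exp(z - z.max())             # shift for numerical stability
    return e / e.sum()

def softmax_centered_inverse(y):
    return np.log(y[:-1]) - np.log(y[-1])

x = np.log([2.0, 3.0, 4.0])
y = softmax_centered_forward(x)
print(y)                                               # [0.2 0.3 0.4 0.1]
print(np.allclose(softmax_centered_inverse(y), x))     # True

# Inverse log-det-Jacobian from the docstring derivation: log(1 / prod(y)).
print(-np.log(y).sum())
```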
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Real NVP bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.layers import core as layers from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import template as template_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = ["RealNVP", "real_nvp_default_template"] class RealNVP(bijector.Bijector): """RealNVP "affine coupling layer" for vector-valued events. Real NVP models a normalizing flow on a `D`-dimensional distribution via a single `D-d`-dimensional conditional distribution [(Dinh et al., 2017)][1]: `y[d:D] = x[d:D] * math_ops.exp(log_scale_fn(x[0:d])) + shift_fn(x[0:d])` `y[0:d] = x[0:d]` The last `D-d` units are scaled and shifted based on the first `d` units only, while the first `d` units are 'masked' and left unchanged. Real NVP's `shift_and_log_scale_fn` computes vector-valued quantities. For scale-and-shift transforms that do not depend on any masked units, i.e. `d=0`, use the `tfb.Affine` bijector with learned parameters instead. Masking is currently only supported for base distributions with `event_ndims=1`. For more sophisticated masking schemes like checkerboard or channel-wise masking [(Papamakarios et al., 2016)][4], use the `tfb.Permute` bijector to re-order desired masked units into the first `d` units. For base distributions with `event_ndims > 1`, use the `tfb.Reshape` bijector to flatten the event shape. Recall that the MAF bijector [(Papamakarios et al., 2016)][4] implements a normalizing flow via an autoregressive transformation. MAF and IAF have opposite computational tradeoffs - MAF can train all units in parallel but must sample units sequentially, while IAF must train units sequentially but can sample in parallel. In contrast, Real NVP can compute both forward and inverse computations in parallel. However, the lack of an autoregressive transformation makes it less expressive on a per-bijector basis. A "valid" `shift_and_log_scale_fn` must compute each `shift` (aka `loc` or "mu" in [Papamakarios et al. (2016)][4]) and `log(scale)` (aka "alpha" in [Papamakarios et al. (2016)][4]) such that each are broadcastable with the arguments to `forward` and `inverse`, i.e., such that the calculations in `forward`, `inverse` [below] are possible. For convenience, `real_nvp_default_template` is offered as a possible `shift_and_log_scale_fn` function. 
NICE [(Dinh et al., 2014)][2] is a special case of the Real NVP bijector which discards the scale transformation, resulting in a constant-time inverse-log-determinant-Jacobian. To use a NICE bijector instead of Real NVP, `shift_and_log_scale_fn` should return `(shift, None)`, and `is_constant_jacobian` should be set to `True` in the `RealNVP` constructor. Calling `real_nvp_default_template` with `shift_only=True` returns one such NICE-compatible `shift_and_log_scale_fn`. Caching: the scalar input depth `D` of the base distribution is not known at construction time. The first call to any of `forward(x)`, `inverse(x)`, `inverse_log_det_jacobian(x)`, or `forward_log_det_jacobian(x)` memoizes `D`, which is re-used in subsequent calls. This shape must be known prior to graph execution (which is the case if using tf.layers). #### Example Use ```python import tensorflow_probability as tfp tfd = tfp.distributions tfb = tfp.bijectors # A common choice for a normalizing flow is to use a Gaussian for the base # distribution. (However, any continuous distribution would work.) E.g., num_dims = 3 num_samples = 1 nvp = tfd.TransformedDistribution( distribution=tfd.MultivariateNormalDiag(loc=np.zeros(num_dims)), bijector=tfb.RealNVP( num_masked=2, shift_and_log_scale_fn=tfb.real_nvp_default_template( hidden_layers=[512, 512]))) x = nvp.sample(num_samples) nvp.log_prob(x) nvp.log_prob(np.zeros([num_samples, num_dims])) ``` For more examples, see [Jang (2018)][3]. #### References [1]: Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density Estimation using Real NVP. In _International Conference on Learning Representations_, 2017. https://arxiv.org/abs/1605.08803 [2]: Laurent Dinh, David Krueger, and Yoshua Bengio. NICE: Non-linear Independent Components Estimation. _arXiv preprint arXiv:1410.8516_, 2014. https://arxiv.org/abs/1410.8516 [3]: Eric Jang. Normalizing Flows Tutorial, Part 2: Modern Normalizing Flows. _Technical Report_, 2018. http://blog.evjang.com/2018/01/nf2.html [4]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked Autoregressive Flow for Density Estimation. In _Neural Information Processing Systems_, 2017. https://arxiv.org/abs/1705.07057 """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, num_masked, shift_and_log_scale_fn, is_constant_jacobian=False, validate_args=False, name=None): """Creates the Real NVP or NICE bijector. Args: num_masked: Python `int` indicating that the first `d` units of the event should be masked. Must be in the closed interval `[1, D-1]`, where `D` is the event size of the base distribution. shift_and_log_scale_fn: Python `callable` which computes `shift` and `log_scale` from both the forward domain (`x`) and the inverse domain (`y`). Calculation must respect the "autoregressive property" (see class docstring). Suggested default `masked_autoregressive_default_template(hidden_layers=...)`. Typically the function contains `tf.Variables` and is wrapped using `tf.compat.v1.make_template`. Returning `None` for either (both) `shift`, `log_scale` is equivalent to (but more efficient than) returning zero. is_constant_jacobian: Python `bool`. Default: `False`. When `True` the implementation assumes `log_scale` does not depend on the forward domain (`x`) or inverse domain (`y`) values. 
(No validation is made; `is_constant_jacobian=False` is always safe but possibly computationally inefficient.) validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str`, name given to ops managed by this object. Raises: ValueError: If num_masked < 1. """ name = name or "real_nvp" if num_masked <= 0: raise ValueError("num_masked must be a positive integer.") self._num_masked = num_masked # At construction time, we don't know input_depth. self._input_depth = None self._shift_and_log_scale_fn = shift_and_log_scale_fn super(RealNVP, self).__init__( forward_min_event_ndims=1, is_constant_jacobian=is_constant_jacobian, validate_args=validate_args, name=name) def _cache_input_depth(self, x): if self._input_depth is None: self._input_depth = tensor_shape.dimension_value( x.shape.with_rank_at_least(1)[-1]) if self._input_depth is None: raise NotImplementedError( "Rightmost dimension must be known prior to graph execution.") if self._num_masked >= self._input_depth: raise ValueError( "Number of masked units must be smaller than the event size.") def _forward(self, x): self._cache_input_depth(x) # Performs scale and shift. x0, x1 = x[:, :self._num_masked], x[:, self._num_masked:] shift, log_scale = self._shift_and_log_scale_fn( x0, self._input_depth - self._num_masked) y1 = x1 if log_scale is not None: y1 *= math_ops.exp(log_scale) if shift is not None: y1 += shift y = array_ops.concat([x0, y1], axis=-1) return y def _inverse(self, y): self._cache_input_depth(y) # Performs un-shift and un-scale. y0, y1 = y[:, :self._num_masked], y[:, self._num_masked:] shift, log_scale = self._shift_and_log_scale_fn( y0, self._input_depth - self._num_masked) x1 = y1 if shift is not None: x1 -= shift if log_scale is not None: x1 *= math_ops.exp(-log_scale) x = array_ops.concat([y0, x1], axis=-1) return x def _inverse_log_det_jacobian(self, y): self._cache_input_depth(y) y0 = y[:, :self._num_masked] _, log_scale = self._shift_and_log_scale_fn( y0, self._input_depth - self._num_masked) if log_scale is None: return constant_op.constant(0., dtype=y.dtype, name="ildj") return -math_ops.reduce_sum(log_scale, axis=-1) def _forward_log_det_jacobian(self, x): self._cache_input_depth(x) x0 = x[:, :self._num_masked] _, log_scale = self._shift_and_log_scale_fn( x0, self._input_depth - self._num_masked) if log_scale is None: return constant_op.constant(0., dtype=x.dtype, name="fldj") return math_ops.reduce_sum(log_scale, axis=-1) @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def real_nvp_default_template( hidden_layers, shift_only=False, activation=nn_ops.relu, name=None, *args, **kwargs): """Build a scale-and-shift function using a multi-layer neural network. This will be wrapped in a make_template to ensure the variables are only created once. It takes the `d`-dimensional input x[0:d] and returns the `D-d` dimensional outputs `loc` ("mu") and `log_scale` ("alpha"). Arguments: hidden_layers: Python `list`-like of non-negative integer, scalars indicating the number of units in each hidden layer. Default: `[512, 512]. shift_only: Python `bool` indicating if only the `shift` term shall be computed (i.e. NICE bijector). Default: `False`. activation: Activation function (callable). Explicitly setting to `None` implies a linear activation. 
name: A name for ops managed by this function. Default: "real_nvp_default_template". *args: `tf.compat.v1.layers.dense` arguments. **kwargs: `tf.compat.v1.layers.dense` keyword arguments. Returns: shift: `Float`-like `Tensor` of shift terms ("mu" in [Papamakarios et al. (2016)][1]). log_scale: `Float`-like `Tensor` of log(scale) terms ("alpha" in [Papamakarios et al. (2016)][1]). Raises: NotImplementedError: if rightmost dimension of `inputs` is unknown prior to graph execution. #### References [1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked Autoregressive Flow for Density Estimation. In _Neural Information Processing Systems_, 2017. https://arxiv.org/abs/1705.07057 """ with ops.name_scope(name, "real_nvp_default_template"): def _fn(x, output_units): """Fully connected MLP parameterized via `real_nvp_template`.""" for units in hidden_layers: x = layers.dense( inputs=x, units=units, activation=activation, *args, **kwargs) x = layers.dense( inputs=x, units=(1 if shift_only else 2) * output_units, activation=None, *args, **kwargs) if shift_only: return x, None shift, log_scale = array_ops.split(x, 2, axis=-1) return shift, log_scale return template_ops.make_template("real_nvp_default_template", _fn)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py
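To make the coupling structure in the `RealNVP` record above concrete, here is a small NumPy sketch of one affine coupling step. The `toy_shift_and_log_scale` function is a stand-in assumption for the learned `shift_and_log_scale_fn`; the inverse is exact because both outputs depend only on the untouched first `d` units.

```python
import numpy as np

num_masked = 2   # d: number of pass-through units

def toy_shift_and_log_scale(x0, output_units):
    # Stand-in for a learned network: any deterministic function of x0 works.
    shift = np.tanh(x0.sum()) * np.ones(output_units)
    log_scale = 0.1 * x0.mean() * np.ones(output_units)
    return shift, log_scale

def coupling_forward(x):
    x0, x1 = x[:num_masked], x[num_masked:]
    shift, log_scale = toy_shift_and_log_scale(x0, x1.size)
    return np.concatenate([x0, x1 * np.exp(log_scale) + shift])

def coupling_inverse(y):
    y0, y1 = y[:num_masked], y[num_masked:]
    shift, log_scale = toy_shift_and_log_scale(y0, y1.size)
    return np.concatenate([y0, (y1 - shift) * np.exp(-log_scale)])

x = np.array([0.3, -1.2, 2.0, 0.7])
print(np.allclose(coupling_inverse(coupling_forward(x)), x))   # True
# The forward log-det-Jacobian is just sum(log_scale) over the transformed block.
```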
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """AffineLinearOperator bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.util import deprecation __all__ = [ "AffineLinearOperator", ] class AffineLinearOperator(bijector.Bijector): """Compute `Y = g(X; shift, scale) = scale @ X + shift`. `shift` is a numeric `Tensor` and `scale` is a `LinearOperator`. If `X` is a scalar then the forward transformation is: `scale * X + shift` where `*` denotes the scalar product. Note: we don't always simply transpose `X` (but write it this way for brevity). Actually the input `X` undergoes the following transformation before being premultiplied by `scale`: 1. If there are no sample dims, we call `X = tf.expand_dims(X, 0)`, i.e., `new_sample_shape = [1]`. Otherwise do nothing. 2. The sample shape is flattened to have one dimension, i.e., `new_sample_shape = [n]` where `n = tf.reduce_prod(old_sample_shape)`. 3. The sample dim is cyclically rotated left by 1, i.e., `new_shape = [B1,...,Bb, k, n]` where `n` is as above, `k` is the event_shape, and `B1,...,Bb` are the batch shapes for each of `b` batch dimensions. (For more details see `shape.make_batch_of_event_sample_matrices`.) The result of the above transformation is that `X` can be regarded as a batch of matrices where each column is a draw from the distribution. After premultiplying by `scale`, we take the inverse of this procedure. The input `Y` also undergoes the same transformation before/after premultiplying by `inv(scale)`. Example Use: ```python linalg = tf.linalg x = [1., 2, 3] shift = [-1., 0., 1] diag = [1., 2, 3] scale = linalg.LinearOperatorDiag(diag) affine = AffineLinearOperator(shift, scale) # In this case, `forward` is equivalent to: # y = scale @ x + shift y = affine.forward(x) # [0., 4, 10] shift = [2., 3, 1] tril = [[1., 0, 0], [2, 1, 0], [3, 2, 1]] scale = linalg.LinearOperatorLowerTriangular(tril) affine = AffineLinearOperator(shift, scale) # In this case, `forward` is equivalent to: # np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift y = affine.forward(x) # [3., 7, 11] ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). 
You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, shift=None, scale=None, validate_args=False, name="affine_linear_operator"): """Instantiates the `AffineLinearOperator` bijector. Args: shift: Floating-point `Tensor`. scale: Subclass of `LinearOperator`. Represents the (batch) positive definite matrix `M` in `R^{k x k}`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. Raises: TypeError: if `scale` is not a `LinearOperator`. TypeError: if `shift.dtype` does not match `scale.dtype`. ValueError: if not `scale.is_non_singular`. """ self._graph_parents = [] self._name = name self._validate_args = validate_args graph_parents = [] with self._name_scope("init", values=[shift]): # In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`. dtype = dtypes.float32 if shift is not None: shift = ops.convert_to_tensor(shift, name="shift") graph_parents += [shift] dtype = shift.dtype.base_dtype self._shift = shift if scale is not None: if (shift is not None and shift.dtype.base_dtype != scale.dtype.base_dtype): raise TypeError( "shift.dtype({}) is incompatible with scale.dtype({}).".format( shift.dtype, scale.dtype)) if not isinstance(scale, linear_operator.LinearOperator): raise TypeError("scale is not an instance of tf.LinearOperator") if validate_args and not scale.is_non_singular: raise ValueError("Scale matrix must be non-singular.") graph_parents += scale.graph_parents if scale.tensor_rank is not None: batch_ndims = scale.tensor_rank - 2 else: batch_ndims = scale.tensor_rank_tensor() - 2 graph_parents += [batch_ndims] if scale.dtype is not None: dtype = scale.dtype.base_dtype else: batch_ndims = 0 # We won't need shape inference when scale is None. self._scale = scale self._shaper = _DistributionShape( batch_ndims=batch_ndims, event_ndims=1, validate_args=validate_args) super(AffineLinearOperator, self).__init__( forward_min_event_ndims=1, graph_parents=graph_parents, is_constant_jacobian=True, dtype=dtype, validate_args=validate_args, name=name) @property def shift(self): """The `shift` `Tensor` in `Y = scale @ X + shift`.""" return self._shift @property def scale(self): """The `scale` `LinearOperator` in `Y = scale @ X + shift`.""" return self._scale def _forward(self, x): y = x if self.scale is not None: y, sample_shape = self._shaper.make_batch_of_event_sample_matrices( y, expand_batch_dim=False) with ops.control_dependencies(self._maybe_collect_assertions() if self.validate_args else []): y = self.scale.matmul(y) y = self._shaper.undo_make_batch_of_event_sample_matrices( y, sample_shape, expand_batch_dim=False) if self.shift is not None: y += self.shift return y def _inverse(self, y): x = y if self.shift is not None: x -= self.shift if self.scale is not None: x, sample_shape = self._shaper.make_batch_of_event_sample_matrices( x, expand_batch_dim=False) # Solve fails if the op is singular so we may safely skip this assertion. x = self.scale.solve(x) x = self._shaper.undo_make_batch_of_event_sample_matrices( x, sample_shape, expand_batch_dim=False) return x def _forward_log_det_jacobian(self, x): # is_constant_jacobian = True for this bijector, hence the # `log_det_jacobian` need only be specified for a single input, as this will # be tiled to match `event_ndims`. 
if self.scale is None: return constant_op.constant(0., dtype=x.dtype.base_dtype) with ops.control_dependencies(self._maybe_collect_assertions() if self.validate_args else []): return self.scale.log_abs_determinant() def _maybe_collect_assertions(self): try: return [self.scale.assert_non_singular()] except NotImplementedError: pass return []
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/affine_linear_operator.py
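The lower-triangular example in the `AffineLinearOperator` docstring above can be checked directly in NumPy; the log-det-Jacobian is constant in `x` and equals `log|det(scale)|`. The array values repeat the docstring's example.

```python
import numpy as np

shift = np.array([2.0, 3.0, 1.0])
tril = np.array([[1.0, 0.0, 0.0],
                 [2.0, 1.0, 0.0],
                 [3.0, 2.0, 1.0]])

x = np.array([1.0, 2.0, 3.0])
y = tril @ x + shift
print(y)                                                  # [ 3.  7. 11.] as documented

# Invert by solving rather than forming an explicit inverse.
print(np.allclose(np.linalg.solve(tril, y - shift), x))   # True

# Constant forward log-det-Jacobian: log|det(scale)| (~0 for a unit diagonal).
print(np.log(np.abs(np.linalg.det(tril))))
```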
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TransformDiagonal bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import array_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "TransformDiagonal", ] class TransformDiagonal(bijector.Bijector): """Applies a Bijector to the diagonal of a matrix. #### Example ```python b = tfb.TransformDiagonal(diag_bijector=tfb.Exp()) b.forward([[1., 0.], [0., 1.]]) # ==> [[2.718, 0.], [0., 2.718]] ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, diag_bijector, validate_args=False, name="transform_diagonal"): """Instantiates the `TransformDiagonal` bijector. Args: diag_bijector: `Bijector` instance used to transform the diagonal. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. """ self._diag_bijector = diag_bijector super(TransformDiagonal, self).__init__( forward_min_event_ndims=2, inverse_min_event_ndims=2, validate_args=validate_args, name=name) def _forward(self, x): diag = self._diag_bijector.forward(array_ops.matrix_diag_part(x)) return array_ops.matrix_set_diag(x, diag) def _inverse(self, y): diag = self._diag_bijector.inverse(array_ops.matrix_diag_part(y)) return array_ops.matrix_set_diag(y, diag) def _forward_log_det_jacobian(self, x): # We formulate the Jacobian with respect to the flattened matrices # `vec(x)` and `vec(y)`. Suppose for notational convenience that # the first `n` entries of `vec(x)` are the diagonal of `x`, and # the remaining `n**2-n` entries are the off-diagonals in # arbitrary order. Then the Jacobian is a block-diagonal matrix, # with the Jacobian of the diagonal bijector in the first block, # and the identity Jacobian for the remaining entries (since this # bijector acts as the identity on non-diagonal entries): # # J_vec(x) (vec(y)) = # ------------------------------- # | J_diag(x) (diag(y)) 0 | n entries # | | # | 0 I | n**2-n entries # ------------------------------- # n n**2-n # # Since the log-det of the second (identity) block is zero, the # overall log-det-jacobian is just the log-det of first block, # from the diagonal bijector. # # Note that for elementwise operations (exp, softplus, etc) the # first block of the Jacobian will itself be a diagonal matrix, # but our implementation does not require this to be true. 
return self._diag_bijector.forward_log_det_jacobian( array_ops.matrix_diag_part(x), event_ndims=1) def _inverse_log_det_jacobian(self, y): return self._diag_bijector.inverse_log_det_jacobian( array_ops.matrix_diag_part(y), event_ndims=1)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/transform_diagonal.py
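A minimal NumPy sketch of the `TransformDiagonal` behavior documented above: only the diagonal is pushed through the inner bijector (here `exp`, as an assumed example), and the log-det-Jacobian is the inner bijector's log-det on the diagonal entries.

```python
import numpy as np

def transform_diagonal_forward(x, diag_fn=np.exp):
    y = x.copy()
    np.fill_diagonal(y, diag_fn(np.diag(x)))    # off-diagonals are untouched
    return y

x = np.array([[1.0, 0.5],
              [0.3, 1.0]])
print(transform_diagonal_forward(x))
# [[2.718... 0.5     ]
#  [0.3      2.718...]]

# For diag_fn = exp, log|d exp(t)/dt| = t, so the fldj here is the trace of x.
print(np.trace(x))    # 2.0
```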
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ordered bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Ordered", ] class Ordered(bijector.Bijector): """Bijector which maps a tensor x_k that has increasing elements in the last dimension to an unconstrained tensor y_k. Both the domain and the codomain of the mapping is [-inf, inf], however, the input of the forward mapping must be strictly increasing. The inverse of the bijector applied to a normal random vector `y ~ N(0, 1)` gives back a sorted random vector with the same distribution `x ~ N(0, 1)` where `x = sort(y)` On the last dimension of the tensor, Ordered bijector performs: `y[0] = x[0]` `y[1:] = math_ops.log(x[1:] - x[:-1])` #### Example Use: ```python bijector.Ordered().forward([2, 3, 4]) # Result: [2., 0., 0.] bijector.Ordered().inverse([0.06428002, -1.07774478, -0.71530371]) # Result: [0.06428002, 0.40464228, 0.8936858] ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). 
You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, validate_args=False, name="ordered"): super(Ordered, self).__init__( forward_min_event_ndims=1, validate_args=validate_args, name=name) def _forward_event_shape(self, input_shape): if input_shape.ndims is None or input_shape[-1] is None: return input_shape return tensor_shape.TensorShape([input_shape[-1]]) def _forward_event_shape_tensor(self, input_shape): return (input_shape[-1])[..., array_ops.newaxis] def _inverse_event_shape(self, output_shape): if output_shape.ndims is None or output_shape[-1] is None: return output_shape if output_shape[-1] <= 1: raise ValueError("output_shape[-1] = %d <= 1" % output_shape[-1]) return tensor_shape.TensorShape([output_shape[-1]]) def _inverse_event_shape_tensor(self, output_shape): if self.validate_args: is_greater_one = check_ops.assert_greater( output_shape[-1], 1, message="Need last dimension greater than 1.") output_shape = control_flow_ops.with_dependencies( [is_greater_one], output_shape) return (output_shape[-1])[..., array_ops.newaxis] def _forward(self, x): x = self._maybe_assert_valid_x(x) y0 = x[..., 0, array_ops.newaxis] yk = math_ops.log(x[..., 1:] - x[..., :-1]) y = array_ops.concat([y0, yk], axis=-1) return y def _inverse(self, y): x0 = y[..., 0, array_ops.newaxis] xk = math_ops.exp(y[..., 1:]) x = array_ops.concat([x0, xk], axis=-1) return math_ops.cumsum(x, axis=-1) def _inverse_log_det_jacobian(self, y): # The Jacobian of the inverse mapping is lower # triangular, with the diagonal elements being: # J[i,i] = 1 if i=1, and # exp(y_i) if 1<i<=K # which gives the absolute Jacobian determinant: # |det(Jac)| = prod_{i=1}^{K} exp(y[i]). # (1) - Stan Modeling Language User's Guide and Reference Manual # Version 2.17.0 session 35.2 return math_ops.reduce_sum(y[..., 1:], axis=-1) def _forward_log_det_jacobian(self, x): x = self._maybe_assert_valid_x(x) return -math_ops.reduce_sum( math_ops.log(x[..., 1:] - x[..., :-1]), axis=-1) def _maybe_assert_valid_x(self, x): if not self.validate_args: return x is_valid = check_ops.assert_positive( x[..., 1:] - x[..., :-1], message="Forward transformation input must be strictly increasing.") return control_flow_ops.with_dependencies([is_valid], x)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/ordered.py
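The `Ordered` record above maps strictly increasing vectors to unconstrained ones by keeping the first entry and taking logs of successive differences. A small NumPy sketch, with illustrative function names:

```python
import numpy as np

def ordered_forward(x):
    # x must be strictly increasing along its last axis.
    return np.concatenate([x[:1], np.log(np.diff(x))])

def ordered_inverse(y):
    return np.cumsum(np.concatenate([y[:1], np.exp(y[1:])]))

x = np.array([2.0, 3.0, 4.0])
y = ordered_forward(x)
print(y)                                       # [2. 0. 0.] as in the docstring
print(np.allclose(ordered_inverse(y), x))      # True

# Inverse log-det-Jacobian per the docstring derivation: sum(y[1:]).
print(y[1:].sum())                             # 0.0
```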
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Invert bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Invert", ] class Invert(bijector.Bijector): """Bijector which inverts another Bijector. Example Use: [ExpGammaDistribution (see Background & Context)]( https://reference.wolfram.com/language/ref/ExpGammaDistribution.html) models `Y=log(X)` where `X ~ Gamma`. ```python exp_gamma_distribution = TransformedDistribution( distribution=Gamma(concentration=1., rate=2.), bijector=bijector.Invert(bijector.Exp()) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, bijector, validate_args=False, name=None): """Creates a `Bijector` which swaps the meaning of `inverse` and `forward`. Note: An inverted bijector's `inverse_log_det_jacobian` is often more efficient if the base bijector implements `_forward_log_det_jacobian`. If `_forward_log_det_jacobian` is not implemented then the following code is used: ```python y = self.inverse(x, **kwargs) return -self.inverse_log_det_jacobian(y, **kwargs) ``` Args: bijector: Bijector instance. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str`, name given to ops managed by this object. 
""" if not bijector._is_injective: # pylint: disable=protected-access raise NotImplementedError( "Invert is not implemented for non-injective bijectors.") self._bijector = bijector super(Invert, self).__init__( graph_parents=bijector.graph_parents, forward_min_event_ndims=bijector.inverse_min_event_ndims, inverse_min_event_ndims=bijector.forward_min_event_ndims, is_constant_jacobian=bijector.is_constant_jacobian, validate_args=validate_args, dtype=bijector.dtype, name=name or "_".join(["invert", bijector.name])) def _forward_event_shape(self, input_shape): return self.bijector._inverse_event_shape(input_shape) # pylint: disable=protected-access def _forward_event_shape_tensor(self, input_shape): return self.bijector._inverse_event_shape_tensor(input_shape) # pylint: disable=protected-access def _inverse_event_shape(self, output_shape): return self.bijector._forward_event_shape(output_shape) # pylint: disable=protected-access def _inverse_event_shape_tensor(self, output_shape): return self.bijector._forward_event_shape_tensor(output_shape) # pylint: disable=protected-access @property def bijector(self): return self._bijector def _forward(self, x, **kwargs): return self.bijector._inverse(x, **kwargs) # pylint: disable=protected-access def _inverse(self, y, **kwargs): return self.bijector._forward(y, **kwargs) # pylint: disable=protected-access def _inverse_log_det_jacobian(self, y, **kwargs): return self.bijector._forward_log_det_jacobian(y, **kwargs) # pylint: disable=protected-access def _forward_log_det_jacobian(self, x, **kwargs): return self.bijector._inverse_log_det_jacobian(x, **kwargs) # pylint: disable=protected-access
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/invert.py
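The `Invert` record above only swaps roles; nothing new is computed. The toy classes below sketch that idea outside TensorFlow (both class names are made up for illustration), using `Y = exp(X)` as the wrapped bijector.

```python
import numpy as np

class ToyExp:
    # Y = exp(X); forward log-det-Jacobian is log|dY/dX| = x.
    def forward(self, x): return np.exp(x)
    def inverse(self, y): return np.log(y)
    def forward_log_det_jacobian(self, x): return x
    def inverse_log_det_jacobian(self, y): return -np.log(y)

class ToyInvert:
    # Swap forward/inverse (and their log-det-Jacobians) of a wrapped bijector.
    def __init__(self, bijector): self.bijector = bijector
    def forward(self, x): return self.bijector.inverse(x)
    def inverse(self, y): return self.bijector.forward(y)
    def forward_log_det_jacobian(self, x):
        return self.bijector.inverse_log_det_jacobian(x)
    def inverse_log_det_jacobian(self, y):
        return self.bijector.forward_log_det_jacobian(y)

log_bijector = ToyInvert(ToyExp())                    # behaves like Y = log(X)
print(log_bijector.forward(np.e))                     # 1.0
print(log_bijector.forward_log_det_jacobian(np.e))    # -1.0 = log|d log(x)/dx| at x = e
```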
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Square bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Square", ] class Square(bijector.Bijector): """Compute `g(X) = X^2`; X is a positive real number. g is a bijection between the non-negative real numbers (R_+) and the non-negative real numbers. #### Examples ```python bijector.Square().forward(x=[[1., 0], [2, 1]]) # Result: [[1., 0], [4, 1]], i.e., x^2 bijector.Square().inverse(y=[[1., 4], [9, 1]]) # Result: [[1., 2], [3, 1]], i.e., sqrt(y). ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, validate_args=False, name="square"): """Instantiates the `Square` bijector. Args: validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. """ self._name = name super(Square, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name) def _forward(self, x): x = self._maybe_assert_valid(x) return math_ops.square(x) def _inverse(self, y): y = self._maybe_assert_valid(y) return math_ops.sqrt(y) def _forward_log_det_jacobian(self, x): x = self._maybe_assert_valid(x) return np.log(2.) + math_ops.log(x) def _maybe_assert_valid(self, t): if not self.validate_args: return t is_valid = check_ops.assert_non_negative( t, message="All elements must be non-negative.") return control_flow_ops.with_dependencies([is_valid], t)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/square.py
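A quick NumPy check of the `Square` record above: the example values mirror the docstring, and the elementwise forward log-det-Jacobian is `log(2) + log(x)`.

```python
import numpy as np

x = np.array([[1.0, 0.0], [2.0, 1.0]])
y = x ** 2
print(y)                           # [[1. 0.] [4. 1.]]
print(np.sqrt(y))                  # recovers x on the non-negative domain

# log|d(x^2)/dx| = log(2x) = log(2) + log(x) for x > 0.
x_pos = np.array([0.5, 2.0])
print(np.allclose(np.log(2.0) + np.log(x_pos), np.log(2.0 * x_pos)))   # True
```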
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Softplus bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.ops.distributions import util as distribution_util from tensorflow.python.util import deprecation __all__ = [ "Softplus", ] class Softplus(bijector.Bijector): """Bijector which computes `Y = g(X) = Log[1 + exp(X)]`. The softplus `Bijector` has the following two useful properties: * The domain is the positive real numbers * `softplus(x) approx x`, for large `x`, so it does not overflow as easily as the `Exp` `Bijector`. The optional nonzero `hinge_softness` parameter changes the transition at zero. With `hinge_softness = c`, the bijector is: ```f_c(x) := c * g(x / c) = c * Log[1 + exp(x / c)].``` For large `x >> 1`, `c * Log[1 + exp(x / c)] approx c * Log[exp(x / c)] = x`, so the behavior for large `x` is the same as the standard softplus. As `c > 0` approaches 0 from the right, `f_c(x)` becomes less and less soft, approaching `max(0, x)`. * `c = 1` is the default. * `c > 0` but small means `f(x) approx ReLu(x) = max(0, x)`. * `c < 0` flips sign and reflects around the `y-axis`: `f_{-c}(x) = -f_c(-x)`. * `c = 0` results in a non-bijective transformation and triggers an exception. Example Use: ```python # Create the Y=g(X)=softplus(X) transform which works only on Tensors with 1 # batch ndim and 2 event ndims (i.e., vector of matrices). softplus = Softplus() x = [[[1., 2], [3, 4]], [[5, 6], [7, 8]]] log(1 + exp(x)) == softplus.forward(x) log(exp(x) - 1) == softplus.inverse(x) ``` Note: log(.) and exp(.) are applied element-wise but the Jacobian is a reduction over the event space. """ @distribution_util.AppendDocstring( kwargs_dict={ "hinge_softness": ( "Nonzero floating point `Tensor`. Controls the softness of what " "would otherwise be a kink at the origin. Default is 1.0")}) @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). 
You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, hinge_softness=None, validate_args=False, name="softplus"): with ops.name_scope(name, values=[hinge_softness]): if hinge_softness is not None: self._hinge_softness = ops.convert_to_tensor( hinge_softness, name="hinge_softness") else: self._hinge_softness = None if validate_args: nonzero_check = check_ops.assert_none_equal( ops.convert_to_tensor( 0, dtype=self.hinge_softness.dtype), self.hinge_softness, message="hinge_softness must be non-zero") self._hinge_softness = control_flow_ops.with_dependencies( [nonzero_check], self.hinge_softness) super(Softplus, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name) def _forward(self, x): if self.hinge_softness is None: return nn_ops.softplus(x) hinge_softness = math_ops.cast(self.hinge_softness, x.dtype) return hinge_softness * nn_ops.softplus(x / hinge_softness) def _inverse(self, y): if self.hinge_softness is None: return distribution_util.softplus_inverse(y) hinge_softness = math_ops.cast(self.hinge_softness, y.dtype) return hinge_softness * distribution_util.softplus_inverse( y / hinge_softness) def _inverse_log_det_jacobian(self, y): # Could also do: # ildj = math_ops.reduce_sum(y - distribution_util.softplus_inverse(y), # axis=event_dims) # but the following is more numerically stable. Ie, # Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1] # ==> dX/dY = exp{Y} / (exp{Y} - 1) # = 1 / (1 - exp{-Y}), # which is the most stable for large Y > 0. For small Y, we use # 1 - exp{-Y} approx Y. if self.hinge_softness is not None: y /= math_ops.cast(self.hinge_softness, y.dtype) return -math_ops.log(-math_ops.expm1(-y)) def _forward_log_det_jacobian(self, x): if self.hinge_softness is not None: x /= math_ops.cast(self.hinge_softness, x.dtype) return -nn_ops.softplus(-x) @property def hinge_softness(self): return self._hinge_softness
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/softplus.py
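The `hinge_softness` rescaling in the `Softplus` record above is `f_c(x) = c * softplus(x / c)`. The NumPy sketch below checks the round trip for two values of `c`, using the stabilized inverse `log(exp(u) - 1) = u + log(-expm1(-u))` mentioned in the code comments; function names are illustrative.

```python
import numpy as np

def softplus(x, c=1.0):
    return c * np.logaddexp(x / c, 0.0)            # c * log(1 + exp(x / c))

def softplus_inverse(y, c=1.0):
    u = y / c
    return c * (u + np.log(-np.expm1(-u)))         # stable log(exp(u) - 1), rescaled

x = np.linspace(-3.0, 3.0, 7)
for c in (1.0, 0.1):
    y = softplus(x, c)
    print(c, np.allclose(softplus_inverse(y, c), x))   # True; small c ~ max(0, x)
```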
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Sigmoid bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Sigmoid", ] class Sigmoid(bijector.Bijector): """Bijector which computes `Y = g(X) = 1 / (1 + exp(-X))`.""" @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, validate_args=False, name="sigmoid"): super(Sigmoid, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name) def _forward(self, x): return math_ops.sigmoid(x) def _inverse(self, y): return math_ops.log(y) - math_ops.log1p(-y) def _inverse_log_det_jacobian(self, y): return -math_ops.log(y) - math_ops.log1p(-y) def _forward_log_det_jacobian(self, x): return -nn_ops.softplus(-x) - nn_ops.softplus(x)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/sigmoid.py
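A small NumPy check of the `Sigmoid` record above: the inverse is the logit, and the forward log-det-Jacobian `-softplus(-x) - softplus(x)` equals `log(s(x)) + log(1 - s(x))`.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def logit(y):
    return np.log(y) - np.log1p(-y)

x = np.array([-2.0, 0.0, 3.0])
y = sigmoid(x)
print(np.allclose(logit(y), x))    # True: logit inverts sigmoid

# log|dy/dx| = log(y) + log(1 - y) = -softplus(-x) - softplus(x)
print(np.allclose(np.log(y) + np.log1p(-y),
                  -np.logaddexp(0.0, -x) - np.logaddexp(0.0, x)))   # True
```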
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Exp bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.distributions.python.ops.bijectors import power_transform from tensorflow.python.util import deprecation __all__ = [ "Exp", ] class Exp(power_transform.PowerTransform): """Compute `Y = g(X) = exp(X)`. Example Use: ```python # Create the Y=g(X)=exp(X) transform which works only on Tensors with 1 # batch ndim 2. exp = Exp() x = [[[1., 2], [3, 4]], [[5, 6], [7, 8]]] exp(x) == exp.forward(x) log(x) == exp.inverse(x) ``` Note: the exp(.) is applied element-wise but the Jacobian is a reduction over the event space. """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, validate_args=False, name="exp"): """Instantiates the `Exp` bijector. Args: validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. """ # forward_min_event_ndims = 0. # No forward_min_event_ndims specified as this is done in PowerTransform. super(Exp, self).__init__( validate_args=validate_args, name=name)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/exp.py
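A hedged sketch of how the `Exp` bijector above might be exercised; with `event_ndims=1` the forward log-det reduces to the sum of `x` over the event axis. The concrete values are illustrative only.

```python
# Sketch only: Exp is PowerTransform with power=0, i.e. elementwise exp/log.
import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp

exp_bijector = Exp()
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = exp_bijector.forward(x)               # elementwise exp(x)
x_back = exp_bijector.inverse(y)          # elementwise log(y)
# The Jacobian is diagonal, so log|det J| = sum_i x_i per event vector.
fldj = exp_bijector.forward_log_det_jacobian(x, event_ndims=1)

with tf.Session() as sess:
  np.testing.assert_allclose(sess.run(fldj), [3.0, 7.0], atol=1e-5)
```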
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Affine bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "AffineScalar", ] class AffineScalar(bijector.Bijector): """Compute `Y = g(X; shift, scale) = scale * X + shift`. Examples: ```python # Y = X b = AffineScalar() # Y = X + shift b = AffineScalar(shift=[1., 2, 3]) # Y = 2 * X + shift b = AffineScalar( shift=[1., 2, 3], scale=2.) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, shift=None, scale=None, validate_args=False, name="affine_scalar"): """Instantiates the `AffineScalar` bijector. This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments, giving the forward operation: ```none Y = g(X) = scale * X + shift ``` if `scale` is not specified, then the bijector has the semantics of `scale = 1.`. Similarly, if `shift` is not specified, then the bijector has the semantics of `shift = 0.`. Args: shift: Floating-point `Tensor`. If this is set to `None`, no shift is applied. scale: Floating-point `Tensor`. If this is set to `None`, no scale is applied. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. 
""" self._graph_parents = [] self._name = name self._validate_args = validate_args with self._name_scope("init", values=[scale, shift]): self._shift = shift self._scale = scale if self._shift is not None: self._shift = ops.convert_to_tensor(shift, name="shift") if self._scale is not None: self._scale = ops.convert_to_tensor(self._scale, name="scale") if validate_args: self._scale = control_flow_ops.with_dependencies( [check_ops.assert_none_equal( self._scale, array_ops.zeros([], dtype=self._scale.dtype))], self._scale) super(AffineScalar, self).__init__( forward_min_event_ndims=0, is_constant_jacobian=True, validate_args=validate_args, name=name) @property def shift(self): """The `shift` `Tensor` in `Y = scale @ X + shift`.""" return self._shift @property def scale(self): """The `scale` `LinearOperator` in `Y = scale @ X + shift`.""" return self._scale def _forward(self, x): y = array_ops.identity(x) if self.scale is not None: y *= self.scale if self.shift is not None: y += self.shift return y def _inverse(self, y): x = array_ops.identity(y) if self.shift is not None: x -= self.shift if self.scale is not None: x /= self.scale return x def _forward_log_det_jacobian(self, x): # is_constant_jacobian = True for this bijector, hence the # `log_det_jacobian` need only be specified for a single input, as this will # be tiled to match `event_ndims`. if self.scale is None: return constant_op.constant(0., dtype=x.dtype.base_dtype) return math_ops.log(math_ops.abs(self.scale))
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/affine_scalar.py
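A usage sketch for `AffineScalar` with scalar `shift`/`scale`, both assumed values for illustration; the constant log-det equals `log|scale|` per scalar event.

```python
# Sketch only: Y = 2 * X + 1 and its constant Jacobian log-determinant.
import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar

b = AffineScalar(shift=1.0, scale=2.0)
x = tf.constant([0.0, 1.0, 2.0])
y = b.forward(x)                                   # 2 * x + 1 -> [1., 3., 5.]
x_back = b.inverse(y)                              # (y - 1) / 2
fldj = b.forward_log_det_jacobian(x, event_ndims=0)

with tf.Session() as sess:
  np.testing.assert_allclose(sess.run(y), [1.0, 3.0, 5.0])
  np.testing.assert_allclose(sess.run(fldj), np.log(2.0), atol=1e-6)
```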
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Inline bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Inline", ] class Inline(bijector.Bijector): """Bijector constructed from custom callables. Example Use: ```python exp = Inline( forward_fn=tf.exp, inverse_fn=tf.math.log, inverse_log_det_jacobian_fn=( lambda y: -tf.reduce_sum(tf.math.log(y), axis=-1)), name="exp") ``` The above example is equivalent to the `Bijector` `Exp()`. """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, forward_fn=None, inverse_fn=None, inverse_log_det_jacobian_fn=None, forward_log_det_jacobian_fn=None, forward_event_shape_fn=None, forward_event_shape_tensor_fn=None, inverse_event_shape_fn=None, inverse_event_shape_tensor_fn=None, is_constant_jacobian=False, validate_args=False, forward_min_event_ndims=None, inverse_min_event_ndims=None, name="inline"): """Creates a `Bijector` from callables. Args: forward_fn: Python callable implementing the forward transformation. inverse_fn: Python callable implementing the inverse transformation. inverse_log_det_jacobian_fn: Python callable implementing the log o det o jacobian of the inverse transformation. forward_log_det_jacobian_fn: Python callable implementing the log o det o jacobian of the forward transformation. forward_event_shape_fn: Python callable implementing non-identical static event shape changes. Default: shape is assumed unchanged. forward_event_shape_tensor_fn: Python callable implementing non-identical event shape changes. Default: shape is assumed unchanged. inverse_event_shape_fn: Python callable implementing non-identical static event shape changes. Default: shape is assumed unchanged. inverse_event_shape_tensor_fn: Python callable implementing non-identical event shape changes. Default: shape is assumed unchanged. is_constant_jacobian: Python `bool` indicating that the Jacobian is constant for all input arguments. validate_args: Python `bool` indicating whether arguments should be checked for correctness. forward_min_event_ndims: Python `int` indicating the minimal dimensionality this bijector acts on. inverse_min_event_ndims: Python `int` indicating the minimal dimensionality this bijector acts on. name: Python `str`, name given to ops managed by this object. 
""" super(Inline, self).__init__( forward_min_event_ndims=forward_min_event_ndims, inverse_min_event_ndims=inverse_min_event_ndims, is_constant_jacobian=is_constant_jacobian, validate_args=validate_args, name=name) self._forward_fn = forward_fn self._inverse_fn = inverse_fn self._inverse_log_det_jacobian_fn = inverse_log_det_jacobian_fn self._forward_log_det_jacobian_fn = forward_log_det_jacobian_fn self._forward_event_shape_fn = forward_event_shape_fn self._forward_event_shape_tensor_fn = forward_event_shape_tensor_fn self._inverse_event_shape_fn = inverse_event_shape_fn self._inverse_event_shape_tensor_fn = inverse_event_shape_tensor_fn def _forward_event_shape(self, input_shape): if self._forward_event_shape_fn is None: # By default assume shape doesn't change. return input_shape return self._forward_event_shape_fn(input_shape) def _forward_event_shape_tensor(self, input_shape): if self._forward_event_shape_tensor_fn is None: # By default assume shape doesn't change. return input_shape return self._forward_event_shape_tensor_fn(input_shape) def _inverse_event_shape(self, output_shape): if self._inverse_event_shape_fn is None: # By default assume shape doesn't change. return output_shape return self._inverse_event_shape_fn(output_shape) def _inverse_event_shape_tensor(self, output_shape): if self._inverse_event_shape_tensor_fn is None: # By default assume shape doesn't change. return output_shape return self._inverse_event_shape_tensor_fn(output_shape) def _forward(self, x, **kwargs): if not callable(self._forward_fn): raise NotImplementedError( "forward_fn is not a callable function.") return self._forward_fn(x, **kwargs) def _inverse(self, y, **kwargs): if not callable(self._inverse_fn): raise NotImplementedError( "inverse_fn is not a callable function.") return self._inverse_fn(y, **kwargs) def _inverse_log_det_jacobian(self, y, **kwargs): if not callable(self._inverse_log_det_jacobian_fn): raise NotImplementedError( "inverse_log_det_jacobian_fn is not a callable function.") return self._inverse_log_det_jacobian_fn(y, **kwargs) def _forward_log_det_jacobian(self, x, **kwargs): if not callable(self._forward_log_det_jacobian_fn): raise NotImplementedError( "forward_log_det_jacobian_fn is not a callable function.") return self._forward_log_det_jacobian_fn(x, **kwargs)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/inline.py
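A sketch showing how `Inline` can reproduce the `Exp` bijector from plain callables, along the lines of the docstring example above; `forward_min_event_ndims=1` is chosen here to match the per-vector reductions in the log-det callables.

```python
# Sketch only: an Inline bijector equivalent to Exp over vector events.
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops.bijectors.inline import Inline

exp_like = Inline(
    forward_fn=tf.exp,
    inverse_fn=tf.math.log,
    inverse_log_det_jacobian_fn=(
        lambda y: -tf.reduce_sum(tf.math.log(y), axis=-1)),
    forward_log_det_jacobian_fn=lambda x: tf.reduce_sum(x, axis=-1),
    forward_min_event_ndims=1,
    name="exp_inline")

x = tf.constant([[0.0, 1.0], [2.0, 3.0]])
y = exp_like.forward(x)                                    # exp(x)
ildj = exp_like.inverse_log_det_jacobian(y, event_ndims=1)
```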
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AbsoluteValue bijector."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation

__all__ = [
    "AbsoluteValue",
]


class AbsoluteValue(bijector.Bijector):
  """Computes `Y = g(X) = Abs(X)`, element-wise.

  This non-injective bijector allows for transformations of scalar
  distributions with the absolute value function, which maps `(-inf, inf)`
  to `[0, inf)`.

  * For `y in (0, inf)`, `AbsoluteValue.inverse(y)` returns the set inverse
    `{x in (-inf, inf) : |x| = y}` as a tuple, `-y, y`.
  * `AbsoluteValue.inverse(0)` returns `0, 0`, which is not the set inverse
    (the set inverse is the singleton `{0}`), but "works" in conjunction with
    `TransformedDistribution` to produce a left semi-continuous pdf.
  * For `y < 0`, `AbsoluteValue.inverse(y)` happily returns the wrong thing,
    `-y, y`. This is done for efficiency. If `validate_args == True`,
    `y < 0` will raise an exception.

  ```python
  tfd = tf.contrib.distributions

  abs = tfd.bijectors.AbsoluteValue()

  abs.forward([-1., 0., 1.])
  ==> [1., 0., 1.]

  abs.inverse(1.)
  ==> [-1., 1.]

  # The |dX/dY| is constant, == 1. So Log|dX/dY| == 0.
  abs.inverse_log_det_jacobian(1.)
  ==> [0., 0.]

  # Special case handling of 0.
  abs.inverse(0.)
  ==> [0., 0.]

  abs.inverse_log_det_jacobian(0.)
  ==> [0., 0.]
  ```
  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self, validate_args=False, name="absolute_value"):
    """Instantiates the `AbsoluteValue` bijector.

    Args:
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness, in particular whether inputs to `inverse`
        and `inverse_log_det_jacobian` are non-negative.
      name: Python `str` name given to ops managed by this object.
    """
    self._graph_parents = []
    self._name = name

    with self._name_scope("init"):
      super(AbsoluteValue, self).__init__(
          forward_min_event_ndims=0,
          is_constant_jacobian=True,
          validate_args=validate_args,
          name=name)

  def _forward(self, x):
    return math_ops.abs(x)

  def _inverse(self, y):
    if self.validate_args:
      y = control_flow_ops.with_dependencies(
          [check_ops.assert_non_negative(y, message="Argument y was negative")],
          y)
    return -y, y

  def _inverse_log_det_jacobian(self, y):
    # If event_ndims = 2,
    #   F^{-1}(y) = (-y, y), so DF^{-1}(y) = (-1, 1),
    #   so Log|DF^{-1}(y)| = Log[1, 1] = [0, 0].
    zeros = constant_op.constant(0., dtype=y.dtype)
    if self.validate_args:
      zeros = control_flow_ops.with_dependencies(
          [check_ops.assert_non_negative(y, message="Argument y was negative")],
          zeros)
    return zeros, zeros

  @property
  def _is_injective(self):
    return False
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/absolute_value.py
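A short sketch of the non-injective behaviour documented above: `inverse` returns both preimages and the log-det of each branch is zero. The concrete input values are illustrative.

```python
# Sketch only: AbsoluteValue.inverse returns the tuple (-y, y).
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops.bijectors.absolute_value import AbsoluteValue

abs_bijector = AbsoluteValue(validate_args=True)
y = tf.constant([0.5, 2.0])
x_neg, x_pos = abs_bijector.inverse(y)               # (-y, y)
# |dX/dY| == 1 on both branches, so both log-det terms are 0.
ildj_neg, ildj_pos = abs_bijector.inverse_log_det_jacobian(y, event_ndims=0)
```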
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """MatrixInverseTriL bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "MatrixInverseTriL", ] class MatrixInverseTriL(bijector.Bijector): """Computes `g(L) = inv(L)`, where `L` is a lower-triangular matrix. `L` must be nonsingular; equivalently, all diagonal entries of `L` must be nonzero. The input must have `rank >= 2`. The input is treated as a batch of matrices with batch shape `input.shape[:-2]`, where each matrix has dimensions `input.shape[-2]` by `input.shape[-1]` (hence `input.shape[-2]` must equal `input.shape[-1]`). #### Examples ```python tfd.bijectors.MatrixInverseTriL().forward(x=[[1., 0], [2, 1]]) # Result: [[1., 0], [-2, 1]], i.e., inv(x) tfd.bijectors.MatrixInverseTriL().inverse(y=[[1., 0], [-2, 1]]) # Result: [[1., 0], [2, 1]], i.e., inv(y). ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, validate_args=False, name="matrix_inverse_tril"): """Instantiates the `MatrixInverseTriL` bijector. Args: validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. """ self._graph_parents = [] self._name = name super(MatrixInverseTriL, self).__init__( forward_min_event_ndims=2, validate_args=validate_args, name=name) def _forward(self, x): with ops.control_dependencies(self._assertions(x)): shape = array_ops.shape(x) return linalg_ops.matrix_triangular_solve( x, linalg_ops.eye(shape[-1], batch_shape=shape[:-2]), lower=True) def _inverse(self, y): return self._forward(y) def _forward_log_det_jacobian(self, x): # Calculation of the Jacobian: # # Let X = (x_{ij}), 0 <= i,j < n, be a matrix of indeterminates. Let Z = # X^{-1} where Z = (z_{ij}). Then # # dZ/dx_{ij} = (d/dt | t=0) Y(t)^{-1}, # # where Y(t) = X + t*E_{ij} and E_{ij} is the matrix with a 1 in the (i,j) # entry and zeros elsewhere. By the product rule, # # 0 = d/dt [Identity matrix] # = d/dt [Y Y^{-1}] # = Y d/dt[Y^{-1}] + dY/dt Y^{-1} # # so # # d/dt[Y^{-1}] = -Y^{-1} dY/dt Y^{-1} # = -Y^{-1} E_{ij} Y^{-1}. # # Evaluating at t=0, # # dZ/dx_{ij} = -Z E_{ij} Z. # # Taking the (r,s) entry of each side, # # dz_{rs}/dx_{ij} = -z_{ri}z_{sj}. 
# # Now, let J be the Jacobian dZ/dX, arranged as the n^2-by-n^2 matrix whose # (r*n + s, i*n + j) entry is dz_{rs}/dx_{ij}. Considering J as an n-by-n # block matrix with n-by-n blocks, the above expression for dz_{rs}/dx_{ij} # shows that the block at position (r,i) is -z_{ri}Z. Hence # # J = -KroneckerProduct(Z, Z), # det(J) = (-1)^(n^2) (det Z)^(2n) # = (-1)^n (det X)^(-2n). with ops.control_dependencies(self._assertions(x)): return (-2. * math_ops.cast(array_ops.shape(x)[-1], x.dtype.base_dtype) * math_ops.reduce_sum( math_ops.log(math_ops.abs(array_ops.matrix_diag_part(x))), axis=-1)) def _assertions(self, x): if not self.validate_args: return [] shape = array_ops.shape(x) is_matrix = check_ops.assert_rank_at_least( x, 2, message="Input must have rank at least 2.") is_square = check_ops.assert_equal( shape[-2], shape[-1], message="Input must be a square matrix.") above_diagonal = array_ops.matrix_band_part( array_ops.matrix_set_diag( x, array_ops.zeros(shape[:-1], dtype=dtypes.float32)), 0, -1) is_lower_triangular = check_ops.assert_equal( above_diagonal, array_ops.zeros_like(above_diagonal), message="Input must be lower triangular.") # A lower triangular matrix is nonsingular iff all its diagonal entries are # nonzero. diag_part = array_ops.matrix_diag_part(x) is_nonsingular = check_ops.assert_none_equal( diag_part, array_ops.zeros_like(diag_part), message="Input must have all diagonal entries nonzero.") return [is_matrix, is_square, is_lower_triangular, is_nonsingular]
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/distributions/python/ops/bijectors/matrix_inverse_tril.py
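A numeric check of the closed form derived in the comments above, `log|det J| = -2n * sum(log|diag(X)|)`; the concrete 2x2 matrix is an illustrative assumption.

```python
# Sketch only: forward is triangular-matrix inversion; the log-det follows the
# Kronecker-product argument in the source comments.
import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops.bijectors.matrix_inverse_tril import MatrixInverseTriL

b = MatrixInverseTriL(validate_args=True)
x_np = np.array([[2.0, 0.0], [3.0, 4.0]], dtype=np.float32)
x = tf.constant(x_np)
y = b.forward(x)                                    # inv(x), lower triangular
fldj = b.forward_log_det_jacobian(x, event_ndims=2)

with tf.Session() as sess:
  np.testing.assert_allclose(sess.run(y), np.linalg.inv(x_np), atol=1e-5)
  expected = -2.0 * 2 * (np.log(2.0) + np.log(4.0))
  np.testing.assert_allclose(sess.run(fldj), expected, atol=1e-5)
```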
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Labels for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.labeled_tensor.python.ops import core as _core from tensorflow.contrib.labeled_tensor.python.ops import io_ops as _io_ops from tensorflow.contrib.labeled_tensor.python.ops import nn from tensorflow.contrib.labeled_tensor.python.ops import ops as _ops from tensorflow.contrib.labeled_tensor.python.ops import sugar as _sugar # pylint: disable=invalid-name # Core types. Axis = _core.Axis Axes = _core.Axes LabeledTensor = _core.LabeledTensor as_axis = _core.as_axis convert_to_labeled_tensor = _core.convert_to_labeled_tensor identity = _core.identity slice = _core.slice_function # pylint: disable=redefined-builtin transpose = _core.transpose expand_dims = _core.expand_dims align = _core.align axis_order_scope = _core.axis_order_scope check_axis_order = _core.check_axis_order impose_axis_order = _core.impose_axis_order AxisOrderError = _core.AxisOrderError define_unary_op = _core.define_unary_op define_binary_op = _core.define_binary_op define_reduce_op = _ops.define_reduce_op abs = _core.abs_function # pylint: disable=redefined-builtin neg = _core.neg sign = _core.sign reciprocal = _core.reciprocal square = _core.square round = _core.round_function # pylint: disable=redefined-builtin sqrt = _core.sqrt rsqrt = _core.rsqrt exp = _core.exp log = _core.log ceil = _core.ceil floor = _core.floor cos = _core.cos sin = _core.sin tan = _core.tan acos = _core.acos asin = _core.asin atan = _core.atan lgamma = _core.lgamma digamma = _core.digamma erf = _core.erf erfc = _core.erfc logical_not = _core.logical_not tanh = _core.tanh sigmoid = _core.sigmoid add = _core.add sub = _core.sub mul = _core.mul div = _core.div mod = _core.mod pow = _core.pow_function # pylint: disable=redefined-builtin equal = _core.equal greater = _core.greater greater_equal = _core.greater_equal not_equal = _core.not_equal less = _core.less less_equal = _core.less_equal logical_and = _core.logical_and logical_or = _core.logical_or logical_xor = _core.logical_xor maximum = _core.maximum minimum = _core.minimum squared_difference = _core.squared_difference igamma = _core.igamma igammac = _core.igammac zeta = _core.zeta polygamma = _core.polygamma select = _ops.select concat = _ops.concat pack = _ops.pack unpack = _ops.unpack reshape = _ops.reshape rename_axis = _ops.rename_axis random_crop = _ops.random_crop map_fn = _ops.map_fn foldl = _ops.foldl squeeze = _ops.squeeze matmul = _ops.matmul tile = _ops.tile pad = _ops.pad constant = _ops.constant zeros_like = _ops.zeros_like ones_like = _ops.ones_like cast = _ops.cast verify_tensor_all_finite = _ops.verify_tensor_all_finite boolean_mask = _ops.boolean_mask where = _ops.where reduce_all = _ops.reduce_all reduce_any = _ops.reduce_any reduce_logsumexp = 
_ops.reduce_logsumexp reduce_max = _ops.reduce_max reduce_mean = _ops.reduce_mean reduce_min = _ops.reduce_min reduce_prod = _ops.reduce_prod reduce_sum = _ops.reduce_sum batch = _ops.batch shuffle_batch = _ops.shuffle_batch FixedLenFeature = _io_ops.FixedLenFeature parse_example = _io_ops.parse_example parse_single_example = _io_ops.parse_single_example placeholder = _io_ops.placeholder ReshapeCoder = _sugar.ReshapeCoder
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/__init__.py
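A brief sketch of the flat `lt`-style API exposed by the aliases above; the axis names and labels are illustrative.

```python
# Sketch only: wrap a Tensor with named axes and use a few aliased ops.
import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

tensor = tf.reshape(tf.range(6), [2, 3])
labeled = lt.LabeledTensor(tensor, ['batch', ('channel', ['r', 'g', 'b'])])

transposed = lt.transpose(labeled)            # axes reversed: channel, batch
first_row = lt.slice(labeled, {'batch': 0})   # integer selection drops 'batch'
doubled = lt.add(labeled, labeled)            # elementwise, axes preserved
```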
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.labeled_tensor.python.ops import core from tensorflow.contrib.labeled_tensor.python.ops import io_ops from tensorflow.contrib.labeled_tensor.python.ops import test_util from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.platform import test as test_lib class ParseBase(test_util.Base): def setUp(self): super(ParseBase, self).setUp() examples = [ example_pb2.Example(features=feature_pb2.Features(feature={ 'a': feature_pb2.Feature( int64_list=feature_pb2.Int64List(value=[1])), 'b': feature_pb2.Feature( int64_list=feature_pb2.Int64List(value=[2, 3, 4])), })), example_pb2.Example(features=feature_pb2.Features(feature={ 'a': feature_pb2.Feature( int64_list=feature_pb2.Int64List(value=[5])), 'b': feature_pb2.Feature( int64_list=feature_pb2.Int64List(value=[6, 7, 8])), })), ] self.serialized = core.LabeledTensor( constant_op.constant([ex.SerializeToString() for ex in examples]), ['batch']) self.features = { 'a': io_ops.FixedLenFeature([], dtypes.int64), 'b': io_ops.FixedLenFeature([('x', 3)], dtypes.int64) } class TestParseExample(ParseBase): def test(self): expected_a = core.LabeledTensor(constant_op.constant([1, 5]), ['batch']) expected_b = core.LabeledTensor( constant_op.constant([[2, 3, 4], [6, 7, 8]]), ['batch', 'x']) parsed = io_ops.parse_example(self.serialized, self.features) self.assertLabeledTensorsEqual(expected_a, parsed['a']) self.assertLabeledTensorsEqual(expected_b, parsed['b']) def test_placeholder(self): serialized = core.LabeledTensor( array_ops.placeholder(dtypes.string, [None]), ['batch']) # should not raise io_ops.parse_example(serialized, self.features) class TestParseSingleExample(ParseBase): def test(self): expected_a = core.LabeledTensor(constant_op.constant(1), []) expected_b = core.LabeledTensor(constant_op.constant([2, 3, 4]), ['x']) parsed = io_ops.parse_single_example(self.serialized[0], self.features) self.assertLabeledTensorsEqual(expected_a, parsed['a']) self.assertLabeledTensorsEqual(expected_b, parsed['b']) def test_unknown_size(self): features = {'a': io_ops.FixedLenFeature([('x', None)], dtypes.int64)} serialized = array_ops.placeholder(dtypes.string, []) with self.assertRaisesRegexp(ValueError, 'unknown size'): io_ops.parse_single_example(serialized, features) class PlaceholderTest(test_util.Base): def test_name(self): placeholder_lt = io_ops.placeholder(dtypes.float32, []) self.assertIn('lt_placeholder', placeholder_lt.name) def test(self): placeholder_lt = io_ops.placeholder(dtypes.float32, ['batch', 
('x', ['a', 'b'])]) self.assertEqual(placeholder_lt.dtype, dtypes.float32) self.assertEqual(placeholder_lt.axes, core.Axes([('batch', None), ('x', ['a', 'b'])])) def test_feed(self): sess = session.Session() placeholder_lt = io_ops.placeholder(dtypes.float32, []) two_times = 2.0 * placeholder_lt result = sess.run(two_times, {placeholder_lt.tensor: 1}) self.assertEqual(result, 2.0) if __name__ == '__main__': test_lib.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/io_ops_test.py
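A condensed sketch of the parsing pattern the test above exercises; the placeholder-based serialized input is an assumption for illustration.

```python
# Sketch only: FixedLenFeature shapes may carry labeled axes such as ('x', 3).
import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

serialized = lt.LabeledTensor(
    tf.placeholder(tf.string, [None]), ['batch'])
features = {
    'a': lt.FixedLenFeature([], tf.int64),          # scalar per example
    'b': lt.FixedLenFeature([('x', 3)], tf.int64),  # labeled axis 'x' of size 3
}
parsed = lt.parse_example(serialized, features)
# parsed['b'] has axes ('batch', 'x'), mirroring TestParseExample above.
```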
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network ops for LabeledTensors."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib import nn as contrib_nn
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.ops import nn

relu = core.define_unary_op('relu', nn.relu)
relu6 = core.define_unary_op('relu6', nn.relu6)
crelu = core.define_unary_op('crelu', nn.crelu)
elu = core.define_unary_op('elu', nn.elu)
softplus = core.define_unary_op('softplus', nn.softplus)

l2_loss = core.define_unary_op('l2_loss', nn.l2_loss)
sigmoid_cross_entropy_with_logits = core.define_binary_op(
    'sigmoid_cross_entropy_with_logits',
    contrib_nn.deprecated_flipped_sigmoid_cross_entropy_with_logits)
softmax = core.define_unary_op('softmax', nn.softmax)
log_softmax = core.define_unary_op('log_softmax', nn.log_softmax)
softmax_cross_entropy_with_logits = core.define_binary_op(
    'softmax_cross_entropy_with_logits',
    contrib_nn.deprecated_flipped_softmax_cross_entropy_with_logits)
sparse_softmax_cross_entropy_with_logits = core.define_binary_op(
    'sparse_softmax_cross_entropy_with_logits',
    contrib_nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/nn.py
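A sketch of the wrapped ops defined above acting on a `LabeledTensor`; axis names are preserved through the unary ops. The input values are illustrative.

```python
# Sketch only: labeled wrappers around tf.nn activations.
import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

logits = lt.LabeledTensor(
    tf.constant([[1.0, -2.0, 0.5]]),
    ['batch', ('class', ['a', 'b', 'c'])])
probs = lt.nn.softmax(logits)         # still labeled: ('batch', 'class')
activations = lt.nn.relu(logits)      # elementwise, axes unchanged
```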
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range # pylint: disable=redefined-builtin from tensorflow.contrib.labeled_tensor.python.ops import core from tensorflow.contrib.labeled_tensor.python.ops import ops from tensorflow.contrib.labeled_tensor.python.ops import sugar from tensorflow.contrib.labeled_tensor.python.ops import test_util from tensorflow.python.framework import constant_op from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class Base(test_util.Base): def setUp(self): super(Base, self).setUp() self.small_lt = core.LabeledTensor(constant_op.constant([1]), [('x', 1)]) class ReshapeCoderTest(Base): def setUp(self): super(ReshapeCoderTest, self).setUp() self.batch_size = 8 self.num_rows = 50 self.num_columns = 100 self.channels = ['red', 'green', 'blue'] self.masks = [False, True] tensor = math_ops.range(0, self.batch_size * self.num_rows * self.num_columns * len(self.channels) * len(self.masks)) tensor = array_ops.reshape(tensor, [ self.batch_size, self.num_rows, self.num_columns, len(self.channels), len(self.masks) ]) self.batch_axis = ('batch', range(self.batch_size)) self.row_axis = ('row', range(self.num_rows)) self.column_axis = ('column', range(self.num_columns)) self.channel_axis = ('channel', self.channels) self.mask_axis = ('mask', self.masks) axes = [ self.batch_axis, self.row_axis, self.column_axis, self.channel_axis, self.mask_axis ] self.masked_image_lt = core.LabeledTensor(tensor, axes) def test_name(self): rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth']) encode_lt = rc.encode(self.masked_image_lt) decode_lt = rc.decode(encode_lt) self.assertIn('lt_reshape_encode', encode_lt.name) self.assertIn('lt_reshape_decode', decode_lt.name) def test_bijection_flat(self): rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth']) encode_lt = rc.encode(self.masked_image_lt) golden_axes = core.Axes([ self.batch_axis, self.row_axis, self.column_axis, ('depth', len(self.channels) * len(self.masks)) ]) self.assertEqual(encode_lt.axes, golden_axes) decode_lt = rc.decode(encode_lt) self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt) def test_bijection_with_labels(self): depth_axis = core.Axis('depth', range(len(self.channels) * len(self.masks))) rc = sugar.ReshapeCoder(['channel', 'mask'], [depth_axis, ('other', ['label'])]) encode_lt = rc.encode(self.masked_image_lt) golden_axes = core.Axes([ self.batch_axis, self.row_axis, self.column_axis, depth_axis, ('other', ['label']) ]) self.assertEqual(encode_lt.axes, golden_axes) decode_lt = rc.decode(encode_lt) self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt) def test_invalid_input(self): with self.assertRaises(ValueError): rc = sugar.ReshapeCoder(['channel', 'mask'], 
['depth']) rc.decode(self.masked_image_lt) with self.assertRaises(ValueError): rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth']) rc.encode(self.masked_image_lt) rc.encode(ops.select(self.masked_image_lt, {'channel': 'red'})) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/sugar_test.py
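A round-trip sketch of `ReshapeCoder`, mirroring `test_bijection_flat` above; the image dimensions are placeholders.

```python
# Sketch only: fuse trailing axes into one 'depth' axis and undo it.
import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

image = lt.LabeledTensor(
    tf.zeros([4, 8, 8, 3, 2]),
    ['batch', 'row', 'column',
     ('channel', ['r', 'g', 'b']), ('mask', [False, True])])

coder = lt.ReshapeCoder(['channel', 'mask'], ['depth'])
encoded = coder.encode(image)      # axes: batch, row, column, ('depth', 6)
decoded = coder.decode(encoded)    # original channel/mask axes restored
```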
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Minimal runtime type checking library. This module should not be considered public API. """ # TODO(ericmc,shoyer): Delete this in favor of using pytype or mypy from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import re from tensorflow.python.util import tf_inspect from tensorflow.python.util.compat import collections_abc # used for register_type_abbreviation and _type_repr below. _TYPE_ABBREVIATIONS = {} class Type(object): """Base class for type checker types. The custom types defined in this module are based on types in the standard library's typing module (in Python 3.5): https://docs.python.org/3/library/typing.html The only difference should be that we use actual instances of Type classes to represent custom types rather than the metaclass magic typing uses to create new class objects. In practice, all this should mean is that we use `List(int)` rather than `List[int]`. Custom types should implement __instancecheck__ and inherit from Type. Every argument in the constructor must be a type or Type instance, and these arguments must be stored as a tuple on the `_types` attribute. """ def __init__(self, *types): self._types = types def __repr__(self): args_repr = ", ".join(repr(t) for t in self._types) return "typecheck.%s(%s)" % (type(self).__name__, args_repr) class _SingleArgumentType(Type): """Use this subclass for parametric types that accept only one argument.""" def __init__(self, tpe): super(_SingleArgumentType, self).__init__(tpe) @property def _type(self): tpe, = self._types # pylint: disable=unbalanced-tuple-unpacking return tpe class _TwoArgumentType(Type): """Use this subclass for parametric types that accept two arguments.""" def __init__(self, first_type, second_type): super(_TwoArgumentType, self).__init__(first_type, second_type) class Union(Type): """A sum type. A correct type is any of the types provided. """ def __instancecheck__(self, instance): return isinstance(instance, self._types) class Optional(_SingleArgumentType): """An optional type. A correct type is either the provided type or NoneType. """ def __instancecheck__(self, instance): # types.NoneType does not exist in Python 3 return isinstance(instance, (self._type, type(None))) class List(_SingleArgumentType): """A typed list. A correct type is a list where each element has the single provided type. """ def __instancecheck__(self, instance): return (isinstance(instance, list) and all(isinstance(x, self._type) for x in instance)) class Sequence(_SingleArgumentType): """A typed sequence. A correct type is a sequence where each element has the single provided type. 
""" def __instancecheck__(self, instance): return (isinstance(instance, collections_abc.Sequence) and all(isinstance(x, self._type) for x in instance)) class Collection(_SingleArgumentType): """A sized, iterable container. A correct type is an iterable and container with known size where each element has the single provided type. We use this in preference to Iterable because we check each instance of the iterable at runtime, and hence need to avoid iterables that could be exhausted. """ def __instancecheck__(self, instance): return (isinstance(instance, collections_abc.Iterable) and isinstance(instance, collections_abc.Sized) and isinstance(instance, collections_abc.Container) and all(isinstance(x, self._type) for x in instance)) class Tuple(Type): """A typed tuple. A correct type is a tuple with the correct length where each element has the correct type. """ def __instancecheck__(self, instance): return (isinstance(instance, tuple) and len(instance) == len(self._types) and all(isinstance(x, t) for x, t in zip(instance, self._types))) class Mapping(_TwoArgumentType): """A typed mapping. A correct type has the correct parametric types for keys and values. """ def __instancecheck__(self, instance): key_type, value_type = self._types # pylint: disable=unbalanced-tuple-unpacking return (isinstance(instance, collections_abc.Mapping) and all(isinstance(k, key_type) for k in instance.keys()) and all(isinstance(k, value_type) for k in instance.values())) class Dict(Mapping): """A typed dict. A correct type has the correct parametric types for keys and values. """ def __instancecheck__(self, instance): return (isinstance(instance, dict) and super(Dict, self).__instancecheck__(instance)) def _replace_forward_references(t, context): """Replace forward references in the given type.""" if isinstance(t, str): return context[t] elif isinstance(t, Type): return type(t)(*[_replace_forward_references(t, context) for t in t._types]) # pylint: disable=protected-access else: return t def register_type_abbreviation(name, alias): """Register an abbreviation for a type in typecheck tracebacks. This makes otherwise very long typecheck errors much more readable. Example: typecheck.register_type_abbreviation(tf.compat.v1.Dimension, 'tf.compat.v1.Dimension') Args: name: type or class to abbreviate. alias: string alias to substitute. """ _TYPE_ABBREVIATIONS[name] = alias def _type_repr(t): """A more succinct repr for typecheck tracebacks.""" string = repr(t) for type_, alias in _TYPE_ABBREVIATIONS.items(): string = string.replace(repr(type_), alias) string = re.sub(r"<(class|type) '([\w.]+)'>", r"\2", string) string = re.sub(r"typecheck\.(\w+)", r"\1", string) return string class Error(TypeError): """Exception for typecheck failures.""" def accepts(*types): """A decorator which checks the input types of a function. Based on: http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments The above draws from: https://www.python.org/dev/peps/pep-0318/ Args: *types: A list of Python types. Returns: A function to use as a decorator. """ def check_accepts(f): """Check the types.""" spec = tf_inspect.getargspec(f) num_function_arguments = len(spec.args) if len(types) != num_function_arguments: raise Error( "Function %r has %d arguments but only %d types were provided in the " "annotation." 
% (f, num_function_arguments, len(types))) if spec.defaults: num_defaults = len(spec.defaults) for (name, a, t) in zip(spec.args[-num_defaults:], spec.defaults, types[-num_defaults:]): allowed_type = _replace_forward_references(t, f.__globals__) if not isinstance(a, allowed_type): raise Error("default argument value %r of type %r is not an instance " "of the allowed type %s for the %s argument to %r" % (a, type(a), _type_repr(allowed_type), name, f)) @functools.wraps(f) def new_f(*args, **kwds): """A helper function.""" for (a, t) in zip(args, types): allowed_type = _replace_forward_references(t, f.__globals__) if not isinstance(a, allowed_type): raise Error("%r of type %r is not an instance of the allowed type %s " "for %r" % (a, type(a), _type_repr(allowed_type), f)) return f(*args, **kwds) return new_f return check_accepts def returns(*types): """A decorator which checks the return types of a function. Based on: http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments The above draws from: https://www.python.org/dev/peps/pep-0318/ Args: *types: A list of Python types. A list of one element corresponds to a single return value. A list of several elements corresponds to several return values. Note that a function with no explicit return value has an implicit NoneType return and should be annotated correspondingly. Returns: A function to use as a decorator. """ def check_returns(f): """Check the types.""" if not types: raise TypeError("A return type annotation must contain at least one type") @functools.wraps(f) def new_f(*args, **kwds): """A helper function.""" return_value = f(*args, **kwds) if len(types) == 1: # The function has a single return value. allowed_type = _replace_forward_references(types[0], f.__globals__) if not isinstance(return_value, allowed_type): raise Error( "%r of type %r is not an instance of the allowed type %s " "for %r" % (return_value, type(return_value), _type_repr(allowed_type), f)) else: if len(return_value) != len(types): raise Error("Function %r has %d return values but only %d types were " "provided in the annotation." % (f, len(return_value), len(types))) for (r, t) in zip(return_value, types): allowed_type = _replace_forward_references(t, f.__globals__) if not isinstance(r, allowed_type): raise Error("%r of type %r is not an instance of allowed type %s " "for %r" % (r, type(r), _type_repr(allowed_type), f)) return return_value return new_f return check_returns
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/_typecheck.py
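A sketch of how the `accepts`/`returns` decorators above are applied; the decorated helper function is hypothetical, not part of the module.

```python
# Sketch only: runtime type checks on arguments and the return value.
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc


@tc.returns(int)
@tc.accepts(tc.Collection(int), tc.Optional(int))
def total(values, initial=0):
  # Hypothetical helper, used only to demonstrate the decorators.
  return sum(values, initial)


total([1, 2, 3])       # passes both checks
total([1, 2, 3], 10)   # passes
# total([1, 'x'])      # would raise tc.Error: 'x' is not an int
```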
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Core classes and core ops for LabeledTensor. Core ops are ops which will eventually be called by LabeledTensor methods, and ops which a core op depends upon. For example, `add` is a core op because we'll eventually support the `+` operator. Non-core ops should go in `ops.py`. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import contextlib import numbers import types import numpy as np from six import binary_type from six import string_types from six import text_type from six.moves import range # pylint: disable=redefined-builtin from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.util.compat import collections_abc # pylint: disable=invalid-name # Types coercible to Axis.labels # We use this instead of collections_abc.Sequence to exclude strings. LabelsLike = tc.Union(np.ndarray, range, list, tuple) # Types coercible to a tf.compat.v1.Dimension DimensionLike = tc.Optional(tc.Union(tensor_shape.Dimension, int)) # Types usable for axis values AxisValue = tc.Union(LabelsLike, DimensionLike) # Valid scalar values for TensorFlow Scalar = tc.Union(numbers.Number, bool, binary_type, text_type) # pylint: enable=invalid-name class Axis(object): """Size and label information for an axis. Axis contains either a tf.compat.v1.Dimension indicating the size of an axis, or a tuple of tick labels for the axis. If tick labels are provided, they must be unique. """ @tc.accepts(object, string_types, AxisValue) def __init__(self, name, value): """Construct an Axis. Args: name: Name of the axis. value: Either None, an int or tf.compat.v1.Dimension giving the size of the axis, or a sequence that is not a string additionally providing coordinate (tick) labels. Raises: ValueError: If the user provides labels with duplicate values. """ if isinstance(value, tensor_shape.Dimension): dimension = value labels = None elif isinstance(value, int) or value is None: dimension = tensor_shape.Dimension(value) labels = None else: dimension = tensor_shape.Dimension(len(value)) labels = tuple(value) if dimension.value == 0: # Treat a zero-length axis as if it has labels. 
labels = () if labels is not None: index = dict(zip(labels, range(len(labels)))) if len(index) != len(labels): raise ValueError( 'Tick labels must be unique, but got {}'.format(labels)) else: index = None self._name = name # type: string_types self._dimension = dimension # type: tensor_shape.Dimension self._labels = labels # type: Optional[tuple] self._index = index # type: Optional[Dict[Any, int]] @property @tc.returns(string_types) def name(self): return self._name @tc.returns(string_types) def __repr__(self): # Axis('x', Dimension(2)) # TODO(shoyer): make very long reprs more succint? return "%s('%s', %r)" % (type(self).__name__, self.name, self.value) @tc.returns(bool) def __eq__(self, other): return (isinstance(other, Axis) and self.name == other.name and self.size == other.size and self.labels == other.labels) def __hash__(self): return hash((self.name, self.size, self.labels)) @tc.returns(bool) def __ne__(self, other): return not self == other @tc.returns(int) def __len__(self): size = self.size if size is None: raise ValueError('axis %r has unknown length' % self.name) return size @property @tc.returns(tc.Optional(tensor_shape.Dimension)) def dimension(self): return self._dimension @property @tc.returns(tc.Optional(int)) def size(self): return self._dimension.value @property @tc.returns(tc.Union(tuple, tensor_shape.Dimension)) def value(self): """Returns the tf.compat.v1.Dimension or tuple specifying axis ticks.""" if self.labels is None: return self.dimension else: return self.labels @property @tc.returns(tc.Optional(tuple)) def labels(self): """Returns the tuple containing coordinate labels, else None.""" return self._labels def index(self, value): """Returns the integer position of the given tick label.""" if self._index is None: raise ValueError('Axis does not have tick labels') return self._index[value] # tc class for anything that can be coerced into an Axis # pylint: disable=invalid-name AxisLike = tc.Union(Axis, tc.Tuple(string_types, AxisValue)) # pylint: enable=invalid-name @tc.returns(Axis) @tc.accepts(AxisLike) def as_axis(axis_data): """Convert an AxisLike object into an Axis. Args: axis_data: Axis object or tuple (axis_name, axis_value) describing an axis. Returns: Axis object. This may be the original object if axis_data is an Axis. """ if isinstance(axis_data, Axis): axis = axis_data else: axis = Axis(*axis_data) return axis class Axes(collections_abc.Mapping): """Axis names and indices for a tensor. It is an ordered mapping, with keys given by axis name and values given by Axis objects. Duplicate axis names are not allowed. """ @tc.accepts(object, tc.List(AxisLike)) def __init__(self, axes): """Construct an Axes. Args: axes: A list of Axis objects or (axis_name, axis_value) tuples. Raises: ValueError: If the user provides empty or duplicate axis names. 
""" self._axes = collections.OrderedDict() for axis_data in axes: axis = as_axis(axis_data) name = axis.name if name in self._axes: raise ValueError('Duplicate axis name: %s' % name) self._axes[name] = axis def __iter__(self): return iter(self._axes) @tc.returns(string_types) def __repr__(self): # Axes([('x', Dimension(2)), # ('y', ['a', 'b', 'c']), # ('z', Dimension(4))]) cls_name = type(self).__name__ values = ["('%s', %r)" % (v.name, v.value) for v in self._axes.values()] values_repr = (',\n' + ' ' * len(cls_name + '([')).join(values) return '%s([%s])' % (cls_name, values_repr) @tc.returns(Axis) @tc.accepts(object, string_types) def __getitem__(self, name): return self._axes[name] @tc.returns(bool) def __contains__(self, name): return name in self._axes @tc.returns(int) def __len__(self): return len(self._axes) def __hash__(self): return hash(tuple(self.items())) @tc.accepts(object, string_types) def remove(self, axis_name): """Creates a new Axes object without the given axis.""" if axis_name not in self: raise KeyError(axis_name) remaining_axes = [axis for axis in self.values() if axis.name != axis_name] return Axes(remaining_axes) class LabeledTensor(object): """A tensor with annotated axes. It has the following invariants: 1) The dimensionality of the tensor is equal to the number of elements in axes. 2) The number of coordinate values in the ith dimension is equal to the size of the tensor in the ith dimension. Attributes: tensor: tf.Tensor containing the data. axes: lt.Axes containing axis names and coordinate labels. """ @tc.accepts(object, ops.Tensor, tc.Union(Axes, tc.Collection(tc.Union(string_types, AxisLike)))) def __init__(self, tensor, axes): """Construct a LabeledTensor. Args: tensor: The underlying tensor containing the data. axes: An Axes object, or a collection of strings, Axis objects or tuples of (name, value) pairs indicating the axes. Raises: ValueError: If the provided axes do not satisfy the class invariants. """ self._tensor = tensor shape = tensor.get_shape() if isinstance(axes, Axes): unvalidated_axes = axes else: mutable_axes = [] for position, axis_like in enumerate(axes): if isinstance(axis_like, string_types): # The coordinates for this axes are unlabeled. # Infer the size of the axis. value = shape[position] axis_like = (axis_like, value) mutable_axes.append(axis_like) # Construct the Axis object, which will additionally validate the contents # of the object. unvalidated_axes = Axes(mutable_axes) # Check our invariants. # First, the rank of the tensor must be equal to the number of axes. if len(shape) != len(unvalidated_axes): raise ValueError( 'Tensor rank was not equal to the number of axes: %r, %r' % (shape, unvalidated_axes)) # Second, the size of each tensor dimension must match the size of the # corresponding indices. 
for (d, axis) in zip(shape, unvalidated_axes.values()): if d != axis.size: raise ValueError( 'Provided axis size %d does not match tensor dimension size %d' 'in tensor %r' % (axis.size, d, tensor)) self._axes = unvalidated_axes def __repr__(self): # <LabeledTensor 'foo' shape=(2, 3, 4) dtype=float32 # axes=[('x', Dimension(2)), # ('y', ('a', 'b', 'c'), # ('z', Dimension(4))]> axes = ["('%s', %r)" % (v.name, v.value) for v in self.axes.values()] axes_repr = (',\n' + ' ' * len(' axes=[')).join(axes) return ("<%s '%s' shape=%s dtype=%s\n axes=[%s]>" % (type(self).__name__, self.tensor.name, self.tensor.get_shape(), self.tensor.dtype.name, axes_repr)) @property def tensor(self): return self._tensor def _as_graph_element(self): """Support tf.Graph.as_graph_element on LabeledTensor objects. This allows operations such as tf.name_scope to take labeled tensors. Returns: self.tensor """ return self.tensor @property def axes(self): return self._axes # properties/methods directly borrowed from tf.Tensor: @property def dtype(self): return self._tensor.dtype @property def shape(self): return self._tensor.shape @property def name(self): return self._tensor.name def get_shape(self): """Returns the TensorShape that represents the shape of this tensor. See tf.Tensor.get_shape(). Returns: A TensorShape representing the shape of this tensor. """ return self._tensor.get_shape() # TODO(shoyer): consider how/if to implement .eval(). Maybe it should return # an xarray.DataArray? def __getitem__(self, key): # This should work exactly like tf.Tensor.__getitem__, except it preserves # labels. if not isinstance(key, tuple): key = (key,) if len(key) != len(self.axes): raise ValueError('indexer %r must have the same length as the Tensor ' 'rank (%r)' % (key, len(self.axes))) selection = {a: k for a, k in zip(self.axes.keys(), key)} return slice_function(self, selection) # special methods for overloading arithmetic operations: def __abs__(self): return abs_function(self) def __neg__(self): return neg(self) def __pos__(self): return self def __add__(self, other): return add(self, other) def __radd__(self, other): return add(other, self) def __sub__(self, other): return sub(self, other) def __rsub__(self, other): return sub(other, self) def __mul__(self, other): return mul(self, other) def __rmul__(self, other): return mul(other, self) def __truediv__(self, other): return div(self, other) __div__ = __truediv__ def __rtruediv__(self, other): return div(other, self) __rdiv__ = __rtruediv__ def __mod__(self, other): return mod(self, other) def __rmod__(self, other): return mod(other, self) def __pow__(self, other): return pow_function(self, other) def __rpow__(self, other): return pow_function(other, self) # logical operations: def __invert__(self): return logical_not(self) def __and__(self, other): return logical_and(self, other) def __or__(self, other): return logical_or(self, other) def __xor__(self, other): return logical_xor(self, other) # boolean operations: def __lt__(self, other): return less(self, other) def __le__(self, other): return less_equal(self, other) def __gt__(self, other): return greater(self, other) def __ge__(self, other): return greater_equal(self, other) def __eq__(self, other): # for consistency with tf.Tensor if not isinstance(other, LabeledTensor): return False return self.tensor == other.tensor and self.axes == other.axes def __ne__(self, other): return not self == other def __hash__(self): return hash((self.tensor, self.axes)) # typecheck type abbreviations: # abbreviations for third-party types 
with very long reprs tc.register_type_abbreviation(tensor_shape.Dimension, 'tensorflow.Dimension') tc.register_type_abbreviation(ops.Tensor, 'tensorflow.Tensor') tc.register_type_abbreviation(dtypes.DType, 'tensorflow.DType') # core LabeledTensor types tc.register_type_abbreviation(Axis, 'labeled_tensor.Axis') tc.register_type_abbreviation(Axes, 'labeled_tensor.Axes') tc.register_type_abbreviation(LabeledTensor, 'labeled_tensor.LabeledTensor') @tc.returns(ops.Tensor) @tc.accepts(LabeledTensor) def _convert_labeled_tensor_to_tensor(value, *args, **kwargs): # call ops.convert_to_tensor to handle optional arguments appropriately return ops.internal_convert_to_tensor(value.tensor, *args, **kwargs) ops.register_tensor_conversion_function(LabeledTensor, _convert_labeled_tensor_to_tensor) # tc class for anything that can be coerced into a LabeledTensor # pylint: disable=invalid-name LabeledTensorLike = tc.Union(LabeledTensor, ops.Tensor, np.ndarray, Scalar) # pylint: enable=invalid-name @tc.returns(LabeledTensor) @tc.accepts(LabeledTensorLike, object, tc.Optional(string_types)) def convert_to_labeled_tensor(value, dtype=None, name=None): """Converts the given `value` to a `LabeledTensor`. This function accepts `LabeledTensor` objects, 0-dimensional `Tensor` objects and numpy arrays, and Python scalars. Higher dimensional unlabeled tensors must use the `LabeledTensor` constructor explicitly. Args: value: Object to convert. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of value. name: Optional name to use if a new Tensor is created. Returns: `value` converted into a `LabeledTensor` object. Raises: ValueError: If the output would have rank>0 but the input was not already a `LabeledTensor`. """ # TODO(shoyer): consider extending to accept xarray.DataArray as input. if isinstance(value, LabeledTensor): axes = value.axes.values() value = value.tensor else: axes = [] # We call convert_to_tensor even for LabeledTensor input because it also # checks to make sure the dtype argument is compatible. tensor = ops.convert_to_tensor(value, dtype=dtype, name=name) if len(tensor.get_shape()) != len(axes): raise ValueError('cannot automatically convert unlabeled arrays or tensors ' 'with rank>0 into LabeledTensors: %r' % value) return LabeledTensor(tensor, axes) @tc.returns(Axis) @tc.accepts(tc.Collection(Axis)) def concat_axes(axes): """Concatenate a list of Axes. Args: axes: A collection of Axis objects. Returns: The concatenation of the axes. If all axes have labels, the result has the concatenation of the labels. Else, the result has no labels, and its size is the sum of the sizes of the axes. Raises: ValueError: If `others` is not a collection of Axes or if it is empty. """ if not axes: raise ValueError('axes must not be empty') for a in axes: if not isinstance(a, Axis): raise ValueError('Expected an Axis, but got %r of type %r' % (a, type(a))) names = set(a.name for a in axes) if len(names) > 1: raise ValueError('axes do not all have the same name: %r' % names) name, = names all_have_labels = all(a.labels is not None for a in axes) any_has_unknown_size = any(a.size is None for a in axes) if all_have_labels: value = tuple(label for a in axes for label in a.labels) elif any_has_unknown_size: value = None else: value = sum(len(a) for a in axes) return Axis(name, value) @tc.returns(LabeledTensor) @tc.accepts(LabeledTensorLike, tc.Optional(string_types)) def identity(labeled_tensor, name=None): """The identity op. See tf.identity. 
Args: labeled_tensor: The input tensor. name: Optional op name. Returns: The tensor. """ with ops.name_scope(name, 'lt_identity', [labeled_tensor]) as scope: labeled_tensor = convert_to_labeled_tensor(labeled_tensor) return LabeledTensor( array_ops.identity(labeled_tensor.tensor, name=scope), labeled_tensor.axes) # We don't call this slice because that shadows a built-in. Instead, we alias # this to lt.slice in __init__.py. @tc.returns(LabeledTensor) @tc.accepts(LabeledTensorLike, tc.Mapping(string_types, tc.Union(int, slice)), tc.Optional(string_types)) def slice_function(labeled_tensor, selection, name=None): """Slice out a subset of the tensor. This is an analog of tf.slice. For example: >>> tensor = tf.reshape(tf.range(0, 6), [3, 2]) >>> labeled_tensor = lt.LabeledTensor(tensor, ['a', ('b', ['foo', 'bar'])]) >>> lt.slice(labeled_tensor, {'a': slice(0, 2), 'b': 1}) <LabeledTensor 'lt_slice:...' shape=(2,) dtype=int32 axes=[('a', Dimension(2))]> Args: labeled_tensor: The input tensor. selection: A dictionary of type str -> Union(int, slice of int) mapping axis names to sub-selections. name: Optional op name. Returns: The slice as a `LabeledTensor`. """ with ops.name_scope(name, 'lt_slice', [labeled_tensor]) as scope: labeled_tensor = convert_to_labeled_tensor(labeled_tensor) slices = [] for axis_name in labeled_tensor.axes: if axis_name not in selection: # We're not sub-selecting this axis, so use the full slice. slices.append(slice(None)) else: slices.append(selection[axis_name]) sliced_tensor = labeled_tensor.tensor[tuple(slices)] sliced_axes = [] for axis, s in zip(labeled_tensor.axes.values(), slices): # We sub-select this axis's index with the slice s. # `s` is either an int or a proper slice. if isinstance(s, slice): if axis.labels is None: # We're not tracking coordinate names for this axis. sliced_axes.append(axis.name) else: sliced_axes.append((axis.name, axis.labels[s])) else: # If the slice is an int this dimension now has size 1, so we remove it. assert isinstance(s, int) return LabeledTensor( array_ops.identity(sliced_tensor, name=scope), sliced_axes) @tc.returns(LabeledTensor) @tc.accepts(LabeledTensorLike, tc.Optional(tc.Collection(string_types)), tc.Optional(string_types)) def transpose(labeled_tensor, axis_order=None, name=None): """Permute a tensor's axes. See tf.transpose. Args: labeled_tensor: The input tensor. axis_order: Optional desired axis order, as a list of names. By default, the order of axes is reversed. name: Optional op name. Returns: The permuted tensor. Raises: ValueError: If axis_order isn't a permutation of the existing axes. """ with ops.name_scope(name, 'lt_transpose', [labeled_tensor]) as scope: labeled_tensor = convert_to_labeled_tensor(labeled_tensor) original_order = list(labeled_tensor.axes.keys()) if axis_order is None: axis_order = list(reversed(original_order)) elif sorted(axis_order) != sorted(original_order): raise ValueError( 'The new axis order must have the same names as the original axes, ' 'but the new order is %r while the original order is %r' % (axis_order, original_order)) axis_names = list(labeled_tensor.axes.keys()) permutation = [axis_names.index(n) for n in axis_order] # Note: TensorFlow doesn't copy data for the identity transpose. 
transpose_tensor = array_ops.transpose( labeled_tensor.tensor, permutation, name=scope) permuted_axes = [labeled_tensor.axes[n] for n in axis_order] return LabeledTensor(transpose_tensor, permuted_axes) @tc.returns(LabeledTensor) @tc.accepts(LabeledTensorLike, tc.Collection( tc.Union(string_types, tc.Tuple(string_types, collections_abc.Hashable))), tc.Optional(string_types)) def expand_dims(labeled_tensor, axes, name=None): """Insert dimensions of size 1. See tf.expand_dims. Args: labeled_tensor: The input tensor. axes: The desired axis names as strings or tuples of (name, label), where `label` is the coordinate name for the new dimension `name`. These must include the existing axis names, and the existing names must appear in the same order in this list as they do in the input tensor. name: Optional op name. Returns: A tensor with an axis for each axis in axes. New axes are created with size 1 and do not have labeled coordinates. Raises: AxisOrderError: If axis names don't appear in the same order in axes and the labeled tensor. """ with ops.name_scope(name, 'lt_expand_dims', [labeled_tensor]) as scope: labeled_tensor = convert_to_labeled_tensor(labeled_tensor) axis_names = [a if isinstance(a, string_types) else a[0] for a in axes] check_axis_order(labeled_tensor, axis_names) reshaped_axes = [] shape = [] for axis_spec in axes: if axis_spec in labeled_tensor.axes: axis = labeled_tensor.axes[axis_spec] reshaped_axes.append(axis) shape.append(-1 if axis.size is None else axis.size) else: if isinstance(axis_spec, string_types): reshaped_axes.append((axis_spec, 1)) else: (name, label) = axis_spec reshaped_axes.append((name, (label,))) shape.append(1) reshaped_tensor = array_ops.reshape( labeled_tensor.tensor, shape, name=scope) return LabeledTensor(reshaped_tensor, reshaped_axes) # This should only be added to a graph collection once. _AXIS_ORDER_KEY = ('__axis_order',) @tc.returns(tc.Optional(tc.List(string_types))) def get_axis_order(): """Get the axis_order set by any containing axis_order_scope. Returns: List of strings giving an order to use for axis names, or None, if no axis order is set. """ # By storing axis_order in the graph, we can ensure that axis_order_scope is # thread-safe. axis_order_list = ops.get_collection(_AXIS_ORDER_KEY) if axis_order_list: axis_order, = axis_order_list else: axis_order = None return axis_order @tc.accepts(tc.Optional(tc.List(string_types))) def _set_axis_order(axis_order): axis_order_list = ops.get_collection_ref(_AXIS_ORDER_KEY) if axis_order_list: axis_order_list[0] = axis_order else: axis_order_list.append(axis_order) @contextlib.contextmanager @tc.accepts(tc.Optional(tc.List(string_types))) def axis_order_scope(axis_order=None): """Set axis order for the result of broadcasting operations within a scope. This allows you to ensure that tensors resulting from arithmetic have a predictable axis order. Example usage: with lt.axis_order_scope(['x', 'y', 'z']): # result is guaranteed to have the correct axis order result = w + b You can nest scopes, in which case only the inner-most scope applies, e.g., with lt.axis_order(['x', 'y', 'z']): with lt.axis_order(): result = w + b # uses the default (left-most) axis ordering Args: axis_order: optional list of strings providing axis names. By default, creates a scope without axis order. Yields: The provided axis_order or `None`. 
""" original_axis_order = get_axis_order() _set_axis_order(axis_order) try: yield axis_order finally: _set_axis_order(original_axis_order) @tc.returns(tc.List(string_types)) def _get_valid_axis_order(): axis_order = get_axis_order() if axis_order is None: raise AxisOrderError('an explicit axis order must be provided with the ' 'axis_order argument or by using an axis_order_scope') return axis_order class AxisOrderError(ValueError): """Error class for cases where there is no valid axis order.""" # TODO(shoyer): should this function accept a list of labeled tensors instead? @tc.returns(type(None)) @tc.accepts(LabeledTensorLike, tc.Optional(tc.Collection(string_types))) def check_axis_order(labeled_tensor, axis_order=None): """Verify that the given tensor has a consistent axis order. Args: labeled_tensor: The input tensor. All axes on this tensor must appear in axis_order. axis_order: Optional desired axis order, as a list of names. If not provided, defaults to the current axis_order_scope (if set). Raises: AxisOrderError: If the axis_order is unavailable, inconsistent or does not include all existing axes. """ labeled_tensor = convert_to_labeled_tensor(labeled_tensor) if axis_order is None: axis_order = _get_valid_axis_order() relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes] if len(relevant_axis_order) < len(labeled_tensor.axes): raise AxisOrderError( 'not all axis names appear in the required axis order %r: %r' % (axis_order, labeled_tensor)) if relevant_axis_order != list(labeled_tensor.axes): raise AxisOrderError( 'axes on a labeled tensor do not appear in the same order as the ' 'required axis order %r: %r' % (axis_order, labeled_tensor)) @tc.returns(LabeledTensor) @tc.accepts(LabeledTensorLike, tc.Optional(tc.Collection(string_types)), tc.Optional(string_types)) def impose_axis_order(labeled_tensor, axis_order=None, name=None): """Impose desired axis order on a labeled tensor. Args: labeled_tensor: The input tensor. axis_order: Optional desired axis order, as a list of names. If not provided, defaults to the current axis_order_scope (if set). name: Optional op name. Returns: Labeled tensor with possibly transposed axes. Raises: AxisOrderError: If no axis_order is provided or axis_order does not contain all axes on the input tensor. """ with ops.name_scope(name, 'lt_impose_axis_order', [labeled_tensor]) as scope: labeled_tensor = convert_to_labeled_tensor(labeled_tensor) if axis_order is None: axis_order = _get_valid_axis_order() relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes] return transpose(labeled_tensor, relevant_axis_order, name=scope) @tc.returns(tc.Optional(list)) @tc.accepts(list, list) def _find_consistent_ordering(a, b): """Find the left-most consistent ordering between two lists of unique items. A consistent ordering combines all elements in both a and b while keeping all elements in their original order in both inputs. The left-most consistent ordering orders elements from `a` not found in `b` before elements in `b` not found in `a`. For example, given ['x', 'z'] and ['y', 'z'], both ['x', 'y', 'z'] and ['y', 'x', 'z'] are consistent orderings because each of the inputs appears in each consistent ordering in the same order, and ['x', 'y', 'z'] is the left-most, because 'x' appears only in `a` and 'y' appears only in `b`. In contrast, there is no consistent ordering between ['x', 'y'] and ['y', 'x']. Args: a: list with unique elements. b: list with unique elements. 
Returns: List containing all elements in either a or b, or None, if no consistent ordering exists. """ a_set = set(a) b_set = set(b) i = 0 j = 0 ordering = [] while i < len(a) and j < len(b): if a[i] not in b_set: ordering.append(a[i]) i += 1 elif b[j] not in a_set: ordering.append(b[j]) j += 1 elif a[i] == b[j]: ordering.append(a[i]) i += 1 j += 1 else: return None ordering.extend(a[i:]) ordering.extend(b[j:]) return ordering @tc.returns(LabeledTensor, LabeledTensor, Axes) @tc.accepts(LabeledTensorLike, LabeledTensorLike, tc.Optional(string_types)) def align(labeled_tensor_0, labeled_tensor_1, name=None): """Align the axes of two tensors so they may be broadcast to each other. Axes are ordered by the current axis order scope, if present, or by the left- most consistent ordering. An exception is raised if it is impossible to align the tensors without a transpose (align never copies the input data). Example usage: >>> a = lt.LabeledTensor(tf.ones((2, 4)), ['x', 'z']) >>> b = lt.LabeledTensor(tf.ones((3, 4)), ['y', 'z']) >>> a2, b2, axes = lt.align(a, b) >>> a2 <LabeledTensor 'lt_align_1/lt_align_1/0:...' shape=(2, 1, 4) dtype=float32 axes=[('x', Dimension(2)), ('y', Dimension(1)), ('z', Dimension(4))]> >>> b2 <LabeledTensor 'lt_align_1/lt_align_1/1:...' shape=(1, 3, 4) dtype=float32 axes=[('x', Dimension(1)), ('y', Dimension(3)), ('z', Dimension(4))]> >>> axes Axes([('x', Dimension(2)), ('y', Dimension(3)), ('z', Dimension(4))]) Args: labeled_tensor_0: An input tensor. labeled_tensor_1: An input tensor. name: Optional op name. Returns: The aligned tensors and the axes the resulting tensor would have if the two aligned tensors were broadcast to each other. The aligned tensors have the same rank but not necessarily the same shape, with axes in the same order. Raises: ValueError: If axes with the same name on the inputs are not equal. AxisOrderError: If there is no way to reshape the input tensors into the output without a transpose. """ with ops.name_scope(name, 'lt_align', [labeled_tensor_0, labeled_tensor_1]) as scope: labeled_tensor_0 = convert_to_labeled_tensor(labeled_tensor_0) labeled_tensor_1 = convert_to_labeled_tensor(labeled_tensor_1) axes_0 = labeled_tensor_0.axes axes_1 = labeled_tensor_1.axes for axis_name in axes_0: if axis_name in axes_1: if axes_0[axis_name] != axes_1[axis_name]: raise ValueError('Mismatched %r axis on input tensors: %r and %r' % (axis_name, axes_0[axis_name], axes_1[axis_name])) axis_scope_order = get_axis_order() if axis_scope_order is not None: # we are in an axis_order_scope axis_names_set = set(axes_0) | set(axes_1) new_axis_names = [a for a in axis_scope_order if a in axis_names_set] check_axis_order(labeled_tensor_0, axis_scope_order) check_axis_order(labeled_tensor_1, axis_scope_order) else: # attempt to find a consistent ordering new_axis_names = _find_consistent_ordering(list(axes_0), list(axes_1)) if new_axis_names is None: raise AxisOrderError( 'No consistent axis order allows for aligning tensors with axis ' 'orders %r and %r without copying data. Use transpose or ' 'impose_axis_order to reorder axes on one of more of the inputs.' 
% (axes_0.keys(), axes_1.keys())) labeled_tensor_0 = expand_dims( labeled_tensor_0, new_axis_names, name=scope + '0') labeled_tensor_1 = expand_dims( labeled_tensor_1, new_axis_names, name=scope + '1') broadcast_axes = [] for axis_name in new_axis_names: if axis_name in axes_0: broadcast_axes.append(axes_0[axis_name]) else: broadcast_axes.append(axes_1[axis_name]) return labeled_tensor_0, labeled_tensor_1, Axes(broadcast_axes) @tc.returns(types.FunctionType) @tc.accepts(string_types, collections_abc.Callable) def define_unary_op(op_name, elementwise_function): """Define a unary operation for labeled tensors. Args: op_name: string name of the TensorFlow op. elementwise_function: function to call to evaluate the op on a single tf.Tensor object. This function must accept two arguments: a tf.Tensor object, and an optional `name`. Returns: Function defining the given op that acts on LabeledTensors. """ default_name = 'lt_%s' % op_name @tc.returns(LabeledTensor) @tc.accepts(LabeledTensorLike, tc.Optional(string_types)) def op(labeled_tensor, name=None): """LabeledTensor version of `tf.{op_name}`. See `tf.{op_name}` for full details. Args: labeled_tensor: Input tensor. name: Optional op name. Returns: A LabeledTensor with result of applying `tf.{op_name}` elementwise. """ with ops.name_scope(name, default_name, [labeled_tensor]) as scope: labeled_tensor = convert_to_labeled_tensor(labeled_tensor) result_tensor = elementwise_function(labeled_tensor.tensor, name=scope) return LabeledTensor(result_tensor, labeled_tensor.axes) op.__doc__ = op.__doc__.format(op_name=op_name) op.__name__ = op_name return op abs_function = define_unary_op('abs', math_ops.abs) neg = define_unary_op('neg', math_ops.negative) sign = define_unary_op('sign', math_ops.sign) reciprocal = define_unary_op('reciprocal', math_ops.reciprocal) square = define_unary_op('square', math_ops.square) round_function = define_unary_op('round', math_ops.round) sqrt = define_unary_op('sqrt', math_ops.sqrt) rsqrt = define_unary_op('rsqrt', math_ops.rsqrt) exp = define_unary_op('exp', math_ops.exp) log = define_unary_op('log', math_ops.log) ceil = define_unary_op('ceil', math_ops.ceil) floor = define_unary_op('floor', math_ops.floor) cos = define_unary_op('cos', math_ops.cos) sin = define_unary_op('sin', math_ops.sin) tan = define_unary_op('tan', math_ops.tan) acos = define_unary_op('acos', math_ops.acos) asin = define_unary_op('asin', math_ops.asin) atan = define_unary_op('atan', math_ops.atan) lgamma = define_unary_op('lgamma', math_ops.lgamma) digamma = define_unary_op('digamma', math_ops.digamma) erf = define_unary_op('erf', math_ops.erf) erfc = define_unary_op('erfc', math_ops.erfc) logical_not = define_unary_op('logical_not', math_ops.logical_not) tanh = define_unary_op('tanh', math_ops.tanh) sigmoid = define_unary_op('sigmoid', math_ops.sigmoid) @tc.returns(types.FunctionType) @tc.accepts(string_types, collections_abc.Callable) def define_binary_op(op_name, elementwise_function): """Define a binary operation that broadcasts labeled tensors. Args: op_name: string name of the TensorFlow op. elementwise_function: function to call to evaluate the op on tf.Tensor objects. This function must accept three arguments: two tf.Tensor objects, and an optional `name`. Returns: Function defining the given op that acts on LabeledTensors. 
""" default_name = 'lt_%s' % op_name @tc.returns(LabeledTensor) @tc.accepts(LabeledTensorLike, LabeledTensorLike, tc.Optional(string_types)) def op(labeled_tensor_0, labeled_tensor_1, name=None): """LabeledTensor version of `tf.{op_name}` with label based alignment. See `tf.{op_name}` for full details. Args: labeled_tensor_0: Input tensor. labeled_tensor_1: Input tensor. name: Optional op name. Returns: A LabeledTensor with result of applying `tf.{op_name}` elementwise. """ with ops.name_scope(name, default_name, [labeled_tensor_0, labeled_tensor_1]) as scope: align_0, align_1, broadcast_axes = align(labeled_tensor_0, labeled_tensor_1) tensor = elementwise_function(align_0.tensor, align_1.tensor, name=scope) return LabeledTensor(tensor, broadcast_axes) op.__doc__ = op.__doc__.format(op_name=op_name) op.__name__ = op_name return op add = define_binary_op('add', math_ops.add) sub = define_binary_op('sub', math_ops.subtract) mul = define_binary_op('mul', math_ops.multiply) div = define_binary_op('div', math_ops.div) mod = define_binary_op('mod', math_ops.mod) pow_function = define_binary_op('pow', math_ops.pow) equal = define_binary_op('equal', math_ops.equal) greater = define_binary_op('greater', math_ops.greater) greater_equal = define_binary_op('greater_equal', math_ops.greater_equal) not_equal = define_binary_op('not_equal', math_ops.not_equal) less = define_binary_op('less', math_ops.less) less_equal = define_binary_op('less_equal', math_ops.less_equal) logical_and = define_binary_op('logical_and', math_ops.logical_and) logical_or = define_binary_op('logical_or', math_ops.logical_or) logical_xor = define_binary_op('logical_xor', math_ops.logical_xor) maximum = define_binary_op('maximum', math_ops.maximum) minimum = define_binary_op('minimum', math_ops.minimum) squared_difference = define_binary_op('squared_difference', math_ops.squared_difference) igamma = define_binary_op('igamma', math_ops.igamma) igammac = define_binary_op('igammac', math_ops.igammac) zeta = define_binary_op('zeta', math_ops.zeta) polygamma = define_binary_op('polygamma', math_ops.polygamma)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/core.py
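A minimal usage sketch for the core module above. It assumes a TF 1.x environment where the contrib package is importable as `tensorflow.contrib.labeled_tensor` and re-exports these names at the top level (as the `lt.` prefixes in the docstrings suggest); the axis names and shapes are illustrative.

import tensorflow as tf
import tensorflow.contrib.labeled_tensor as lt  # assumed import path (TF 1.x)

# Axes may be bare names or (name, labels) tuples.
image = lt.LabeledTensor(
    tf.zeros([2, 3, 4]),
    ['batch', ('channel', ['red', 'green', 'blue']), 'pixel'])

# Label-aware slicing keeps axis metadata (slice_function is aliased to lt.slice).
green = lt.slice(image, {'channel': slice(1, 2)})

# Binary ops align operands by axis name before broadcasting, so the bias does
# not need to share the image's rank or axis order.
bias = lt.LabeledTensor(tf.ones([3]), [('channel', ['red', 'green', 'blue'])])
shifted = image + bias  # equivalent to lt.add(image, bias)

# Inside an axis_order_scope, broadcast results follow the declared axis order.
with lt.axis_order_scope(['batch', 'channel', 'pixel']):
  shifted_ordered = image + bias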
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utils for writing tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.platform import test from tensorflow.python.training import coordinator from tensorflow.python.training import queue_runner_impl class Base(test.TestCase): """A class with some useful methods for testing.""" def eval(self, tensors): with self.cached_session() as sess: coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord) try: results = sess.run(tensors) finally: coord.request_stop() coord.join(threads) return results def assertTensorsEqual(self, tensor_0, tensor_1): [tensor_0_eval, tensor_1_eval] = self.eval([tensor_0, tensor_1]) self.assertAllEqual(tensor_0_eval, tensor_1_eval) def assertLabeledTensorsEqual(self, tensor_0, tensor_1): self.assertEqual(tensor_0.axes, tensor_1.axes) self.assertTensorsEqual(tensor_0.tensor, tensor_1.tensor)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/test_util.py
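A hypothetical test built on the `Base` helper above, showing how `assertLabeledTensorsEqual` is meant to be used; the test class name and tensors are made up.

from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test as test_lib


class IdentityRoundTripTest(test_util.Base):  # hypothetical test case

  def test_identity_preserves_axes(self):
    original = core.LabeledTensor(array_ops.ones([2, 3]), ['x', 'y'])
    # assertLabeledTensorsEqual compares the axes directly and evaluates both
    # tensors in a cached session with queue runners started.
    self.assertLabeledTensorsEqual(original, core.identity(original))


if __name__ == '__main__':
  test_lib.main()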
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Input parsing code for LabeledTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six import string_types from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc from tensorflow.contrib.labeled_tensor.python.ops import core from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import parsing_ops class FixedLenFeature(object): """Configuration for parsing a fixed-length input feature. Fields: axes: A list of Axis objects or tuples (axis_name, axis_value), where `axis_name` is a string and `axis_value` is None (unknown size), an integer or a list of tick labels. dtype: Data type of input. default_value: Value to be used if an example is missing this feature. It must be compatible with `dtype`. """ def __init__(self, axes, dtype, default_value=None): self._axes = [core.as_axis(a) for a in axes] self._dtype = dtype self._default_value = default_value @property def axes(self): return self._axes @property def dtype(self): return self._dtype @property def default_value(self): return self._default_value @tc.returns(tc.Dict(string_types, parsing_ops.FixedLenFeature)) @tc.accepts(tc.Mapping(string_types, FixedLenFeature)) def _labeled_to_unlabeled_features(features): """Convert a dict of lt.FixedLenFeature into a dict of tf.FixedLenFeature.""" unlabeled_features = {} for name, labeled_feature in features.items(): shape = [ax.size for ax in labeled_feature.axes] if any(size is None for size in shape): # This should be caught on the TensorFlow side, but it isn't yet: # https://github.com/tensorflow/tensorflow/issues/2874 raise ValueError('axes with unknown size are not supported') dtype = labeled_feature.dtype default_value = labeled_feature.default_value unlabeled_features[name] = parsing_ops.FixedLenFeature( shape, dtype, default_value) return unlabeled_features @tc.returns(tc.Dict(string_types, core.LabeledTensor)) @tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, FixedLenFeature), tc.Optional(string_types), object) def parse_example(serialized, features, name=None, example_names=None): """Parse `Example` protos into a `dict` of labeled tensors. See tf.parse_example. Args: serialized: A 1-D LabeledTensor of strings, a batch of binary serialized `Example` protos. features: A `dict` mapping feature keys to `labeled_tensor.FixedLenFeature` values. name: A name for this operation (optional). example_names: A vector (1-D Tensor) of strings (optional), the names of the serialized protos in the batch. Returns: A `dict` mapping feature keys to `LabeledTensor` values. The single axis from `serialized` will be prepended to the axes provided by each feature. Raises: ValueError: if any feature is invalid. 
""" serialized = core.convert_to_labeled_tensor(serialized) unlabeled_features = _labeled_to_unlabeled_features(features) unlabeled_parsed = parsing_ops.parse_example( serialized.tensor, unlabeled_features, name, example_names) parsed = {} for name, parsed_feature in unlabeled_parsed.items(): axes = list(serialized.axes.values()) + features[name].axes parsed[name] = core.LabeledTensor(parsed_feature, axes) return parsed @tc.returns(tc.Dict(string_types, core.LabeledTensor)) @tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, FixedLenFeature), tc.Optional(string_types), object) def parse_single_example(serialized, features, name=None, example_names=None): """Parses a single `Example` proto. See tf.parse_single_example. Args: serialized: A scalar string Tensor or LabeledTensor, a single serialized Example. features: A `dict` mapping feature keys to `labeled_tensor.FixedLenFeature` values. name: A name for this operation (optional). example_names: (Optional) A scalar string Tensor, the associated name. Returns: A `dict` mapping feature keys to `LabeledTensor` values. Raises: ValueError: if any feature is invalid. """ serialized = core.convert_to_labeled_tensor(serialized) unlabeled_features = _labeled_to_unlabeled_features(features) unlabeled_parsed = parsing_ops.parse_single_example( serialized.tensor, unlabeled_features, name, example_names) parsed = {} for name, parsed_feature in unlabeled_parsed.items(): parsed[name] = core.LabeledTensor(parsed_feature, features[name].axes) return parsed @tc.returns(core.LabeledTensor) @tc.accepts(dtypes.DType, tc.Collection(tc.Union(string_types, core.AxisLike)), tc.Optional(string_types)) def placeholder(dtype, axes, name=None): """Create a placeholder for a labeled tensor. For example: lt.placeholder(tf.float32, ['batch', ('channel', ['r', 'g', 'b'])]) See tf.compat.v1.placeholder for more details. Args: dtype: The type of elements in the tensor to be fed. axes: sequence of strings (denoting axes of unknown size) and/or objects convertable to lt.Axis to label the result. name: Optional op name. Returns: Placeholder labeled tensor. """ with ops.name_scope(name, 'lt_placeholder', []) as scope: axes = core.Axes([(axis, None) if isinstance(axis, string_types) else axis for axis in axes]) shape = [axis.size for axis in axes.values()] tensor = array_ops.placeholder(dtype, shape, name=scope) return core.LabeledTensor(tensor, axes)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/io_ops.py
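A sketch of the parsing API above; the feature names, axis labels, and default value are illustrative, and the `lt` import path is assumed as in the module docstrings.

import tensorflow as tf
import tensorflow.contrib.labeled_tensor as lt  # assumed import path (TF 1.x)

# A batch of serialized tf.Example protos, with a labeled 'batch' axis.
serialized = lt.placeholder(tf.string, ['batch'])

# Fixed-length features are declared with labeled axes rather than raw shapes.
features = {
    'rgb': lt.FixedLenFeature([('channel', ['r', 'g', 'b'])], tf.float32),
    'label': lt.FixedLenFeature([], tf.int64, default_value=-1),
}

parsed = lt.parse_example(serialized, features)
# parsed['rgb'] has axes ['batch', 'channel']: the single axis of `serialized`
# is prepended to the axes declared on each feature.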
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import operator import re import textwrap import numpy as np from six.moves import range # pylint: disable=redefined-builtin from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc from tensorflow.contrib.labeled_tensor.python.ops import core from tensorflow.contrib.labeled_tensor.python.ops import test_util from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test as test_lib class AxisTest(test_lib.TestCase): def setUp(self): d_7 = tensor_shape.Dimension(7) p_rgb = ['red', 'green', 'blue'] self.i_7 = core.Axis('7', d_7) self.i_7p = core.Axis('7prime', d_7) self.i_rgb = core.Axis('rgb', p_rgb) self.i_range = core.Axis('range', range(7)) self.i_unknown = core.Axis('unknown', None) def test_equality(self): axes = [self.i_7, self.i_7p, self.i_rgb, self.i_range, self.i_unknown] for i, axis_0 in enumerate(axes): for j, axis_1 in enumerate(axes): if i == j: self.assertEqual(axis_0, axis_1) else: self.assertNotEqual(axis_0, axis_1) def test_axis_value(self): self.assertEqual(self.i_7.value, tensor_shape.Dimension(7)) self.assertTrue(self.i_range.value == tuple(range(7))) def test_axis_input(self): axes = [self.i_7, self.i_7p, self.i_rgb, self.i_range, self.i_unknown] for axis in axes: self.assertEqual(axis, core.Axis(axis.name, axis.value)) def test_axis_value_input(self): axis = self.i_range for value in [range(7), list(range(7)), np.arange(7)]: self.assertEqual(axis, core.Axis(axis.name, value)) def test_size(self): self.assertEqual(len(self.i_7), 7) self.assertEqual(len(self.i_rgb), 3) self.assertEqual(len(self.i_range), 7) self.assertEqual(self.i_unknown.size, None) def test_concat_single(self): red = core.Axis('rgb', ['red']) self.assertEqual(core.concat_axes([red]), red) def test_concat_many(self): red = core.Axis('rgb', ['red']) green = core.Axis('rgb', ['green']) blue = core.Axis('rgb', ['blue']) red_green_blue = core.Axis('rgb', ['red', 'green', 'blue']) self.assertEqual(core.concat_axes([red, green, blue]), red_green_blue) def test_concat_different_names(self): red = core.Axis('red', ['red']) green = core.Axis('green', ['red']) with self.assertRaises(ValueError): core.concat_axes([red, green]) def test_concat_unknown(self): red = core.Axis('rgb', None) green = core.Axis('rgb', None) self.assertEqual(core.concat_axes([red, green]), red) def test_repr(self): self.assertEqual("Axis('7', Dimension(7))", repr(self.i_7)) def test_invalid_input(self): with self.assertRaises(TypeError): core.Axis('foo', [{}]) with 
self.assertRaises(ValueError): core.Axis('foo', [1, 2, 3, 1]) red = core.Axis('foo', ['red']) with self.assertRaises(tc.Error): core.concat_axes([red, 1]) def test_as_axis(self): self.assertEqual(self.i_7, core.as_axis(('7', 7))) self.assertEqual(self.i_7, core.as_axis(self.i_7)) class AxesTest(test_lib.TestCase): def setUp(self): d_7 = tensor_shape.Dimension(7) d_8 = tensor_shape.Dimension(8) p_rgb = ['red', 'green', 'blue'] p_range = range(7) self.i_8 = core.Axis('8', d_8) self.a0 = core.Axes([('d7', d_7)]) self.a1 = core.Axes([('d7', d_7)]) self.a2 = core.Axes([('d7', d_7), ('rgb', p_rgb)]) self.a3 = core.Axes([('8', d_8), ('range', p_range)]) def test_equality(self): self.assertEqual(self.a0, self.a0) self.assertEqual(self.a0, self.a1) self.assertNotEqual(self.a0, self.a2) def test_repr(self): self.assertEqual("Axes([('d7', Dimension(7))])", repr(self.a0)) def test_remove(self): a = self.a3.remove('range') self.assertEqual(a, core.Axes([self.i_8])) with self.assertRaises(KeyError): self.a3.remove('foobar') def test_typecheck_error_message(self): pattern = ('List(Union(labeled_tensor.Axis, Tuple(..., ' 'Union(Union(numpy.ndarray, %s, list, tuple), ' 'Optional(Union(tensorflow.Dimension, int))))))' % range.__name__) regexp = re.escape(pattern).replace(re.escape('...'), '.*') with self.assertRaisesRegexp(tc.Error, 'allowed type ' + regexp): core.Axes(None) class LabeledTensorTest(test_util.Base): def setUp(self): tensor = array_ops.ones([7, 3, 8, 1]) a0 = ('x', range(7)) a1 = ('channel', ['red', 'green', 'blue']) a2 = ('y', 8) a3 = ('z', tensor_shape.Dimension(1)) self.lt = core.LabeledTensor(tensor, [a0, a1, a2, a3]) def test_repr(self): pattern = textwrap.dedent("""\ <LabeledTensor '...' shape=(7, 3, 8, 1) dtype=float32 axes=[('x', ...), ('channel', ...), ('y', Dimension(8)), ('z', Dimension(1))]>""") regexp = re.escape(pattern).replace(re.escape('...'), '.*') self.assertRegexpMatches(repr(self.lt), regexp) def test_reuse_existing_axes(self): alt_lt = core.LabeledTensor(self.lt.tensor, self.lt.axes) self.assertLabeledTensorsEqual(alt_lt, self.lt) def test_reuse_existing_axis_objects(self): alt_lt = core.LabeledTensor(self.lt.tensor, self.lt.axes.values()) self.assertLabeledTensorsEqual(alt_lt, self.lt) def test_indexing_scalars(self): actual = self.lt[:, :, :, 0] expected = core.LabeledTensor(self.lt.tensor[:, :, :, 0], list(self.lt.axes.values())[:-1]) self.assertLabeledTensorsEqual(actual, expected) actual = self.lt[1, :, :, 0] expected = core.LabeledTensor(self.lt.tensor[1, :, :, 0], list(self.lt.axes.values())[1:-1]) self.assertLabeledTensorsEqual(actual, expected) actual = self.lt[1, 2, :, 0] expected = core.LabeledTensor(self.lt.tensor[1, 2, :, 0], list(self.lt.axes.values())[2:-1]) self.assertLabeledTensorsEqual(actual, expected) def test_indexing_1d(self): lt_1d = self.lt[1, 2, :, 0] actual = lt_1d[3] expected = core.LabeledTensor(lt_1d.tensor[3], []) self.assertLabeledTensorsEqual(actual, expected) def test_indexing_slices(self): actual = self.lt[:3, :, :, :] axes = [('x', range(3))] + list(self.lt.axes.values())[1:] expected = core.LabeledTensor(self.lt.tensor[:3, :, :, :], axes) self.assertLabeledTensorsEqual(actual, expected) def test_invalid_indexing(self): with self.assertRaises(ValueError): self.lt[0] # pylint: disable=pointless-statement with self.assertRaises(ValueError): self.lt[:, :, :, :, 0] # pylint: disable=pointless-statement def test_unknown_size(self): tensor = array_ops.placeholder(dtypes.string, [None]) actual = core.LabeledTensor(tensor, ['x']) 
self.assertIsNone(actual.axes['x'].size) self.assertIsNone(actual.axes['x'].value.value) def test_eq(self): self.assertEqual(self.lt, self.lt) self.assertNotEqual(self.lt, self.lt.tensor) self.assertNotEqual(self.lt.tensor, self.lt) def test_hash(self): lt1 = self.lt lt2 = core.LabeledTensor(self.lt.tensor, self.lt.axes) self.assertEqual(lt1, lt2) self.assertEqual(hash(lt1), hash(lt2)) def test_name(self): self.assertEqual(self.lt.name, self.lt.tensor.name) def test_dtype(self): self.assertEqual(self.lt.dtype, self.lt.tensor.dtype) def test_shape(self): self.assertEqual(self.lt.shape, self.lt.tensor.shape) def test_get_shape(self): self.assertEqual(self.lt.get_shape(), self.lt.tensor.get_shape()) def test_convert_to_tensor(self): expected = self.lt.tensor actual = ops.convert_to_tensor(self.lt) self.assertIs(expected, actual) class Base(test_util.Base): def setUp(self): self.x_size = 7 self.channel_size = 3 self.z_size = 4 self.probs_size = 11 tensor = math_ops.range(0, self.x_size * self.channel_size * self.z_size * self.probs_size) tensor = array_ops.reshape( tensor, [self.x_size, self.channel_size, self.z_size, self.probs_size]) a0 = ('x', range(self.x_size)) a1 = ('channel', ['red', 'green', 'blue']) a2 = 'z' a3 = ('probs', np.linspace(0.0, 1.0, self.probs_size)) self.tensor = tensor self.a0 = a0 self.a1 = a1 self.a2 = a2 self.a3 = a3 self.original_lt = core.LabeledTensor(tensor, [a0, a1, a2, a3]) self.x_probs_lt = core.slice_function(self.original_lt, {'z': 0, 'channel': 0}) self.channel_probs_lt = core.slice_function(self.original_lt, {'x': 3, 'z': 0}) class IdentityTest(Base): def test_name(self): identity_lt = core.identity(self.original_lt) self.assertIn('lt_identity', identity_lt.name) class SliceFunctionTest(Base): def test_name(self): select_lt = core.slice_function(self.original_lt, {'channel': 1}) self.assertIn('lt_slice', select_lt.name) def test_scalar(self): select_lt = core.slice_function(self.original_lt, {'channel': 1}) golden_lt = core.LabeledTensor(self.tensor[:, 1, :, :], [self.a0, self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_slice(self): select_lt = core.slice_function(self.original_lt, {'channel': slice(0, 2)}) a1_sliced = ('channel', ['red', 'green']) golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :], [self.a0, a1_sliced, self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_slices(self): select_lt = core.slice_function( self.original_lt, {'x': slice(1, 5), 'channel': slice(1, None)}) a0_sliced = ('x', range(1, 5)) a1_sliced = ('channel', ['green', 'blue']) golden_lt = core.LabeledTensor(self.tensor[1:5, 1:, :, :], [a0_sliced, a1_sliced, self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_slice_unlabeled(self): select_lt = core.slice_function(self.original_lt, {'z': slice(1, 3)}) a2_sliced = 'z' golden_lt = core.LabeledTensor(self.tensor[:, :, 1:3, :], [self.a0, self.a1, a2_sliced, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_slice_unknown_shape(self): lt = core.LabeledTensor( array_ops.placeholder(dtypes.float32, [None, 1]), ['x', 'y']) sliced_lt = core.slice_function(lt, {'y': 0}) self.assertEqual(list(sliced_lt.axes.values()), [lt.axes['x']]) class TransposeTest(Base): def test_name(self): transpose_lt = core.transpose(self.original_lt, self.original_lt.axes.keys()) self.assertIn('lt_transpose', transpose_lt.name) def test_identity(self): transpose_lt = core.transpose(self.original_lt, self.original_lt.axes.keys()) golden_lt = 
self.original_lt self.assertLabeledTensorsEqual(transpose_lt, golden_lt) def test(self): transpose_lt = core.transpose(self.original_lt, ['z', 'channel', 'x', 'probs']) golden_lt = core.LabeledTensor( array_ops.transpose(self.tensor, [2, 1, 0, 3]), [self.a2, self.a1, self.a0, self.a3]) self.assertLabeledTensorsEqual(transpose_lt, golden_lt) def test_default_axis_order(self): transpose_lt = core.transpose(self.original_lt) golden_lt = core.LabeledTensor( array_ops.transpose(self.tensor, [3, 2, 1, 0]), list(reversed(list(self.original_lt.axes.values())))) self.assertLabeledTensorsEqual(transpose_lt, golden_lt) def test_invalid_input(self): with self.assertRaises(ValueError): core.transpose(self.original_lt, ['channel', 'x', 'probs']) with self.assertRaises(ValueError): core.transpose(self.original_lt, ['z', 'foo', 'x', 'probs']) class ExpandDimsTest(Base): def test_name(self): expand_lt = core.expand_dims(self.original_lt, self.original_lt.axes.keys()) self.assertIn('lt_expand', expand_lt.name) def test_identity(self): expand_lt = core.expand_dims(self.original_lt, self.original_lt.axes.keys()) golden_lt = self.original_lt self.assertLabeledTensorsEqual(expand_lt, golden_lt) def test(self): expand_lt = core.expand_dims( self.original_lt, ['foo', 'x', 'bar', 'channel', 'z', 'probs', 'grok']) golden_lt = core.LabeledTensor( array_ops.reshape(self.tensor, [ 1, self.x_size, 1, self.channel_size, self.z_size, self.probs_size, 1 ]), ['foo', self.a0, 'bar', self.a1, self.a2, self.a3, 'grok']) self.assertLabeledTensorsEqual(expand_lt, golden_lt) def test_label(self): expand_lt = core.expand_dims(self.original_lt, [ 'x', 'channel', ('foo', 'bar'), 'z', 'probs', ]) golden_lt = core.LabeledTensor( array_ops.reshape( self.tensor, [self.x_size, self.channel_size, 1, self.z_size, self.probs_size]), [self.a0, self.a1, ('foo', ['bar']), self.a2, self.a3]) self.assertLabeledTensorsEqual(expand_lt, golden_lt) def test_unknown_dimension(self): orig_lt = core.LabeledTensor( array_ops.placeholder(dtypes.float32, [None]), ['x']) expand_lt = core.expand_dims(orig_lt, ['x', 'y']) self.assertEqual(expand_lt.axes, core.Axes([('x', None), ('y', 1)])) def test_invalid_input(self): with self.assertRaises(core.AxisOrderError): core.expand_dims(self.original_lt, ['foo', 'not_x', 'bar', 'channel', 'z', 'probs', 'grok']) with self.assertRaises(core.AxisOrderError): core.expand_dims(self.original_lt, ['foo', 'z', 'bar', 'channel', 'x', 'probs', 'grok']) class AxisOrderScopeTest(Base): def test(self): xyz = ['x', 'y', 'z'] abc = ['a', 'b', 'c'] self.assertIsNone(core.get_axis_order()) with core.axis_order_scope(xyz): self.assertEqual(core.get_axis_order(), xyz) with core.axis_order_scope(): self.assertIsNone(core.get_axis_order()) with core.axis_order_scope(abc): self.assertEqual(core.get_axis_order(), abc) self.assertIsNone(core.get_axis_order()) self.assertEqual(core.get_axis_order(), xyz) self.assertIsNone(core.get_axis_order()) class CheckAxisOrderTest(Base): def test_passes(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor(array_ops.ones((1, 1, 1, 1)), axis_order) core.check_axis_order(lt, axis_order) lt = core.LabeledTensor(array_ops.ones((1, 1, 1)), axis_order[1:]) core.check_axis_order(lt, axis_order) lt = core.LabeledTensor(array_ops.ones((1, 1, 1)), axis_order[:-1]) core.check_axis_order(lt, axis_order) def test_invalid(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor(array_ops.ones((1, 1, 1, 1)), axis_order) with self.assertRaises(core.AxisOrderError): core.check_axis_order(lt) with 
self.assertRaises(core.AxisOrderError): core.check_axis_order(lt, axis_order[:-1]) with self.assertRaises(core.AxisOrderError): core.check_axis_order(lt, axis_order[::-1]) def test_scope(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor(array_ops.ones((1, 1, 1, 1)), axis_order) with core.axis_order_scope(axis_order): core.check_axis_order(lt) class ImposeAxisOrderTest(Base): def test_identity(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor( array_ops.reshape(math_ops.range(24), (1, 2, 3, 4)), axis_order) actual = core.impose_axis_order(lt, axis_order) self.assertLabeledTensorsEqual(lt, actual) lt = core.LabeledTensor( array_ops.reshape(math_ops.range(6), (1, 2, 3)), axis_order[:3]) actual = core.impose_axis_order(lt, axis_order) self.assertLabeledTensorsEqual(lt, actual) def test_reverse(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor( array_ops.reshape(math_ops.range(24), (1, 2, 3, 4)), axis_order) actual = core.impose_axis_order(lt, axis_order[::-1]) expected = core.transpose(lt, axis_order[::-1]) self.assertLabeledTensorsEqual(expected, actual) lt = core.LabeledTensor( array_ops.reshape(math_ops.range(6), (1, 2, 3)), axis_order[:3]) actual = core.impose_axis_order(lt, axis_order[::-1]) expected = core.transpose(lt, ['y', 'x', 'w']) self.assertLabeledTensorsEqual(expected, actual) def test_scope(self): axis_order = ['w', 'x', 'y', 'z'] lt = core.LabeledTensor( array_ops.reshape(math_ops.range(24), (1, 2, 3, 4)), axis_order) expected = core.transpose(lt, axis_order[::-1]) with core.axis_order_scope(axis_order[::-1]): actual = core.impose_axis_order(lt) self.assertLabeledTensorsEqual(expected, actual) def test_invalid(self): lt = core.LabeledTensor( array_ops.reshape(math_ops.range(2), (1, 2)), ['x', 'y']) with self.assertRaises(ValueError): core.impose_axis_order(lt) with self.assertRaises(ValueError): core.impose_axis_order(lt, ['x']) class FindConsistentOrderingTest(Base): def test(self): cases = [ ([], [], []), (['x'], [], ['x']), ([], ['x'], ['x']), (['x'], ['x'], ['x']), (['x'], ['y'], ['x', 'y']), (['y'], ['x'], ['y', 'x']), (['x', 'y'], ['x', 'y'], ['x', 'y']), (['x', 'y'], ['y', 'x'], None), (['x', 'y'], ['y', 'z'], ['x', 'y', 'z']), (['x', 'z'], ['y', 'z'], ['x', 'y', 'z']), (['x', 'y'], ['x', 'z'], ['x', 'y', 'z']), (['w', 'x'], ['y', 'z'], ['w', 'x', 'y', 'z']), (['x', 'y', 'z'], ['z', 'x'], None), (['x', 'y', 'z'], ['x'], ['x', 'y', 'z']), ([], ['x', 'y', 'z'], ['x', 'y', 'z']), ] for a, b, expected in cases: actual = core._find_consistent_ordering(a, b) msg = ('unexpected ordering between %r and %r:\nexpected: %r\nactual: %r' % (a, b, expected, actual)) self.assertEqual(expected, actual, msg=msg) class AlignTest(Base): def test_name(self): align_lt_0, align_lt_1, _ = core.align(self.original_lt, self.original_lt) self.assertIn('lt_align', align_lt_0.name) self.assertIn('/0', align_lt_0.name) self.assertIn('lt_align', align_lt_1.name) self.assertIn('/1', align_lt_1.name) def test_identical_shaped_inputs(self): offset_tensor = self.original_lt.tensor + 1 offset_lt = core.LabeledTensor(offset_tensor, self.original_lt.axes) align_lt, align_offset_lt, broadcast_axes = core.align(self.original_lt, offset_lt) self.assertLabeledTensorsEqual(align_lt, self.original_lt) self.assertLabeledTensorsEqual(align_offset_lt, offset_lt) self.assertEqual(broadcast_axes, self.original_lt.axes) def test_different_inputs(self): # The correct axis ordering is ['x', 'channel', 'probs']. 
align_x_probs_lt, align_channel_probs_lt, broadcast_axes = core.align( self.x_probs_lt, self.channel_probs_lt) x_probs_golden_lt = core.LabeledTensor( array_ops.reshape(self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size]), [self.a0, 'channel', self.a3]) self.assertLabeledTensorsEqual(align_x_probs_lt, x_probs_golden_lt) channel_probs_golden_lt = core.LabeledTensor( array_ops.reshape(self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size]), ['x', self.a1, self.a3]) self.assertLabeledTensorsEqual(align_channel_probs_lt, channel_probs_golden_lt) self.assertEqual(broadcast_axes, core.Axes([self.a0, self.a1, self.a3])) def test_axis_order_scope(self): xz_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'z']) yz_lt = core.LabeledTensor(array_ops.ones((4, 3)), ['y', 'z']) _, _, broadcast_axes = core.align(xz_lt, yz_lt) self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z']) _, _, broadcast_axes = core.align(yz_lt, xz_lt) self.assertEqual(list(broadcast_axes.keys()), ['y', 'x', 'z']) with core.axis_order_scope(['x', 'y', 'z']): _, _, broadcast_axes = core.align(yz_lt, xz_lt) self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z']) with core.axis_order_scope(['x', 'y']): with self.assertRaises(core.AxisOrderError): core.align(xz_lt, yz_lt) with self.assertRaises(core.AxisOrderError): core.align(yz_lt, xz_lt) def test_invalid_input(self): lt_0 = core.LabeledTensor(array_ops.zeros([5]), [('a', range(5))]) lt_1 = core.LabeledTensor(array_ops.zeros([5]), [('a', range(1, 6))]) with self.assertRaises(ValueError): core.align(lt_0, lt_1) class ConvertToLabeledTensorTest(Base): # TODO(shoyer): Simplify these tests once we can reuse labeled tensors in # assertLabeledTensorsEqual. def test_labeled_tensor(self): actual = core.convert_to_labeled_tensor(self.original_lt) self.assertLabeledTensorsEqual(actual, self.original_lt) def test_python_scalar(self): actual = core.convert_to_labeled_tensor(42) golden_lt = core.LabeledTensor(ops.convert_to_tensor(42), []) self.assertLabeledTensorsEqual(actual, golden_lt) def test_numpy_array(self): actual = core.convert_to_labeled_tensor(np.array(42)) golden_lt = core.LabeledTensor(ops.convert_to_tensor(42), []) self.assertLabeledTensorsEqual(actual, golden_lt) def test_tensor(self): actual = core.convert_to_labeled_tensor(constant_op.constant(42)) golden_lt = core.LabeledTensor(ops.convert_to_tensor(42), []) self.assertLabeledTensorsEqual(actual, golden_lt) def test_invalid_input(self): with self.assertRaises(ValueError): core.convert_to_labeled_tensor(math_ops.range(5)) with self.assertRaises(ValueError): core.convert_to_labeled_tensor(np.array([1, 2])) class DocStringCheckMixin(object): # requires self.ops to be defined def test_function_docstring_and_name(self): for op_name, _, _, lt_op in self.ops: if lt_op is not None: self.assertIn('tf.%s' % op_name, lt_op.__doc__) self.assertEqual(op_name, lt_op.__name__) class UnaryOpsTestsMixin(object): # requires self.ops and self.test_lt to be defined def test_core_op(self): for op_name, _, tf_op, lt_op in self.ops: if tf_op is not None: golden_lt = core.LabeledTensor( tf_op(self.test_lt.tensor), self.test_lt.axes) actual_lt = lt_op(self.test_lt) self.assertIn(op_name, actual_lt.name) self.assertLabeledTensorsEqual(golden_lt, actual_lt) def test_infix(self): for op_name, infix_op, _, _ in self.ops: if infix_op is not None: expected_lt = core.LabeledTensor( infix_op(self.test_lt.tensor), self.test_lt.axes) actual_lt = infix_op(self.test_lt) self.assertIn(op_name, actual_lt.name) 
self.assertLabeledTensorsEqual(expected_lt, actual_lt) class CoreUnaryOpsTest(Base, DocStringCheckMixin, UnaryOpsTestsMixin): def setUp(self): super(CoreUnaryOpsTest, self).setUp() self.ops = [ ('abs', operator.abs, math_ops.abs, core.abs_function), ('neg', operator.neg, math_ops.negative, core.neg), # TODO(shoyer): add unary + to core TensorFlow ('pos', None, None, None), ('sign', None, math_ops.sign, core.sign), ('reciprocal', None, math_ops.reciprocal, core.reciprocal), ('square', None, math_ops.square, core.square), ('round', None, math_ops.round, core.round_function), ('sqrt', None, math_ops.sqrt, core.sqrt), ('rsqrt', None, math_ops.rsqrt, core.rsqrt), ('log', None, math_ops.log, core.log), ('exp', None, math_ops.exp, core.exp), ('log', None, math_ops.log, core.log), ('ceil', None, math_ops.ceil, core.ceil), ('floor', None, math_ops.floor, core.floor), ('cos', None, math_ops.cos, core.cos), ('sin', None, math_ops.sin, core.sin), ('tan', None, math_ops.tan, core.tan), ('acos', None, math_ops.acos, core.acos), ('asin', None, math_ops.asin, core.asin), ('atan', None, math_ops.atan, core.atan), ('lgamma', None, math_ops.lgamma, core.lgamma), ('digamma', None, math_ops.digamma, core.digamma), ('erf', None, math_ops.erf, core.erf), ('erfc', None, math_ops.erfc, core.erfc), ('lgamma', None, math_ops.lgamma, core.lgamma), ] total_size = np.prod([v.size for v in self.original_lt.axes.values()]) self.test_lt = core.LabeledTensor( math_ops.cast(self.original_lt, dtypes.float32) / total_size, self.original_lt.axes) class LogicalNotTest(Base, DocStringCheckMixin, UnaryOpsTestsMixin): def setUp(self): super(LogicalNotTest, self).setUp() self.ops = [('logical_not', operator.invert, math_ops.logical_not, core.logical_not),] self.test_lt = self.original_lt < 10 class BinaryOpsTestsMixin(object): # requires self.ops, self.test_lt_1, self.test_lt_2, self.test_lt_1_broadcast # and self.test_lt_2_broadcast to be defined def test_core_op(self): for op_name, _, tf_op, lt_op in self.ops: golden_tensor = tf_op(self.test_lt_1_broadcast, self.test_lt_2_broadcast) golden_lt = core.LabeledTensor(golden_tensor, self.broadcast_axes) actual_lt = lt_op(self.test_lt_1, self.test_lt_2) self.assertIn(op_name, actual_lt.name) self.assertLabeledTensorsEqual(golden_lt, actual_lt) def test_infix(self): for op_name, infix_op, _, lt_op in self.ops: if infix_op is not None: expected_lt = lt_op(self.test_lt_1, self.test_lt_2) actual_lt = infix_op(self.test_lt_1, self.test_lt_2) self.assertIn(op_name, actual_lt.name) self.assertLabeledTensorsEqual(expected_lt, actual_lt) class CoreBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin): def setUp(self): super(CoreBinaryOpsTest, self).setUp() self.x_probs_broadcast_tensor = array_ops.reshape( self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size]) self.channel_probs_broadcast_tensor = array_ops.reshape( self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size]) # == and != are not element-wise for tf.Tensor, so they shouldn't be # elementwise for LabeledTensor, either. 
self.ops = [ ('add', operator.add, math_ops.add, core.add), ('sub', operator.sub, math_ops.subtract, core.sub), ('mul', operator.mul, math_ops.multiply, core.mul), ('div', operator.truediv, math_ops.div, core.div), ('mod', operator.mod, math_ops.mod, core.mod), ('pow', operator.pow, math_ops.pow, core.pow_function), ('equal', None, math_ops.equal, core.equal), ('less', operator.lt, math_ops.less, core.less), ('less_equal', operator.le, math_ops.less_equal, core.less_equal), ('not_equal', None, math_ops.not_equal, core.not_equal), ('greater', operator.gt, math_ops.greater, core.greater), ('greater_equal', operator.ge, math_ops.greater_equal, core.greater_equal), ] self.test_lt_1 = self.x_probs_lt self.test_lt_2 = self.channel_probs_lt self.test_lt_1_broadcast = self.x_probs_broadcast_tensor self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor self.broadcast_axes = [self.a0, self.a1, self.a3] def test_reflexive(self): labeled_tensor = self.x_probs_lt + 1 # all elements must be >0 for division for op_name, infix_op, _, lt_op in self.ops: if infix_op is not None: expected_lt = lt_op(2, labeled_tensor) actual_lt = infix_op(2, labeled_tensor) # Python uses greater for the reflexive version of less (and vise-versa) if 'less' in op_name: op_name = op_name.replace('less', 'greater') elif 'greater' in op_name: op_name = op_name.replace('greater', 'less') self.assertIn(op_name, actual_lt.name) self.assertLabeledTensorsEqual(expected_lt, actual_lt) class LogicalBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin): def setUp(self): super(LogicalBinaryOpsTest, self).setUp() self.ops = [ ('logical_and', operator.and_, math_ops.logical_and, core.logical_and), ('logical_or', operator.or_, math_ops.logical_or, core.logical_or), ('logical_xor', operator.xor, math_ops.logical_xor, core.logical_xor), ] self.test_lt_1 = self.original_lt < 10 self.test_lt_2 = self.original_lt < 5 self.test_lt_1_broadcast = self.test_lt_1.tensor self.test_lt_2_broadcast = self.test_lt_2.tensor self.broadcast_axes = self.test_lt_1.axes class FloatBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin): def setUp(self): super(FloatBinaryOpsTest, self).setUp() self.ops = [ ('igamma', None, math_ops.igamma, core.igamma), ('igammac', None, math_ops.igammac, core.igammac), ('zeta', None, math_ops.zeta, core.zeta), ('polygamma', None, math_ops.polygamma, core.polygamma), ('maximum', None, math_ops.maximum, core.maximum), ('minimum', None, math_ops.minimum, core.minimum), ('squared_difference', None, math_ops.squared_difference, core.squared_difference), ] total_size = np.prod([v.size for v in self.original_lt.axes.values()]) test_lt = core.LabeledTensor( math_ops.cast(self.original_lt, dtypes.float32) / total_size, self.original_lt.axes) self.test_lt_1 = test_lt self.test_lt_2 = 1.0 - test_lt self.test_lt_1_broadcast = self.test_lt_1.tensor self.test_lt_2_broadcast = self.test_lt_2.tensor self.broadcast_axes = self.test_lt_1.axes if __name__ == '__main__': test_lib.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/core_test.py
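The binary-op tests above hinge on label-based alignment; the following restates the behavior they verify as plain usage (axis names and sizes are illustrative, and the `lt` import path is assumed).

import tensorflow as tf
import tensorflow.contrib.labeled_tensor as lt  # assumed import path (TF 1.x)

x_probs = lt.LabeledTensor(tf.ones([7, 11]), ['x', 'probs'])
channel_probs = lt.LabeledTensor(tf.ones([3, 11]), ['channel', 'probs'])

# Infix operators and the named ops are interchangeable; both align operands
# on axis names, insert size-1 axes as needed, and broadcast.
total = x_probs + channel_probs
same_total = lt.add(x_probs, channel_probs)
assert list(total.axes.keys()) == ['x', 'channel', 'probs']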
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.labeled_tensor.python.ops import core from tensorflow.contrib.labeled_tensor.python.ops import nn from tensorflow.contrib.labeled_tensor.python.ops import test_util from tensorflow.python.ops import nn_impl from tensorflow.python.ops import nn_ops class NNTests(test_util.Base): def setUp(self): super(NNTests, self).setUp() self.axes = ['x'] self.original_lt = core.LabeledTensor([0.0, 0.5, 1.0], self.axes) self.other_lt = 1 - self.original_lt def test_unary_ops(self): ops = [ ('relu', nn_ops.relu, nn.relu), ('relu6', nn_ops.relu6, nn.relu6), ('crelu', nn_ops.crelu, nn.crelu), ('elu', nn_ops.elu, nn.elu), ('softplus', nn_ops.softplus, nn.softplus), ('l2_loss', nn_ops.l2_loss, nn.l2_loss), ('softmax', nn_ops.softmax, nn.softmax), ('log_softmax', nn_ops.log_softmax, nn.log_softmax), ] for op_name, tf_op, lt_op in ops: golden_tensor = tf_op(self.original_lt.tensor) golden_lt = core.LabeledTensor(golden_tensor, self.axes) actual_lt = lt_op(self.original_lt) self.assertIn(op_name, actual_lt.name) self.assertLabeledTensorsEqual(golden_lt, actual_lt) def test_binary_ops(self): ops = [ ('sigmoid_cross_entropy_with_logits', nn_impl.sigmoid_cross_entropy_with_logits, nn.sigmoid_cross_entropy_with_logits), ('softmax_cross_entropy_with_logits', nn_ops.softmax_cross_entropy_with_logits, nn.softmax_cross_entropy_with_logits), ('sparse_softmax_cross_entropy_with_logits', nn_ops.sparse_softmax_cross_entropy_with_logits, nn.sparse_softmax_cross_entropy_with_logits), ] for op_name, tf_op, lt_op in ops: golden_tensor = tf_op(self.original_lt.tensor, self.other_lt.tensor) golden_lt = core.LabeledTensor(golden_tensor, self.axes) actual_lt = lt_op(self.original_lt, self.other_lt) self.assertIn(op_name, actual_lt.name) self.assertLabeledTensorsEqual(golden_lt, actual_lt)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/nn_test.py
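The `nn` module exercised above is not reproduced here, but the tests show its wrappers are label-preserving shims over `tf.nn`; a small hypothetical usage sketch, limited to the unary wrappers the tests cover:

from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import nn

# As in NNTests above, a plain Python list is accepted and converted.
logits = core.LabeledTensor([0.0, 0.5, 1.0], ['class'])

# The unary wrappers apply the corresponding tf.nn op element-wise and keep
# the input axes, so the result is still labeled with the 'class' axis.
activations = nn.relu(logits)
probabilities = nn.softmax(logits)
assert list(probabilities.axes.keys()) == ['class']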
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tools to make it a bit easier to use LabeledTensor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six import string_types from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc from tensorflow.contrib.labeled_tensor.python.ops import core from tensorflow.contrib.labeled_tensor.python.ops import ops from tensorflow.python.framework import ops as tf_ops class ReshapeCoder(object): """Utility class for mapping to and from another shape. For example, say you have a function `crop_center` which expects a LabeledTensor with axes named ['batch', 'row', 'column', 'depth'], and you have a LabeledTensor `masked_image_lt` with axes ['batch', 'row', 'column', 'channel', 'mask']. To call `crop_center` with `masked_image_lt` you'd normally have to write: >>> reshape_lt = lt.reshape(masked_image_lt, ['channel', 'mask'], ['depth']) >>> crop_lt = crop_center(reshape_lt) >>> result_lt = lt.reshape(crop_lt, ['depth'], ... [masked_image_lt.axes['channel'], masked_image_lt.axes['mask']]) ReshapeCoder takes care of this renaming logic for you, allowing you to instead write: >>> rc = ReshapeCoder(['channel', 'mask'], ['depth']) >>> result_lt = rc.decode(crop_center(rc.encode(masked_image_lt))) Here, `decode` restores the original axes 'channel' and 'mask', so `crop_center` must not have modified the size of the 'depth' axis. """ @tc.accepts(object, tc.Collection(str), tc.Collection(tc.Union(str, core.AxisLike)), tc.Optional(str)) def __init__(self, existing_axis_names, new_axes, name=None): self._name = name self._existing_axis_names = existing_axis_names self._new_axes = new_axes self._existing_axes = None @tc.returns(core.LabeledTensor) @tc.accepts(object, core.LabeledTensorLike) def encode(self, labeled_tensor): """Reshape the input to the target shape. If called several times, the axes named in existing_axis_names must be identical. Args: labeled_tensor: The input tensor. Returns: The input reshaped to the target shape. Raises: ValueError: If the axes in existing_axis_names don't match the axes of a tensor in a previous invocation of this method. """ with tf_ops.name_scope(self._name, 'lt_reshape_encode', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) reshape_lt = ops.reshape(labeled_tensor, self._existing_axis_names, self._new_axes, name=scope) axes = [labeled_tensor.axes[n] for n in self._existing_axis_names] if self._existing_axes is not None and self._existing_axes != axes: raise ValueError( 'input axes %r do not match axes from previous method call %r' % (axes, self._existing_axes)) else: self._existing_axes = axes return reshape_lt @tc.returns(core.LabeledTensor) @tc.accepts(object, core.LabeledTensorLike) def decode(self, labeled_tensor): """Reshape the input to the original shape. 
This is the inverse of encode. Encode must have been called at least once prior to this method being called. Args: labeled_tensor: The input tensor. Returns: The input reshaped to the original shape. Raises: ValueError: If this method was called before encode was called. """ if self._existing_axes is None: raise ValueError('decode called before encode') with tf_ops.name_scope(self._name, 'lt_reshape_decode', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) new_axis_names = [axis if isinstance(axis, string_types) else core.as_axis(axis).name for axis in self._new_axes] return ops.reshape(labeled_tensor, new_axis_names, self._existing_axes, name=scope)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/sugar.py
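A minimal usage sketch for the ReshapeCoder defined in the sugar.py record above. It assumes a TensorFlow 1.x runtime with the contrib labeled_tensor package; the shapes, axis names, and the crop_center stand-in are illustrative assumptions, not part of the original file.

# Sketch only (assumed TF 1.x + contrib labeled_tensor): merge 'channel' and
# 'mask' into one 'depth' axis, run a function expecting a 'depth' axis, then
# restore the original axes with the same ReshapeCoder instance.
import numpy as np
import tensorflow as tf
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import sugar

image = np.zeros((2, 4, 4, 3, 2), dtype=np.float32)
masked_image_lt = core.LabeledTensor(
    tf.constant(image), ['batch', 'row', 'column', 'channel', 'mask'])

def crop_center(lt_img):
  # Hypothetical stand-in for a crop that expects ['batch', 'row', 'column',
  # 'depth']; it slices rows/columns and leaves the 'depth' axis untouched.
  return core.slice_function(lt_img, {'row': slice(1, 3), 'column': slice(1, 3)})

rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
result_lt = rc.decode(crop_center(rc.encode(masked_image_lt)))
# result_lt again has axes ['batch', 'row', 'column', 'channel', 'mask'].

Because decode reuses the axes recorded by encode, the wrapped function must not change the size of the merged 'depth' axis, matching the caveat in the class docstring.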
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Non-core ops for LabeledTensor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import types import numpy as np from six import string_types from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc from tensorflow.contrib.labeled_tensor.python.ops import core from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import map_fn as map_fn_lib from tensorflow.python.ops import math_ops from tensorflow.python.ops import numerics from tensorflow.python.ops import random_ops from tensorflow.python.training import input # pylint: disable=redefined-builtin from tensorflow.python.util.compat import collections_abc @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis, tc.Optional(string_types)) def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None): with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope: temp_axes = core.Axes([axis] + list( labeled_tensor.axes.remove(axis.name).values())) transposed = core.transpose(labeled_tensor, temp_axes.keys()) indexed = core.LabeledTensor( array_ops.gather(transposed.tensor, indexer), temp_axes) return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, tc.Union(slice, collections_abc.Hashable, list)), tc.Optional(string_types)) def select(labeled_tensor, selection, name=None): """Slice out a subset of the tensor. Args: labeled_tensor: The input tensor. selection: A dictionary mapping an axis name to a scalar, slice or list of values to select. Currently supports two types of selections: (a) Any number of scalar and/or slice selections. (b) Exactly one list selection, without any scalars or slices. name: Optional op name. Returns: The selection as a `LabeledTensor`. Raises: ValueError: If the tensor doesn't have an axis in the selection or if that axis lacks labels. KeyError: If any labels in a selection are not found in the original axis. NotImplementedError: If you attempt to combine a list selection with scalar selection or another list selection. """ with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) slices = {} indexers = {} for axis_name, value in selection.items(): if axis_name not in labeled_tensor.axes: raise ValueError( 'The tensor does not have an axis named %s. Its axes are: %r' % (axis_name, labeled_tensor.axes.keys())) axis = labeled_tensor.axes[axis_name] if axis.labels is None: raise ValueError( 'The axis named %s does not have labels. 
The axis is: %r' % (axis_name, axis)) if isinstance(value, slice): # TODO(shoyer): consider deprecating using slices in favor of lists if value.start is None: start = None else: start = axis.index(value.start) if value.stop is None: stop = None else: # For now, follow the pandas convention of making labeled slices # inclusive of both bounds. stop = axis.index(value.stop) + 1 if value.step is not None: raise NotImplementedError('slicing with a step is not yet supported') slices[axis_name] = slice(start, stop) # Needs to be after checking for slices, since slice objects claim to be # instances of collections_abc.Hashable but hash() on them fails. elif isinstance(value, collections_abc.Hashable): slices[axis_name] = axis.index(value) elif isinstance(value, list): if indexers: raise NotImplementedError( 'select does not yet support more than one list selection at ' 'the same time') indexer = [axis.index(v) for v in value] indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64) else: # If type checking is working properly, this shouldn't be possible. raise TypeError('cannot handle arbitrary types') if indexers and slices: raise NotImplementedError( 'select does not yet support combined scalar and list selection') # For now, handle array selection separately, because tf.gather_nd does # not support gradients yet. Later, using gather_nd will let us combine # these paths. if indexers: (axis_name, indexer), = indexers.items() axis = core.Axis(axis_name, selection[axis_name]) return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope) else: return core.slice_function(labeled_tensor, slices, name=scope) @tc.returns(core.LabeledTensor) @tc.accepts( tc.Collection(core.LabeledTensorLike), string_types, tc.Optional(string_types)) def concat(labeled_tensors, axis_name, name=None): """Concatenate tensors along a dimension. See tf.concat. Args: labeled_tensors: A list of input LabeledTensors. axis_name: The name of the axis along which to concatenate. name: Optional op name. Returns: The concatenated tensor. The coordinate labels for the concatenation dimension are also concatenated, if they are available for every tensor. Raises: ValueError: If fewer than one tensor inputs is provided, if the tensors have incompatible axes, or if `axis_name` isn't the name of an axis. """ with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope: labeled_tensors = [ core.convert_to_labeled_tensor(lt) for lt in labeled_tensors ] if len(labeled_tensors) < 1: raise ValueError('concat expects at least 1 tensor, but received %s' % labeled_tensors) # All tensors must have these axes. axes_0 = labeled_tensors[0].axes axis_names = list(axes_0.keys()) if axis_name not in axis_names: raise ValueError('%s not in %s' % (axis_name, axis_names)) shared_axes = axes_0.remove(axis_name) tensors = [labeled_tensors[0].tensor] concat_axis_list = [axes_0[axis_name]] for labeled_tensor in labeled_tensors[1:]: current_shared_axes = labeled_tensor.axes.remove(axis_name) if current_shared_axes != shared_axes: # TODO(shoyer): add more specific checks about what went wrong, # including raising AxisOrderError when appropriate raise ValueError('Mismatched shared axes: the first tensor ' 'had axes %r but this tensor has axes %r.' % (shared_axes, current_shared_axes)) # Accumulate the axis labels, if they're available. 
concat_axis_list.append(labeled_tensor.axes[axis_name]) tensors.append(labeled_tensor.tensor) concat_axis = core.concat_axes(concat_axis_list) concat_dimension = axis_names.index(axis_name) concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope) values = list(axes_0.values()) concat_axes = (values[:concat_dimension] + [concat_axis] + values[concat_dimension + 1:]) return core.LabeledTensor(concat_tensor, concat_axes) # TODO(shoyer): rename pack/unpack to stack/unstack @tc.returns(core.LabeledTensor) @tc.accepts( tc.Collection(core.LabeledTensorLike), tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types)) def pack(labeled_tensors, new_axis, axis_position=0, name=None): """Pack tensors along a new axis. See tf.pack. Args: labeled_tensors: The input tensors, which must have identical axes. new_axis: The name of the new axis, or a tuple containing the name and coordinate labels. axis_position: Optional integer position at which to insert the new axis. name: Optional op name. Returns: The packed tensors as a single LabeledTensor, with `new_axis` in the given `axis_position`. Raises: ValueError: If fewer than one input tensors is provided, or if the tensors don't have identical axes. """ with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope: labeled_tensors = [ core.convert_to_labeled_tensor(lt) for lt in labeled_tensors ] if len(labeled_tensors) < 1: raise ValueError('pack expects at least 1 tensors, but received %s' % labeled_tensors) axes_0 = labeled_tensors[0].axes for t in labeled_tensors: if t.axes != axes_0: raise ValueError('Non-identical axes. Expected %s but got %s' % (axes_0, t.axes)) pack_op = array_ops.stack( [t.tensor for t in labeled_tensors], axis=axis_position, name=scope) axes = list(axes_0.values()) axes.insert(axis_position, new_axis) return core.LabeledTensor(pack_op, axes) @tc.returns(tc.List(core.LabeledTensor)) @tc.accepts(core.LabeledTensorLike, tc.Optional(string_types), tc.Optional(string_types)) def unpack(labeled_tensor, axis_name=None, name=None): """Unpack the tensor. See tf.unpack. Args: labeled_tensor: The input tensor. axis_name: Optional name of axis to unpack. By default, the first axis is used. name: Optional op name. Returns: The list of unpacked LabeledTensors. Raises: ValueError: If `axis_name` is not an axis on the input. """ with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) axis_names = list(labeled_tensor.axes.keys()) if axis_name is None: axis_name = axis_names[0] if axis_name not in axis_names: raise ValueError('%s not in %s' % (axis_name, axis_names)) axis = axis_names.index(axis_name) unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope) axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis] return [core.LabeledTensor(t, axes) for t in unpack_ops] @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Collection(string_types), tc.Collection(tc.Union(string_types, core.AxisLike)), tc.Optional(string_types)) def reshape(labeled_tensor, existing_axes, new_axes, name=None): """Reshape specific axes of a LabeledTensor. Non-indicated axes remain in their original locations. Args: labeled_tensor: The input tensor. existing_axes: List of axis names found on the input tensor. These must appear sequentially in the list of axis names on the input. In other words, they must be a valid slice of `list(labeled_tensor.axes.keys())`. 
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects providing new axes with which to replace `existing_axes` in the reshaped result. At most one element of `new_axes` may be a string, indicating an axis with unknown size. name: Optional op name. Returns: The reshaped LabeledTensor. Raises: ValueError: If `existing_axes` are not all axes on the input, or if more than one of `new_axes` has unknown size. AxisOrderError: If `existing_axes` are not a slice of axis names on the input. """ with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) original_axis_names = list(labeled_tensor.axes.keys()) existing_axes = list(existing_axes) if not set(existing_axes) <= set(original_axis_names): raise ValueError('existing_axes %r are not contained in the set of axis ' 'names %r on the input labeled tensor' % (existing_axes, original_axis_names)) start = original_axis_names.index(existing_axes[0]) stop = original_axis_names.index(existing_axes[-1]) + 1 if existing_axes != original_axis_names[start:stop]: # We could support existing_axes that aren't a slice by using transpose, # but that could lead to unpredictable performance consequences because # transposes are not free in TensorFlow. If we did transpose # automatically, the user might never realize that their data is being # produced with the wrong order. (The later will occur with some frequency # because of how broadcasting automatically choose axis order.) # So for now we've taken the strict approach. raise core.AxisOrderError( 'existing_axes %r are not a slice of axis names %r on the input ' 'labeled tensor. Use `transpose` or `impose_axis_order` to reorder ' 'axes on the input explicitly.' % (existing_axes, original_axis_names)) if sum(isinstance(axis, string_types) for axis in new_axes) > 1: raise ValueError( 'at most one axis in new_axes can have unknown size. All other ' 'axes must have an indicated integer size or labels: %r' % new_axes) original_values = list(labeled_tensor.axes.values()) axis_size = lambda axis: -1 if axis.size is None else axis.size shape = [axis_size(axis) for axis in original_values[:start]] for axis_ref in new_axes: if isinstance(axis_ref, string_types): shape.append(-1) else: axis = core.as_axis(axis_ref) shape.append(axis_size(axis)) shape.extend(axis_size(axis) for axis in original_values[stop:]) reshaped_tensor = array_ops.reshape( labeled_tensor.tensor, shape, name=scope) axes = original_values[:start] + list(new_axes) + original_values[stop:] return core.LabeledTensor(reshaped_tensor, axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, string_types, string_types, tc.Optional(string_types)) def rename_axis(labeled_tensor, existing_name, new_name, name=None): """Rename an axis of LabeledTensor. Args: labeled_tensor: The input tensor. existing_name: Name for an existing axis on the input. new_name: Desired replacement name. name: Optional op name. Returns: LabeledTensor with renamed axis. Raises: ValueError: If `existing_name` is not an axis on the input. 
""" with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope: if existing_name not in labeled_tensor.axes: raise ValueError('existing_name %r are not contained in the set of axis ' 'names %r on the input labeled tensor' % (existing_name, labeled_tensor.axes.keys())) new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value) return reshape(labeled_tensor, [existing_name], [new_axis], name=scope) @tc.returns(tc.List(core.LabeledTensor)) @tc.accepts(string_types, collections_abc.Callable, int, bool, tc.Collection(core.LabeledTensorLike), bool, tc.Optional(string_types)) def _batch_helper(default_name, batch_fn, batch_size, enqueue_many, labeled_tensors, allow_smaller_final_batch, name=None): with ops.name_scope(name, default_name, labeled_tensors) as scope: labeled_tensors = [ core.convert_to_labeled_tensor(lt) for lt in labeled_tensors ] batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope) # TODO(shoyer): Remove this when they sanitize the TF API. if not isinstance(batch_ops, list): assert isinstance(batch_ops, ops.Tensor) batch_ops = [batch_ops] if allow_smaller_final_batch: batch_size = None @tc.returns(core.Axes) @tc.accepts(core.Axes) def output_axes(axes): if enqueue_many: if 'batch' not in axes or list(axes.keys()).index('batch') != 0: raise ValueError( 'When enqueue_many is True, input tensors must have an axis ' 'called "batch" as their first dimension, ' 'but axes were %s' % axes) culled_axes = axes.remove('batch') return core.Axes([('batch', batch_size)] + list(culled_axes.values())) else: return core.Axes([('batch', batch_size)] + list(axes.values())) output_labeled_tensors = [] for i, tensor in enumerate(batch_ops): axes = output_axes(labeled_tensors[i].axes) output_labeled_tensors.append(core.LabeledTensor(tensor, axes)) return output_labeled_tensors @tc.returns(tc.List(core.LabeledTensor)) @tc.accepts( tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool, tc.Optional(string_types)) def batch(labeled_tensors, batch_size, num_threads=1, capacity=32, enqueue_many=False, allow_smaller_final_batch=False, name=None): """Rebatch a tensor. See tf.batch. Args: labeled_tensors: The input tensors. batch_size: The output batch size. num_threads: See tf.batch. capacity: See tf.batch. enqueue_many: If true, the input tensors must contain a 'batch' axis as their first axis. If false, the input tensors must not contain a 'batch' axis. See tf.batch. allow_smaller_final_batch: See tf.batch. name: Optional op name. Returns: The rebatched tensors. If enqueue_many is false, the output tensors will have a new 'batch' axis as their first axis. Raises: ValueError: If enqueue_many is True and the first axis of the tensors isn't "batch". """ def fn(tensors, scope): return input.batch( tensors, batch_size=batch_size, num_threads=num_threads, capacity=capacity, enqueue_many=enqueue_many, allow_smaller_final_batch=allow_smaller_final_batch, name=scope) return _batch_helper('lt_batch', fn, batch_size, enqueue_many, labeled_tensors, allow_smaller_final_batch, name) @tc.returns(tc.List(core.LabeledTensor)) @tc.accepts( tc.Collection(core.LabeledTensorLike), int, int, int, bool, int, tc.Optional(int), bool, tc.Optional(string_types)) def shuffle_batch(labeled_tensors, batch_size, num_threads=1, capacity=32, enqueue_many=False, min_after_dequeue=0, seed=None, allow_smaller_final_batch=False, name=None): """Rebatch a tensor, with shuffling. See tf.batch. Args: labeled_tensors: The input tensors. batch_size: The output batch size. num_threads: See tf.batch. 
capacity: See tf.batch. enqueue_many: If true, the input tensors must contain a 'batch' axis as their first axis. If false, the input tensors must not contain a 'batch' axis. See tf.batch. min_after_dequeue: Minimum number of elements in the queue after a dequeue, used to ensure mixing. seed: Optional random seed. allow_smaller_final_batch: See tf.batch. name: Optional op name. Returns: The rebatched tensors. If enqueue_many is false, the output tensors will have a new 'batch' axis as their first axis. Raises: ValueError: If enqueue_many is True and the first axis of the tensors isn't "batch". """ def fn(tensors, scope): return input.shuffle_batch( tensors, batch_size=batch_size, num_threads=num_threads, capacity=capacity, enqueue_many=enqueue_many, min_after_dequeue=min_after_dequeue, seed=seed, allow_smaller_final_batch=allow_smaller_final_batch, name=scope) return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many, labeled_tensors, allow_smaller_final_batch, name) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, int), tc.Optional(int), tc.Optional(string_types)) def random_crop(labeled_tensor, shape_map, seed=None, name=None): """Randomly crops a tensor to a given size. See tf.random_crop. Args: labeled_tensor: The input tensor. shape_map: A dictionary mapping axis names to the size of the random crop for that dimension. seed: An optional random seed. name: An optional op name. Returns: A tensor of the same rank as `labeled_tensor`, cropped randomly in the selected dimensions. Raises: ValueError: If the shape map contains an axis name not in the input tensor. """ with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) for axis_name in shape_map: if axis_name not in labeled_tensor.axes: raise ValueError('Selection axis %s not in axes %s' % (axis_name, labeled_tensor.axes)) shape = [] axes = [] for axis in labeled_tensor.axes.values(): if axis.name in shape_map: size = shape_map[axis.name] shape.append(size) # We lose labels for the axes we crop, leaving just the size. axes.append((axis.name, size)) else: shape.append(len(axis)) axes.append(axis) crop_op = random_ops.random_crop( labeled_tensor.tensor, shape, seed=seed, name=scope) return core.LabeledTensor(crop_op, axes) # TODO(shoyer): Allow the user to select the axis over which to map. @tc.returns(core.LabeledTensor) @tc.accepts(collections_abc.Callable, core.LabeledTensorLike, tc.Optional(string_types)) def map_fn(fn, labeled_tensor, name=None): """Map on the list of tensors unpacked from labeled_tensor. See tf.map_fn. Args: fn: The function to apply to each unpacked LabeledTensor. It should have type LabeledTensor -> LabeledTensor. labeled_tensor: The input tensor. name: Optional op name. Returns: A tensor that packs the results of applying fn to the list of tensors unpacked from labeled_tensor. """ with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) unpack_lts = unpack(labeled_tensor) # TODO(ericmc): Fix this upstream. if labeled_tensor.dtype == dtypes.string: # We must construct the full graph here, because map_fn_lib.map_fn # doesn't work for string-valued tensors. # Constructing the full graph may be slow. 
map_lts = [fn(t) for t in unpack_lts] return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope) else: # Figure out what the axis labels should be, but use tf.map_fn to # construct the graph because it's efficient. # It may be slow to construct the full graph, so we infer the labels from # the first element. # TODO(ericmc): This builds a subgraph which then gets thrown away. # Find a more elegant solution. first_map_lt = fn(unpack_lts[0]) final_axes = list(labeled_tensor.axes.values())[:1] + list( first_map_lt.axes.values()) @tc.returns(ops.Tensor) @tc.accepts(ops.Tensor) def tf_fn(tensor): original_axes = list(labeled_tensor.axes.values())[1:] tensor_lt = core.LabeledTensor(tensor, original_axes) return fn(tensor_lt).tensor map_op = map_fn_lib.map_fn( tf_fn, labeled_tensor.tensor, dtype=first_map_lt.dtype) map_lt = core.LabeledTensor(map_op, final_axes) return core.identity(map_lt, name=scope) @tc.returns(core.LabeledTensor) @tc.accepts(collections_abc.Callable, core.LabeledTensorLike, core.LabeledTensorLike, tc.Optional(string_types)) def foldl(fn, labeled_tensor, initial_value, name=None): """Left fold on the list of tensors unpacked from labeled_tensor. See tf.foldl. Args: fn: The function to apply to each unpacked LabeledTensor. It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor. Its arguments are (accumulated_value, next_value). labeled_tensor: The input tensor. initial_value: The initial value of the accumulator. name: Optional op name. Returns: The accumulated value. """ with ops.name_scope(name, 'lt_foldl', [labeled_tensor, initial_value]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) initial_value = core.convert_to_labeled_tensor(initial_value) @tc.returns(ops.Tensor) @tc.accepts(ops.Tensor, ops.Tensor) def tf_fn(accumulator, next_element): accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes) next_element_lt = core.LabeledTensor( next_element, list(labeled_tensor.axes.values())[1:]) return fn(accumulator_lt, next_element_lt).tensor foldl_op = functional_ops.foldl( tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor) foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes) return core.identity(foldl_lt, name=scope) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Optional(tc.Collection(string_types)), tc.Optional(string_types)) def squeeze(labeled_tensor, axis_names=None, name=None): """Remove size-1 dimensions. See tf.squeeze. Args: labeled_tensor: The input tensor. axis_names: The names of the dimensions to remove, or None to remove all size-1 dimensions. name: Optional op name. Returns: A tensor with the specified dimensions removed. Raises: ValueError: If the named axes are not in the tensor, or if they are not size-1. 
""" with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) if axis_names is None: axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1] for axis_name in axis_names: if axis_name not in labeled_tensor.axes: raise ValueError('axis %s is not in tensor axes %s' % (axis_name, labeled_tensor.axes)) elif len(labeled_tensor.axes[axis_name]) != 1: raise ValueError( 'cannot squeeze axis with size greater than 1: (%s, %s)' % (axis_name, labeled_tensor.axes[axis_name])) squeeze_dimensions = [] axes = [] for i, axis in enumerate(labeled_tensor.axes.values()): if axis.name in axis_names: squeeze_dimensions.append(i) else: axes.append(axis) if squeeze_dimensions: squeeze_op = array_ops.squeeze( labeled_tensor.tensor, squeeze_dimensions, name=scope) else: squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope) return core.LabeledTensor(squeeze_op, axes) # pylint: disable=invalid-name ReduceAxis = tc.Union(string_types, tc.Tuple(string_types, collections_abc.Hashable)) ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis))) # pylint: enable=invalid-name @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike, tc.Optional(string_types)) def matmul(a, b, name=None): """Matrix multiply two tensors with rank 1 or 2. If both tensors have rank 2, a matrix-matrix product is performed. If one tensor has rank 1 and the other has rank 2, then a matrix-vector product is performed. If both tensors have rank 1, then a vector dot-product is performed. (This behavior matches that of `numpy.dot`.) Both tensors must share exactly one dimension in common, which is the dimension the operation is summed along. The inputs will be automatically transposed if necessary as part of the matmul op. We intend to eventually support `matmul` on higher rank input, and also eventually support summing over any number shared dimensions (via an `axis` argument), but neither of these features has been implemented yet. Args: a: First LabeledTensor. b: Second LabeledTensor. name: Optional op name. Returns: LabeledTensor with the result of matrix multiplication. Axes are ordered by the current axis_order_scope, if set, or in or order of appearance on the inputs. Raises: NotImplementedError: If inputs have rank >2 or share multiple axes. ValueError: If the inputs have rank 0 or do not share any axes. """ with ops.name_scope(name, 'lt_matmul', [a, b]) as scope: a = core.convert_to_labeled_tensor(a) b = core.convert_to_labeled_tensor(b) if len(a.axes) > 2 or len(b.axes) > 2: # We could pass batched inputs to tf.matmul to make this work, but we # would also need to use tf.tile and/or tf.transpose. These are more # expensive than doing reshapes, so it's not clear if it's a good idea to # do this automatically. raise NotImplementedError( 'matmul currently requires inputs with rank 2 or less, but ' 'inputs have ranks %r and %r' % (len(a.axes), len(b.axes))) if not a.axes or not b.axes: raise ValueError( 'matmul currently requires inputs with at least rank 1, but ' 'inputs have ranks %r and %r' % (len(a.axes), len(b.axes))) shared_axes = set(a.axes) & set(b.axes) if len(shared_axes) > 1: raise NotImplementedError( 'matmul does not yet support summing over multiple shared axes: %r. ' 'Use transpose and reshape to create a single shared axis to sum ' 'over.' 
% shared_axes) if not shared_axes: raise ValueError('there must have exactly one axis in common between ' 'input to matmul: %r, %r' % (a.axes.keys(), b.axes.keys())) shared_axis, = shared_axes if a.axes[shared_axis] != b.axes[shared_axis]: raise ValueError('axis %r does not match on input arguments: %r vs %r' % (shared_axis, a.axes[shared_axis].value, b.axes[shared_axis].value)) result_axes = [] for axes in [a.axes, b.axes]: for axis in axes.values(): if axis.name != shared_axis: result_axes.append(axis) axis_scope_order = core.get_axis_order() if axis_scope_order is not None: result_axis_names = [axis.name for axis in result_axes] new_axis_names = [ name for name in axis_scope_order if name in result_axis_names ] if new_axis_names != result_axis_names: # switch a and b b, a = a, b # result_axes is a list of length 1 or 2 result_axes = result_axes[::-1] squeeze_dims = [] if len(a.axes) == 1: a_tensor = array_ops.reshape(a.tensor, (1, -1)) squeeze_dims.append(0) transpose_a = False else: a_tensor = a.tensor transpose_a = list(a.axes.keys()).index(shared_axis) == 0 if len(b.axes) == 1: b_tensor = array_ops.reshape(b.tensor, (-1, 1)) squeeze_dims.append(1) transpose_b = False else: b_tensor = b.tensor transpose_b = list(b.axes.keys()).index(shared_axis) == 1 result_op = math_ops.matmul( a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b) if squeeze_dims: result_op = array_ops.squeeze(result_op, squeeze_dims) result_op = array_ops.identity(result_op, name=scope) return core.LabeledTensor(result_op, result_axes) @tc.returns(types.FunctionType) @tc.accepts(string_types, collections_abc.Callable) def define_reduce_op(op_name, reduce_fn): """Define a reduction op for labeled tensors. Args: op_name: string name of the TensorFlow op. reduce_fn: function to call to evaluate the op on a tf.Tensor. Returns: Function defining the given reduction op that acts on a LabeledTensor. """ default_name = 'lt_%s' % op_name @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types)) def op(labeled_tensor, axes=None, name=None): """Computes the given reduction across the given axes of a LabeledTensor. See `tf.{op_name}` for full details. Args: labeled_tensor: The input tensor. axes: A set of axes or None. If None, all axes will be reduced. Axes must all be strings, in which case those dimensions will be removed, or pairs of (name, None) or (name, label), in which case those dimensions will be kept. name: Optional op name. Returns: The reduced LabeledTensor. Raises: ValueError: if any of the axes to reduce over are not found on `labeled_tensor`. """ with ops.name_scope(name, default_name, [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) if axes is None: axes = labeled_tensor.axes.keys() if isinstance(axes, (string_types, tuple)): axes = [axes] reduction_axes = {} axes_to_squeeze = [] for a in axes: if isinstance(a, string_types): # We squeeze out this axis. reduction_axes[a] = a axes_to_squeeze.append(a) else: # We keep this axis, with the user-provided labels. (axis_name, label) = a if label is not None: # The input was a single label, so make it a list so it can be # turned into an Axis. 
label = [label] reduction_axes[axis_name] = (axis_name, label) for axis_name in reduction_axes: if axis_name not in labeled_tensor.axes: raise ValueError('Axis %s not in axes %s' % (axis_name, labeled_tensor.axes)) intermediate_axes = [] reduction_dimensions = [] for i, axis in enumerate(labeled_tensor.axes.values()): if axis.name in reduction_axes: intermediate_axes.append(reduction_axes[axis.name]) reduction_dimensions.append(i) else: intermediate_axes.append(axis) reduce_op = reduce_fn( labeled_tensor.tensor, reduction_dimensions, keepdims=True) reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes) return squeeze(reduce_lt, axes_to_squeeze, name=scope) op.__doc__ = op.__doc__.format(op_name=op_name) op.__name__ = op_name return op reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all) reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any) reduce_logsumexp = define_reduce_op('reduce_logsumexp', math_ops.reduce_logsumexp) reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max) reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean) reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min) reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod) reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Mapping(str, tc.Union(int, ops.Tensor)), tc.Optional(string_types)) def tile(labeled_tensor, multiples, name=None): """Constructs a tensor by tiling a given tensor. Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled tensors would no longer be unique.) See lt.tile. Args: labeled_tensor: The input tensor. multiples: A mapping where the keys are axis names and the values are the integer number of times to tile along that axis. Only axes with a multiple different than 1 need be included. name: Optional op name. Returns: A tensor with the indicated axes tiled. Raises: ValueError: If the tiled axes are not axes in the input tensor, or if any axes in multiples have tick labels. """ with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()): raise ValueError('tile axes %r are not contained in the set of axis ' 'names %r on the input labeled tensor' % (multiples.keys(), labeled_tensor.axes)) labeled_axes = [ name for name in multiples if labeled_tensor.axes[name].labels is not None ] if labeled_axes: raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes) multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes] tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope) new_axes = [ axis.name if axis.labels is None else axis for axis in labeled_tensor.axes.values() ] return core.LabeledTensor(tile_op, new_axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)), string_types, tc.Optional(string_types)) def pad(labeled_tensor, paddings, mode='CONSTANT', name=None): """Pads a tensor. See tf.pad. Args: labeled_tensor: The input tensor. paddings: A mapping where the keys are axis names and the values are tuples where the first element is the padding to insert at the beginning of the axis and the second is the padding to insert at the end of the axis. mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC". name: Optional op name. 
Returns: A tensor with the indicated axes padded, optionally with those axes extended with the provided labels. Raises: ValueError: If the padded axes are not axes in the input tensor. """ with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()): raise ValueError('pad axes %r are not contained in the set of axis ' 'names %r on the input labeled tensor' % (paddings.keys(), labeled_tensor.axes)) new_axes = [] padding_pairs = [] for name, axis in labeled_tensor.axes.items(): if name in paddings: padding_before, padding_after = paddings[name] axis_before = core.Axis(name, padding_before) axis_after = core.Axis(name, padding_after) new_axes.append(core.concat_axes([axis_before, axis, axis_after])) padding_pairs.append((len(axis_before), len(axis_after))) else: new_axes.append(axis) padding_pairs.append((0, 0)) pad_op = array_ops.pad(labeled_tensor.tensor, padding_pairs, mode, name=scope) return core.LabeledTensor(pad_op, new_axes) @tc.returns(core.LabeledTensor) @tc.accepts( tc.Union(np.ndarray, list, tuple, core.Scalar), tc.Optional(dtypes.DType), tc.Optional( tc.Union(core.Axes, tc.Collection( tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types)) def constant(value, dtype=None, axes=None, name=None): """Creates a constant tensor. If `axes` includes any strings, shape is inferred from `value`. Otherwise, the sizes of the given `axes` are used to set `shape` for `tf.constant`. See tf.constant for more details. Args: value: The input tensor. dtype: The type of the returned tensor. axes: Optional Axes, list of strings or list of objects coercible to Axis objects. By default, axes are assumed to be an empty list (i.e., `value` is treated as a scalar). name: Optional op name. Returns: The tensor with elements set to zero. """ with ops.name_scope(name, 'lt_constant', [value]) as scope: if axes is None: axes = [] if isinstance(axes, core.Axes): axes = axes.values() if any(isinstance(ax, string_types) for ax in axes): # need to infer shape shape = None else: # axes already indicate shape axes = [core.as_axis(a) for a in axes] shape = [a.size for a in axes] op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope) return core.LabeledTensor(op, axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType), tc.Optional(string_types)) def zeros_like(labeled_tensor, dtype=None, name=None): """Creates an identical tensor with all elements set to zero. Args: labeled_tensor: The input tensor. dtype: The type of the returned tensor. name: Optional op name. Returns: The tensor with elements set to zero. """ with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope) return core.LabeledTensor(op, labeled_tensor.axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType), tc.Optional(string_types)) def ones_like(labeled_tensor, dtype=None, name=None): """Creates an identical tensor with all elements set to one. Args: labeled_tensor: The input tensor. dtype: The type of the returned tensor. name: Optional op name. Returns: The tensor with elements set to one. 
""" with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope) return core.LabeledTensor(op, labeled_tensor.axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType), tc.Optional(string_types)) def cast(labeled_tensor, dtype=None, name=None): """Casts a labeled tensor to a new type. Args: labeled_tensor: The input tensor. dtype: The type of the returned tensor. name: Optional op name. Returns: A labeled tensor with the new dtype. """ with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope) return core.LabeledTensor(op, labeled_tensor.axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types)) def verify_tensor_all_finite(labeled_tensor, message, name=None): """Asserts a tensor doesn't contain NaNs or Infs. See tf.verify_tensor_all_finite. Args: labeled_tensor: The input tensor. message: Message to log on failure. name: Optional op name. Returns: The input tensor. """ with ops.name_scope(name, 'lt_verify_tensor_all_finite', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) op = numerics.verify_tensor_all_finite( labeled_tensor.tensor, msg=message, name=scope) return core.LabeledTensor(op, labeled_tensor.axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike, tc.Optional(string_types)) def boolean_mask(labeled_tensor, mask, name=None): """Apply a boolean mask to a labeled tensor. Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks. The mask is applied to the first axis of `labeled_tensor`. Labels on the first axis are removed, because True indices in `mask` may not be known dynamically. Args: labeled_tensor: The input tensor. mask: The type of the returned tensor. name: Optional op name. Returns: The masked labeled tensor. Raises: ValueError: if the first axis of the mask """ with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) mask = core.convert_to_labeled_tensor(mask) if len(mask.axes) > 1: raise NotImplementedError( "LabeledTensor's boolean_mask currently only supports 1D masks") mask_axis = list(mask.axes.values())[0] lt_axis = list(labeled_tensor.axes.values())[0] if mask_axis != lt_axis: raise ValueError('the first axis of the labeled tensor and the mask ' 'are not equal:\n%r\n%r' % (lt_axis, mask_axis)) op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope) # TODO(shoyer): attempt to infer labels for the masked values, by calling # tf.get_static_value on the mask? axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:] return core.LabeledTensor(op, axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike, core.LabeledTensorLike, tc.Optional(string_types)) def where(condition, x, y, name=None): """Return elements from x or y depending on condition. See `tf.where` for more details. This function currently only implements the three argument version of where. Args: condition: LabeledTensor of type `bool`. x: LabeledTensor for values where condition is true. y: LabeledTensor for values where condition is false. name: Optional op name. 
Returns: The labeled tensor with values according to condition. Raises: ValueError: if `x` and `y` have different axes, or if the axes of `x` do not start with the axes of `condition`. """ with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope: condition = core.convert_to_labeled_tensor(condition) x = core.convert_to_labeled_tensor(x) y = core.convert_to_labeled_tensor(y) if not condition.axes == x.axes == y.axes: raise ValueError('all inputs to `where` must have equal axes') op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope) return core.LabeledTensor(op, x.axes)
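The ops defined in the file above compose through labeled axes rather than positional dimensions. The following sketch (an assumption-laden example, not part of the original file: it presumes a TF 1.x runtime with the contrib labeled_tensor package, and the axis names, labels, and sizes are invented) shows select, concat, pack, and unpack working together.

import tensorflow as tf
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops

# Illustrative 4x3x8 tensor with a labeled 'channel' axis (assumed names/sizes).
image_lt = core.LabeledTensor(
    tf.zeros((4, 3, 8)),
    [('batch', range(4)), ('channel', ['red', 'green', 'blue']), 'pixel'])

# A scalar selection drops the 'channel' axis entirely.
red_only_lt = ops.select(image_lt, {'channel': 'red'})   # axes: ['batch', 'pixel']

# List selections keep the axis; concat then joins along it, and the labels of
# the concatenation axis are concatenated too.
red_lt = ops.select(image_lt, {'channel': ['red']})
green_lt = ops.select(image_lt, {'channel': ['green']})
rg_lt = ops.concat([red_lt, green_lt], 'channel')         # labels: ['red', 'green']

# pack stacks identical tensors along a brand-new axis; unpack reverses it.
pair_lt = ops.pack([image_lt, image_lt], 'copy', axis_position=0)
first_lt, second_lt = ops.unpack(pair_lt, axis_name='copy')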
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/ops.py
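To round out the ops.py record above, a minimal sketch of matmul, one of the generated reduction ops, and reshape. This is only a sketch: it assumes a graph-mode TF 1.x session, and the axis names and sizes are invented for illustration.

import tensorflow as tf
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops

# Invented axis names; 'y' is the single shared axis that matmul contracts over.
xy_lt = core.LabeledTensor(tf.reshape(tf.range(6), (2, 3)), ['x', 'y'])
yz_lt = core.LabeledTensor(tf.reshape(tf.range(12), (3, 4)), ['y', 'z'])

xz_lt = ops.matmul(xy_lt, yz_lt)           # result axes: ['x', 'z']

# Reductions come from define_reduce_op; naming an axis as a plain string
# squeezes it out of the result.
row_sum_lt = ops.reduce_sum(xz_lt, {'z'})  # axes: ['x']

# reshape replaces a contiguous run of axes; here both axes collapse into one
# 'flat' axis of known size 6.
flat_lt = ops.reshape(xy_lt, ['x', 'y'], [('flat', 6)])

with tf.Session() as sess:                 # assumed graph-mode TF 1.x runtime
  print(sess.run([xz_lt.tensor, row_sum_lt.tensor, flat_lt.tensor]))

The test file that follows exercises these same ops (select, concat, pack, reshape, matmul, and the reduce_* family) against golden LabeledTensors.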
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import range # pylint: disable=redefined-builtin from tensorflow.contrib.labeled_tensor.python.ops import core from tensorflow.contrib.labeled_tensor.python.ops import ops from tensorflow.contrib.labeled_tensor.python.ops import test_util from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import string_ops from tensorflow.python.platform import test as test_lib class Base(test_util.Base): def setUp(self): super(Base, self).setUp() self.x_size = 7 self.channel_size = 3 self.z_size = 4 self.probs_size = 11 tensor = math_ops.range(0, self.x_size * self.channel_size * self.z_size * self.probs_size) tensor = array_ops.reshape( tensor, [self.x_size, self.channel_size, self.z_size, self.probs_size]) a0 = ('x', range(self.x_size)) a1 = ('channel', ['red', 'green', 'blue']) a2 = 'z' a3 = ('probs', np.linspace(0.0, 1.0, self.probs_size)) self.tensor = tensor self.a0 = a0 self.a1 = a1 self.a2 = a2 self.a2_resolved = ('z', self.z_size) self.a3 = a3 self.original_lt = core.LabeledTensor(tensor, [a0, a1, a2, a3]) self.x_probs_lt = core.slice_function(self.original_lt, {'z': 0}) self.x_probs_lt = ops.select(self.x_probs_lt, {'channel': 'red'}) self.channel_probs_lt = core.slice_function(self.original_lt, {'x': 3, 'z': 0}) class SelectTest(Base): def test_name(self): select_lt = ops.select(self.original_lt, {'channel': 'green'}) self.assertIn('lt_select', select_lt.name) def test_scalar(self): select_lt = ops.select(self.original_lt, {'channel': 'green'}) golden_lt = core.LabeledTensor(self.tensor[:, 1, :, :], [self.a0, self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_slice(self): select_lt = ops.select(self.original_lt, {'channel': slice('red', 'green')}) a1_sliced = ('channel', ['red', 'green']) golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :], [self.a0, a1_sliced, self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_slices(self): select_lt = ops.select(self.original_lt, {'x': slice(1, 4), 'channel': slice('green', None)}) a0_sliced = ('x', range(1, 5)) a1_sliced = ('channel', ['green', 'blue']) golden_lt = core.LabeledTensor(self.tensor[1:5, 1:, :, :], [a0_sliced, a1_sliced, self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_list(self): select_lt = ops.select(self.original_lt, {'channel': ['red', 'green']}) a1_sliced = ('channel', ['red', 'green']) golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :], [self.a0, a1_sliced, self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, 
golden_lt) def test_list_one_item(self): select_lt = ops.select(self.original_lt, {'channel': ['red']}) a1_sliced = ('channel', ['red']) golden_lt = core.LabeledTensor(self.tensor[:, :1, :, :], [self.a0, a1_sliced, self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_list_zero_items(self): select_lt = ops.select(self.original_lt, {'channel': []}) golden_lt = core.LabeledTensor(self.tensor[:, :0, :, :], [self.a0, 'channel', self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_scalars(self): select_lt = ops.select(self.original_lt, {'x': 1, 'channel': 'green'}) golden_lt = core.LabeledTensor(self.tensor[1, 1, :, :], [self.a2, self.a3]) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_tuple(self): original_lt = core.LabeledTensor(constant_op.constant([5, 6]), [('x', [(1, 2), (3, 4)])]) select_lt = ops.select(original_lt, {'x': (1, 2)}) golden_lt = core.LabeledTensor(constant_op.constant(5), []) self.assertLabeledTensorsEqual(select_lt, golden_lt) def test_invalid_input(self): with self.assertRaises(ValueError): ops.select(self.original_lt, {'foo': 1}) with self.assertRaises(ValueError): ops.select(self.original_lt, {'z': 1}) with self.assertRaises(KeyError): ops.select(self.original_lt, {'channel': 'purple'}) with self.assertRaises(KeyError): ops.select(self.original_lt, {'channel': ['red', 'purple']}) with self.assertRaises(NotImplementedError): ops.select(self.original_lt, {'channel': ['red'], 'x': [1]}) with self.assertRaises(NotImplementedError): ops.select(self.original_lt, {'channel': ['red'], 'x': 1}) with self.assertRaises(NotImplementedError): ops.select(self.original_lt, {'channel': slice('red', 'green', 2)}) class ConcatTest(Base): def setUp(self): super(ConcatTest, self).setUp() self.red_lt = ops.select(self.original_lt, {'channel': ['red']}) self.green_lt = ops.select(self.original_lt, {'channel': ['green']}) self.blue_lt = ops.select(self.original_lt, {'channel': ['blue']}) def test_name(self): concat_lt = ops.concat([self.red_lt, self.blue_lt], 'channel') self.assertIn('lt_concat', concat_lt.name) def test(self): concat_lt = ops.concat([self.red_lt, self.green_lt], 'channel') golden_lt = ops.select(self.original_lt, {'channel': ['red', 'green']}) self.assertLabeledTensorsEqual(concat_lt, golden_lt) def test_transposed(self): green_transposed = core.transpose(self.green_lt, ['probs', 'channel', 'z', 'x']) with self.assertRaises(ValueError): ops.concat([self.red_lt, green_transposed], 'channel') def test_invalid_input(self): with self.assertRaises(ValueError): ops.concat([], 'channel') with self.assertRaises(ValueError): ops.concat([self.red_lt, self.red_lt], 'channel') with self.assertRaises(ValueError): ops.concat([self.red_lt, self.red_lt], 'foo') class PackTest(Base): def test_name(self): pack_lt = ops.pack([self.original_lt, self.original_lt], 'batch') self.assertIn('lt_pack', pack_lt.name) def test(self): pack_lt = ops.pack([self.original_lt, self.original_lt], 'batch') golden_lt = core.LabeledTensor( array_ops.stack([self.original_lt.tensor, self.original_lt.tensor]), ['batch', self.a0, self.a1, self.a2, self.a3]) self.assertLabeledTensorsEqual(pack_lt, golden_lt) def test_axis(self): pack_lt = ops.pack( [self.original_lt, self.original_lt], new_axis='batch', axis_position=4) golden_lt = core.LabeledTensor( array_ops.stack( [self.original_lt.tensor, self.original_lt.tensor], axis=4), [self.a0, self.a1, self.a2, self.a3, 'batch']) self.assertLabeledTensorsEqual(pack_lt, golden_lt) def 
test_invalid_input(self): with self.assertRaises(ValueError): ops.pack([self.original_lt, self.original_lt], 'channel') class UnpackTest(Base): def test_name(self): unpack_lts = ops.unpack(self.original_lt) for t in unpack_lts: self.assertIn('lt_unpack', t.name) def test(self): unpack_lt = ops.unpack(self.original_lt)[0] golden_lt = core.LabeledTensor( array_ops.unstack(self.original_lt.tensor)[0], [self.a1, self.a2, self.a3]) self.assertLabeledTensorsEqual(unpack_lt, golden_lt) def test_axis(self): unpack_lt = ops.unpack(self.original_lt, axis_name='z')[0] golden_lt = core.LabeledTensor( array_ops.unstack( self.original_lt.tensor, axis=2)[0], [self.a0, self.a1, self.a3]) self.assertLabeledTensorsEqual(unpack_lt, golden_lt) def test_invalid_input(self): with self.assertRaises(ValueError): ops.unpack(self.original_lt, axis_name='not_found') class ReshapeTest(Base): def test_name(self): reshape_lt = ops.reshape(self.original_lt, ['channel'], ['foo']) self.assertIn('lt_reshape', reshape_lt.name) def test_identity(self): reshape_lt = ops.reshape(self.original_lt, self.original_lt.axes.keys(), self.original_lt.axes.values()) self.assertLabeledTensorsEqual(reshape_lt, self.original_lt) def test_known_size(self): new_dim_size = self.channel_size * self.z_size * self.probs_size reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'], [('new_dim', new_dim_size)]) golden_lt = core.LabeledTensor( array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]), [self.original_lt.axes['x'], 'new_dim']) self.assertLabeledTensorsEqual(reshape_lt, golden_lt) def test_unknown_size(self): reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'], ['new_dim']) golden_lt = core.LabeledTensor( array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]), [self.original_lt.axes['x'], 'new_dim']) self.assertLabeledTensorsEqual(reshape_lt, golden_lt) def test_unknown_dimension(self): orig_lt = core.LabeledTensor( array_ops.placeholder(dtypes.float32, [None]), ['x']) reshape_lt = ops.reshape(orig_lt, ['x'], ['y', ('z', 1)]) self.assertEqual(reshape_lt.axes, core.Axes([('y', None), ('z', 1)])) with self.cached_session() as sess: result = sess.run(reshape_lt, feed_dict={orig_lt.tensor: [1, 2]}) np.testing.assert_array_equal(result, [[1], [2]]) def test_with_labels(self): new_dim_size = self.channel_size * self.z_size * self.probs_size reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'], [('new_dim', range(new_dim_size))]) golden_lt = core.LabeledTensor( array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]), [self.original_lt.axes['x'], ('new_dim', range(new_dim_size))]) self.assertLabeledTensorsEqual(reshape_lt, golden_lt) def test_invalid_input(self): with self.assertRaisesRegexp(ValueError, 'not contained in the set'): ops.reshape(self.original_lt, ['foo'], ['bar']) with self.assertRaisesRegexp(core.AxisOrderError, 'not a slice of axis names'): ops.reshape(self.original_lt, ['probs', 'z'], ['bar']) with self.assertRaisesRegexp(ValueError, 'at most one axis in new_axes'): ops.reshape(self.original_lt, ['probs'], ['foo', 'bar']) class RenameAxisTest(Base): def test_name(self): rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'foo') self.assertIn('lt_rename_axis', rename_axis_lt.name) def test_identity(self): rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'channel') self.assertLabeledTensorsEqual(rename_axis_lt, self.original_lt) def test_new_name(self): rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'foo') expected_axes = 
[(name if name != 'channel' else 'foo', axis.value) for name, axis in self.original_lt.axes.items()] expected_lt = core.LabeledTensor(self.original_lt.tensor, expected_axes) self.assertLabeledTensorsEqual(rename_axis_lt, expected_lt) def test_invalid_input(self): with self.assertRaisesRegexp(ValueError, 'not contained in the set'): ops.rename_axis(self.original_lt, 'foo', 'bar') class BatchTest(Base): def setUp(self): super(BatchTest, self).setUp() tensors = [] for i in range(10): offset_lt = core.LabeledTensor(constant_op.constant(i), []) tensors.append(core.add(self.original_lt, offset_lt)) self.pack_lt = ops.pack(tensors, 'batch') def test_name(self): batch_ops = ops.batch( [self.pack_lt, self.pack_lt], batch_size=2, enqueue_many=True) for bo in batch_ops: self.assertIn('lt_batch', bo.name) def test_enqueue_many(self): [batch_2_op] = ops.batch([self.pack_lt], batch_size=2, enqueue_many=True) self.assertEqual(len(batch_2_op.axes['batch']), 2) [batch_10_op] = ops.batch([batch_2_op], batch_size=10, enqueue_many=True) self.assertLabeledTensorsEqual(self.pack_lt, batch_10_op) def test_no_enqueue_many(self): [batch_2_op] = ops.batch([self.original_lt], batch_size=2) self.assertEqual(len(batch_2_op.axes['batch']), 2) [batch_10_op] = ops.batch([batch_2_op], batch_size=10, enqueue_many=True) self.assertLabeledTensorsEqual( ops.pack(10 * [self.original_lt], 'batch'), batch_10_op) def test_invalid_input(self): with self.assertRaises(ValueError): ops.batch([self.original_lt], 3, enqueue_many=True) def test_allow_smaller_final_batch(self): [batch_2_op] = ops.batch( [self.original_lt], batch_size=2, allow_smaller_final_batch=True) self.assertEqual(batch_2_op.axes['batch'].size, None) class ShuffleBatchTest(Base): def setUp(self): super(ShuffleBatchTest, self).setUp() tensors = [] for i in range(10): offset_lt = core.LabeledTensor(constant_op.constant(i), []) tensors.append(core.add(self.original_lt, offset_lt)) self.pack_lt = ops.pack(tensors, 'batch') def test_name(self): batch_lts = ops.shuffle_batch( [self.pack_lt, self.pack_lt], batch_size=2, enqueue_many=True) for blt in batch_lts: self.assertIn('lt_shuffle_batch', blt.name) def test_enqueue_many(self): [batch_2_lt] = ops.shuffle_batch( [self.pack_lt], batch_size=2, enqueue_many=True, min_after_dequeue=8, seed=0) self.assertEqual(len(batch_2_lt.axes['batch']), 2) [batch_10_lt] = ops.batch([batch_2_lt], batch_size=10, enqueue_many=True) self.assertEqual(batch_10_lt.axes, self.pack_lt.axes) [batch_10, pack] = self.eval([batch_10_lt.tensor, self.pack_lt.tensor]) self.assertFalse((batch_10 == pack).all()) def test_allow_smaller_final_batch(self): [batch_2_op] = ops.shuffle_batch( [self.original_lt], batch_size=2, allow_smaller_final_batch=True) self.assertEqual(batch_2_op.axes['batch'].size, None) class RandomCropTest(Base): def test_name(self): crop_lt = ops.random_crop(self.original_lt, {'probs': 3}) self.assertIn('lt_random_crop', crop_lt.name) def test_single(self): crop_lt = ops.random_crop(self.original_lt, {'probs': 3}) self.assertEqual( core.Axes([self.a0, self.a1, self.a2_resolved, ('probs', 3)]), crop_lt.axes) def test_double(self): crop_lt = ops.random_crop(self.original_lt, {'probs': 3, 'channel': 2}) self.assertEqual( core.Axes([self.a0, ('channel', 2), self.a2_resolved, ('probs', 3)]), crop_lt.axes) def test_size1(self): crop_lt = ops.random_crop(self.original_lt, {'probs': 1}) self.assertEqual( core.Axes([self.a0, self.a1, self.a2_resolved, ('probs', 1)]), crop_lt.axes) def test_different_seeds(self): crop_0_lt = ops.random_crop( 
self.original_lt, {'probs': 3, 'channel': 2}, seed=0) crop_1_lt = ops.random_crop( self.original_lt, {'probs': 3, 'channel': 2}, seed=1) self.assertEqual(crop_0_lt.axes, crop_1_lt.axes) [crop_0, crop_1] = self.eval([crop_0_lt.tensor, crop_1_lt.tensor]) self.assertFalse((crop_0 == crop_1).all()) def test_identical_seeds(self): crop_0_lt = ops.random_crop( self.original_lt, {'probs': 3, 'channel': 2}, seed=0) crop_1_lt = ops.random_crop( self.original_lt, {'probs': 3, 'channel': 2}, seed=0) self.assertLabeledTensorsEqual(crop_0_lt, crop_1_lt) def test_crop_idempotent(self): crop_0_lt = ops.random_crop( self.original_lt, {'probs': 3, 'channel': 2}, seed=0) crop_1_lt = ops.random_crop(crop_0_lt, {'probs': 3, 'channel': 2}, seed=1) self.assertLabeledTensorsEqual(crop_0_lt, crop_1_lt) def test_invalid_input(self): with self.assertRaises(ValueError): ops.random_crop(self.original_lt, {'foobar': 2}) class MapFnTest(Base): def test_name(self): map_lt = ops.map_fn(core.identity, self.original_lt) self.assertIn('lt_map_fn', map_lt.name) def test_identity(self): map_lt = ops.map_fn(core.identity, self.original_lt) self.assertLabeledTensorsEqual(map_lt, self.original_lt) def test_callable_object(self): class Identity(object): def __call__(self, other): return other map_lt = ops.map_fn(Identity(), self.original_lt) self.assertLabeledTensorsEqual(map_lt, self.original_lt) def test_slice(self): map_lt = ops.map_fn(lambda t: core.slice_function(t, {'channel': 1}), self.original_lt) slice_lt = core.slice_function(self.original_lt, {'channel': 1}) self.assertLabeledTensorsEqual(map_lt, slice_lt) def test_string(self): def fn(entry_lt): op = string_ops.string_join([entry_lt, 'world']) return core.LabeledTensor(op, []) tensor_lt = ops.constant(['hi', 'bye'], axes=['batch']) map_lt = ops.map_fn(fn, tensor_lt) golden_lt = ops.constant(['hiworld', 'byeworld'], axes=['batch']) self.assertLabeledTensorsEqual(map_lt, golden_lt) class FoldlTest(Base): def test_name(self): foldl_lt = ops.foldl(core.add, self.original_lt, core.slice_function(self.original_lt, {'x': 0})) self.assertIn('lt_foldl', foldl_lt.name) def test_sum(self): initializer_lt = ops.constant([0, 10], axes=['y']) tensor_lt = ops.constant([[1, 2], [3, 4], [5, 6]], axes=['x', 'y']) foldl_lt = ops.foldl(core.add, tensor_lt, initializer_lt) golden_lt = ops.constant([9, 22], axes=['y']) self.assertLabeledTensorsEqual(foldl_lt, golden_lt) class SqueezeTest(Base): def setUp(self): super(SqueezeTest, self).setUp() self.squeezable_lt = core.slice_function( self.original_lt, {'channel': slice(0, 1), 'probs': slice(0, 1)}) def test_name(self): squeeze_lt = ops.squeeze(self.squeezable_lt) self.assertIn('lt_squeeze', squeeze_lt.name) def test_none(self): none_lt = ops.squeeze(self.squeezable_lt, None) axes_lt = ops.squeeze(self.squeezable_lt, ['channel', 'probs']) self.assertLabeledTensorsEqual(none_lt, axes_lt) def test(self): squeeze_lt = ops.squeeze(self.squeezable_lt, ['probs']) golden_lt = core.slice_function(self.squeezable_lt, {'probs': 0}) self.assertLabeledTensorsEqual(squeeze_lt, golden_lt) def test_invalid_input(self): with self.assertRaises(ValueError): ops.squeeze(self.original_lt, ['channel']) with self.assertRaises(ValueError): ops.squeeze(self.squeezable_lt, ['foo']) class MatMulTest(Base): def test_name(self): x_lt = core.LabeledTensor(array_ops.ones((3,)), ['x']) matmul_lt = ops.matmul(x_lt, x_lt) self.assertIn('lt_matmul', matmul_lt.name) def test_vector_vector(self): x_lt = core.LabeledTensor(math_ops.range(3), ['x']) matmul_lt = 
ops.matmul(x_lt, x_lt) golden_lt = core.convert_to_labeled_tensor(5) self.assertLabeledTensorsEqual(matmul_lt, golden_lt) def test_matrix_vector(self): xy_lt = core.LabeledTensor( array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y']) y_lt = core.LabeledTensor(math_ops.range(3), ['y']) matmul_lt = ops.matmul(xy_lt, y_lt) golden_lt = core.LabeledTensor( math_ops.matmul(xy_lt.tensor, array_ops.reshape(y_lt.tensor, (-1, 1)))[:, 0], ['x']) self.assertLabeledTensorsEqual(matmul_lt, golden_lt) matmul_lt = ops.matmul(y_lt, xy_lt) self.assertLabeledTensorsEqual(matmul_lt, golden_lt) def test_matrix_matrix(self): xy_lt = core.LabeledTensor( array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y']) yz_lt = core.LabeledTensor( array_ops.reshape(math_ops.range(12), (3, 4)), ['y', 'z']) matmul_lt = ops.matmul(xy_lt, yz_lt) golden_lt = core.LabeledTensor( math_ops.matmul(xy_lt.tensor, yz_lt.tensor), ['x', 'z']) self.assertLabeledTensorsEqual(matmul_lt, golden_lt) transpose = lambda x: core.transpose(x, list(x.axes.keys())[::-1]) matmul_lt = ops.matmul(xy_lt, transpose(yz_lt)) self.assertLabeledTensorsEqual(matmul_lt, golden_lt) matmul_lt = ops.matmul(transpose(xy_lt), yz_lt) self.assertLabeledTensorsEqual(matmul_lt, golden_lt) matmul_lt = ops.matmul(transpose(xy_lt), transpose(yz_lt)) self.assertLabeledTensorsEqual(matmul_lt, golden_lt) matmul_lt = ops.matmul(yz_lt, xy_lt) self.assertLabeledTensorsEqual(matmul_lt, transpose(golden_lt)) def test_matrix_matrix_axis_order(self): xy_lt = core.LabeledTensor( array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y']) yz_lt = core.LabeledTensor( array_ops.reshape(math_ops.range(12), (3, 4)), ['y', 'z']) golden_lt = core.LabeledTensor( math_ops.matmul(xy_lt.tensor, yz_lt.tensor), ['x', 'z']) with core.axis_order_scope(['x', 'y', 'z']): matmul_lt = ops.matmul(xy_lt, yz_lt) self.assertLabeledTensorsEqual(matmul_lt, golden_lt) matmul_lt = ops.matmul(yz_lt, xy_lt) self.assertLabeledTensorsEqual(matmul_lt, golden_lt) def test_invalid(self): scalar_lt = core.LabeledTensor(array_ops.ones(()), []) x_lt = core.LabeledTensor(array_ops.ones((2,)), ['x']) x2_lt = core.LabeledTensor(array_ops.ones((3,)), ['x']) y_lt = core.LabeledTensor(array_ops.ones((3,)), ['y']) xy_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y']) xyz_lt = core.LabeledTensor(array_ops.ones((2, 3, 1)), ['x', 'y', 'z']) with self.assertRaisesRegexp(ValueError, 'inputs with at least rank'): ops.matmul(x_lt, scalar_lt) with self.assertRaises(NotImplementedError): ops.matmul(x_lt, xyz_lt) with self.assertRaisesRegexp(ValueError, 'exactly one axis in common'): ops.matmul(x_lt, y_lt) with self.assertRaises(NotImplementedError): ops.matmul(xy_lt, xy_lt) with self.assertRaisesRegexp(ValueError, 'does not match'): ops.matmul(x_lt, x2_lt) class ReduceSumTest(Base): def test_name(self): sum_lt = ops.reduce_sum(self.original_lt, {'channel'}) self.assertIn('lt_reduce_sum', sum_lt.name) def test_drop_axis(self): sum_lt = ops.reduce_sum(self.original_lt, {'channel'}) golden_lt = core.LabeledTensor( math_ops.reduce_sum(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3]) self.assertLabeledTensorsEqual(sum_lt, golden_lt) def test_drop_scalar_axis(self): sum_lt = ops.reduce_sum(self.original_lt, 'channel') golden_lt = core.LabeledTensor( math_ops.reduce_sum(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3]) self.assertLabeledTensorsEqual(sum_lt, golden_lt) def test_keep_axis(self): sum_lt = ops.reduce_sum(self.original_lt, {('channel', 'hihowareyou')}) golden_lt = core.LabeledTensor( 
math_ops.reduce_sum( self.original_lt.tensor, 1, keepdims=True), [self.a0, ('channel', ['hihowareyou']), self.a2, self.a3]) self.assertLabeledTensorsEqual(sum_lt, golden_lt) def test_keep_scalar_axis(self): sum_lt = ops.reduce_sum(self.original_lt, ('channel', 'hihowareyou')) golden_lt = core.LabeledTensor( math_ops.reduce_sum( self.original_lt.tensor, 1, keepdims=True), [self.a0, ('channel', ['hihowareyou']), self.a2, self.a3]) self.assertLabeledTensorsEqual(sum_lt, golden_lt) def test_scalar(self): scalar_lt = core.LabeledTensor(constant_op.constant(42), []) reduce_lt = ops.reduce_sum(scalar_lt, []) self.assertLabeledTensorsEqual(reduce_lt, scalar_lt) def test_empty_list(self): reduce_lt = ops.reduce_sum(self.original_lt, []) self.assertLabeledTensorsEqual(reduce_lt, self.original_lt) def test_none(self): sum_lt = ops.reduce_sum(self.original_lt) golden_lt = core.LabeledTensor( math_ops.reduce_sum(self.original_lt.tensor), []) self.assertLabeledTensorsEqual(sum_lt, golden_lt) def test_function_docstring_and_name(self): self.assertIn('tf.reduce_sum', ops.reduce_sum.__doc__) self.assertEqual('reduce_sum', ops.reduce_sum.__name__) class ReduceMeanTest(Base): def test_name(self): actual_lt = ops.reduce_mean(self.original_lt, {'channel'}) self.assertIn('lt_reduce_mean', actual_lt.name) def test(self): actual_lt = ops.reduce_mean(self.original_lt, {'channel'}) golden_lt = core.LabeledTensor( math_ops.reduce_mean(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3]) self.assertLabeledTensorsEqual(actual_lt, golden_lt) class ReduceProdTest(Base): def test_name(self): result_lt = ops.reduce_prod(self.original_lt, {'channel'}) self.assertIn('lt_reduce_prod', result_lt.name) def test(self): result_lt = ops.reduce_prod(self.original_lt, {'channel'}) golden_lt = core.LabeledTensor( math_ops.reduce_prod(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3]) self.assertLabeledTensorsEqual(result_lt, golden_lt) class ReduceMinTest(Base): def test_name(self): result_lt = ops.reduce_min(self.original_lt, {'channel'}) self.assertIn('lt_reduce_min', result_lt.name) def test(self): result_lt = ops.reduce_min(self.original_lt, {'channel'}) golden_lt = core.LabeledTensor( math_ops.reduce_min(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3]) self.assertLabeledTensorsEqual(result_lt, golden_lt) class ReduceMaxTest(Base): def test_name(self): result_lt = ops.reduce_max(self.original_lt, {'channel'}) self.assertIn('lt_reduce_max', result_lt.name) def test(self): result_lt = ops.reduce_max(self.original_lt, {'channel'}) golden_lt = core.LabeledTensor( math_ops.reduce_max(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3]) self.assertLabeledTensorsEqual(result_lt, golden_lt) class BaseReduceBoolean(Base): def setUp(self): super(BaseReduceBoolean, self).setUp() self.bool_tensor = math_ops.cast(self.original_lt.tensor > 5, dtypes.bool) self.bool_lt = core.LabeledTensor(self.bool_tensor, self.original_lt.axes) class ReduceAllTest(BaseReduceBoolean): def test_name(self): result_lt = ops.reduce_all(self.bool_lt, {'channel'}) self.assertIn('lt_reduce_all', result_lt.name) def test(self): result_lt = ops.reduce_all(self.bool_lt, {'channel'}) golden_lt = core.LabeledTensor( math_ops.reduce_all(self.bool_tensor, 1), [self.a0, self.a2, self.a3]) self.assertLabeledTensorsEqual(result_lt, golden_lt) class ReduceAnyTest(BaseReduceBoolean): def test_name(self): result_lt = ops.reduce_any(self.bool_lt, {'channel'}) self.assertIn('lt_reduce_any', result_lt.name) def test(self): result_lt = 
ops.reduce_any(self.bool_lt, {'channel'}) golden_lt = core.LabeledTensor( math_ops.reduce_any(self.bool_tensor, 1), [self.a0, self.a2, self.a3]) self.assertLabeledTensorsEqual(result_lt, golden_lt) class TileTest(Base): def test_name(self): tile_lt = ops.tile(self.original_lt, {'z': 2}) self.assertIn('lt_tile', tile_lt.name) def test(self): for multiple in [2, constant_op.constant(2)]: tile_lt = ops.tile(self.original_lt, {'z': multiple}) golden_op = array_ops.tile(self.original_lt.tensor, [1, 1, multiple, 1]) golden_axes = [ 'z' if axis.name == 'z' else axis for axis in self.original_lt.axes.values() ] golden_lt = core.LabeledTensor(golden_op, golden_axes) self.assertLabeledTensorsEqual(tile_lt, golden_lt) def test_invalid_input(self): with self.assertRaisesRegexp(ValueError, 'are not contained in the set'): ops.tile(self.original_lt, {'foo': 5}) with self.assertRaisesRegexp(ValueError, 'axes with tick labels'): ops.tile(self.original_lt, {'x': 5}) class PadTest(Base): def test_name(self): pad_lt = ops.pad(self.original_lt, {'x': (1, 1), 'channel': ([], ['alpha'])}) self.assertIn('lt_pad', pad_lt.name) def test(self): pad_lt = ops.pad(self.original_lt, {'x': (1, 1), 'channel': ([], ['alpha'])}) golden_op = array_ops.pad(self.original_lt.tensor, [[1, 1], [0, 1], [0, 0], [0, 0]]) golden_axes = [('x', self.x_size + 2), ('channel', ['red', 'green', 'blue', 'alpha']), self.a2, self.a3] golden_lt = core.LabeledTensor(golden_op, golden_axes) self.assertLabeledTensorsEqual(pad_lt, golden_lt) def test_invalid_input(self): with self.assertRaisesRegexp(ValueError, 'are not contained in the set'): ops.pad(self.original_lt, {'foo': (1, 1), 'channel': ([], ['alpha'])}) class ConstantTest(Base): def test_name(self): constant_lt = ops.constant(1) self.assertIn('lt_constant', constant_lt.name) def test_scalar(self): constant_lt = ops.constant(1) golden_lt = core.LabeledTensor(constant_op.constant(1), []) self.assertLabeledTensorsEqual(constant_lt, golden_lt) def test_infer_shape(self): constant_lt = ops.constant([1, 2], axes=['x']) golden_lt = core.LabeledTensor(constant_op.constant([1, 2]), ['x']) self.assertLabeledTensorsEqual(constant_lt, golden_lt) def test_specify_shape(self): constant_lt = ops.constant(1, axes=[('x', 3)]) golden_lt = core.LabeledTensor(constant_op.constant(1, shape=(3,)), ['x']) self.assertLabeledTensorsEqual(constant_lt, golden_lt) def test_existing_axes(self): golden_lt = core.LabeledTensor(constant_op.constant([1, 2]), ['x']) constant_lt = ops.constant([1, 2], axes=golden_lt.axes) self.assertLabeledTensorsEqual(constant_lt, golden_lt) class ZerosLikeTest(Base): def test_name(self): like_lt = ops.zeros_like(self.original_lt) self.assertIn('lt_zeros_like', like_lt.name) def test(self): like_lt = ops.zeros_like(self.original_lt) golden_lt = core.LabeledTensor( array_ops.zeros_like(self.original_lt.tensor), self.original_lt.axes) self.assertLabeledTensorsEqual(like_lt, golden_lt) class OnesLikeTest(Base): def test_name(self): like_lt = ops.ones_like(self.original_lt) self.assertIn('lt_ones_like', like_lt.name) def test(self): like_lt = ops.ones_like(self.original_lt) golden_lt = core.LabeledTensor( array_ops.ones_like(self.original_lt.tensor), self.original_lt.axes) self.assertLabeledTensorsEqual(like_lt, golden_lt) class CastTest(Base): def test_name(self): cast_lt = ops.cast(self.original_lt, dtypes.float16) self.assertIn('lt_cast', cast_lt.name) def test(self): cast_lt = ops.cast(self.original_lt, dtypes.float16) golden_lt = core.LabeledTensor( 
math_ops.cast(self.original_lt.tensor, dtypes.float16), self.original_lt.axes) self.assertLabeledTensorsEqual(cast_lt, golden_lt) class VerifyTensorAllFiniteTest(Base): def setUp(self): super(VerifyTensorAllFiniteTest, self).setUp() self.finite_lt = core.LabeledTensor(constant_op.constant(42.0), []) self.nan_lt = core.LabeledTensor(constant_op.constant(np.nan), []) self.checked_finite_lt = ops.verify_tensor_all_finite(self.finite_lt, '') self.checked_nan_lt = ops.verify_tensor_all_finite(self.nan_lt, '') def test_name(self): self.assertIn('lt_verify_tensor_all_finite', self.checked_finite_lt.name) self.assertIn('lt_verify_tensor_all_finite', self.checked_nan_lt.name) def test_finite(self): self.assertLabeledTensorsEqual(self.finite_lt, self.checked_finite_lt) def test_nan(self): with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, 'Tensor had NaN values'): self.eval([self.checked_nan_lt]) class BooleanMaskTest(Base): def test_name(self): mask = core.LabeledTensor(math_ops.range(7) > 3, [self.a0]) masked_lt = ops.boolean_mask(self.original_lt, mask) self.assertIn('lt_boolean_mask', masked_lt.name) def test(self): mask = core.LabeledTensor(math_ops.range(7) > 3, [self.a0]) masked_lt = ops.boolean_mask(self.original_lt, mask) golden_lt = core.LabeledTensor( array_ops.boolean_mask(self.original_lt.tensor, mask.tensor), ['x', self.a1, self.a2, self.a3]) self.assertLabeledTensorsEqual(masked_lt, golden_lt) def test_invalid_rank(self): mask = core.LabeledTensor(array_ops.ones((7, 3)) > 3, [self.a0, self.a1]) with self.assertRaises(NotImplementedError): ops.boolean_mask(self.original_lt, mask) def test_mismatched_axis(self): mask = core.LabeledTensor(math_ops.range(7) > 3, ['foo']) with self.assertRaisesRegexp(ValueError, 'not equal'): ops.boolean_mask(self.original_lt, mask) class WhereTest(Base): def test_name(self): condition = core.LabeledTensor(math_ops.range(5) < 3, ['x']) where_lt = ops.where(condition, condition, condition) self.assertIn('lt_where', where_lt.name) def test(self): condition = core.LabeledTensor(math_ops.range(5) < 3, ['x']) x = core.LabeledTensor(array_ops.ones(5), ['x']) y = core.LabeledTensor(array_ops.zeros(5), ['x']) where_lt = ops.where(condition, x, y) golden_lt = core.LabeledTensor( array_ops.concat([array_ops.ones(3), array_ops.zeros(2)], 0), ['x']) self.assertLabeledTensorsEqual(where_lt, golden_lt) def test_mismatched_axes(self): condition = core.LabeledTensor(math_ops.range(5) < 3, ['x']) with self.assertRaisesRegexp(ValueError, 'equal axes'): ops.where(condition, condition[:3], condition) with self.assertRaisesRegexp(ValueError, 'equal axes'): ops.where(condition, condition, condition[:3]) if __name__ == '__main__': test_lib.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/labeled_tensor/python/ops/ops_test.py
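# A minimal sketch, separate from the ops_test.py file above, of the patterns those
# tests exercise with the TF 1.x tf.contrib.labeled_tensor API: tensors with named
# axes, axis-aware matmul, and reduction over a named axis. Module paths match the
# test imports; shapes and axis names here are illustrative only.
import tensorflow as tf
from tensorflow.contrib.labeled_tensor.python.ops import core as lt_core
from tensorflow.contrib.labeled_tensor.python.ops import ops as lt_ops

# A 2x3 tensor with axes named 'x' and 'y', and a length-3 vector on axis 'y'.
xy_lt = lt_core.LabeledTensor(tf.reshape(tf.range(6), (2, 3)), ['x', 'y'])
y_lt = lt_core.LabeledTensor(tf.range(3), ['y'])

matmul_lt = lt_ops.matmul(xy_lt, y_lt)      # contracts the shared 'y' axis, leaving axis 'x'
sum_lt = lt_ops.reduce_sum(xy_lt, {'y'})    # drops the named 'y' axis

with tf.Session() as sess:
  print(sess.run([matmul_lt.tensor, sum_lt.tensor]))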
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """RNN Cells and additional RNN operations. <!--From core--> @@RNNCell @@LayerRNNCell @@BasicRNNCell @@BasicLSTMCell @@GRUCell @@LSTMCell @@LSTMStateTuple @@DropoutWrapper @@MultiRNNCell @@DeviceWrapper @@ResidualWrapper <!--Used to be in core, but kept in contrib.--> @@EmbeddingWrapper @@InputProjectionWrapper @@OutputProjectionWrapper <!--Created in contrib, eventual plans to move to core.--> @@LayerNormBasicLSTMCell @@LSTMBlockWrapper @@LSTMBlockCell @@GRUBlockCell @@GRUBlockCellV2 @@FusedRNNCell @@FusedRNNCellAdaptor @@TimeReversedFusedRNN @@LSTMBlockFusedCell @@CoupledInputForgetGateLSTMCell @@TimeFreqLSTMCell @@GridLSTMCell @@BidirectionalGridLSTMCell @@NASCell @@UGRNNCell @@IntersectionRNNCell @@PhasedLSTMCell @@ConvLSTMCell @@Conv1DLSTMCell @@Conv2DLSTMCell @@Conv3DLSTMCell @@HighwayWrapper @@GLSTMCell @@SRUCell @@IndRNNCell @@IndyGRUCell @@IndyLSTMCell <!--RNNCell wrappers--> @@AttentionCellWrapper @@CompiledWrapper <!--RNN functions--> @@static_rnn @@static_state_saving_rnn @@static_bidirectional_rnn @@stack_bidirectional_dynamic_rnn @@stack_bidirectional_rnn <!--RNN utilities--> @@transpose_batch_time @@best_effort_input_batch_size """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import,wildcard-import,line-too-long from tensorflow.contrib.rnn.python.ops.core_rnn_cell import EmbeddingWrapper from tensorflow.contrib.rnn.python.ops.core_rnn_cell import InputProjectionWrapper from tensorflow.contrib.rnn.python.ops.core_rnn_cell import OutputProjectionWrapper from tensorflow.contrib.rnn.python.ops.fused_rnn_cell import * from tensorflow.contrib.rnn.python.ops.gru_ops import * from tensorflow.contrib.rnn.python.ops.lstm_ops import * from tensorflow.contrib.rnn.python.ops.rnn import * from tensorflow.contrib.rnn.python.ops.rnn_cell import * from tensorflow.python.ops.rnn import _best_effort_input_batch_size as best_effort_input_batch_size from tensorflow.python.ops.rnn import _transpose_batch_time as transpose_batch_time from tensorflow.python.ops.rnn import static_bidirectional_rnn from tensorflow.python.ops.rnn import static_rnn from tensorflow.python.ops.rnn import static_state_saving_rnn from tensorflow.python.ops.rnn_cell import * # pylint: enable=unused-import,wildcard-import,line-too-long from tensorflow.python.util.all_util import remove_undocumented remove_undocumented(__name__)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/__init__.py
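# A minimal sketch, not taken from the module above, of how the cells and wrappers
# listed in its docstring are typically composed: a fused LSTMBlockCell wrapped in
# DropoutWrapper, stacked with MultiRNNCell, and unrolled with tf.nn.dynamic_rnn.
# All names used below are exported by tf.contrib.rnn; sizes are made up.
import tensorflow as tf

batch_size, time_steps, input_dim, num_units = 4, 7, 16, 32
inputs = tf.placeholder(tf.float32, [batch_size, time_steps, input_dim])

cells = [
    tf.contrib.rnn.DropoutWrapper(
        tf.contrib.rnn.LSTMBlockCell(num_units), output_keep_prob=0.9)
    for _ in range(2)
]
stacked_cell = tf.contrib.rnn.MultiRNNCell(cells)
outputs, final_state = tf.nn.dynamic_rnn(stacked_cell, inputs, dtype=tf.float32)
# outputs: [batch_size, time_steps, num_units]; final_state: one LSTMStateTuple per layer.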
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ops module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/__init__.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Convert checkpoints using RNNCells to new name convention. Usage: python checkpoint_convert.py [--write_v1_checkpoint] \ '/path/to/checkpoint' '/path/to/new_checkpoint' For example, if there is a V2 checkpoint to be converted and the files include: /tmp/my_checkpoint/model.ckpt.data-00000-of-00001 /tmp/my_checkpoint/model.ckpt.index /tmp/my_checkpoint/model.ckpt.meta use the following command: mkdir /tmp/my_converted_checkpoint && python checkpoint_convert.py \ /tmp/my_checkpoint/model.ckpt /tmp/my_converted_checkpoint/model.ckpt This will generate three converted checkpoint files corresponding to the three old ones in the new directory: /tmp/my_converted_checkpoint/model.ckpt.data-00000-of-00001 /tmp/my_converted_checkpoint/model.ckpt.index /tmp/my_converted_checkpoint/model.ckpt.meta """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import collections import re import sys from tensorflow.core.protobuf import saver_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python.client import session from tensorflow.python.framework import ops from tensorflow.python.ops import variables from tensorflow.python.platform import app from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import saver as saver_lib # Mapping between old <=> new names. Externalized so that user scripts that # may need to consume multiple checkpoint formats can use this metadata. 
RNN_NAME_REPLACEMENTS = collections.OrderedDict([ ############################################################################ # contrib/rnn/python/ops/core_rnn_cell_impl.py # BasicRNNCell ('basic_rnn_cell/weights', 'basic_rnn_cell/kernel'), ('basic_rnn_cell/biases', 'basic_rnn_cell/bias'), # GRUCell ('gru_cell/weights', 'gru_cell/kernel'), ('gru_cell/biases', 'gru_cell/bias'), ('gru_cell/gates/weights', 'gru_cell/gates/kernel'), ('gru_cell/gates/biases', 'gru_cell/gates/bias'), ('gru_cell/candidate/weights', 'gru_cell/candidate/kernel'), ('gru_cell/candidate/biases', 'gru_cell/candidate/bias'), # BasicLSTMCell ('basic_lstm_cell/weights', 'basic_lstm_cell/kernel'), ('basic_lstm_cell/biases', 'basic_lstm_cell/bias'), # LSTMCell ('lstm_cell/weights', 'lstm_cell/kernel'), ('lstm_cell/biases', 'lstm_cell/bias'), ('lstm_cell/projection/weights', 'lstm_cell/projection/kernel'), ('lstm_cell/projection/biases', 'lstm_cell/projection/bias'), # OutputProjectionWrapper ('output_projection_wrapper/weights', 'output_projection_wrapper/kernel'), ('output_projection_wrapper/biases', 'output_projection_wrapper/bias'), # InputProjectionWrapper ('input_projection_wrapper/weights', 'input_projection_wrapper/kernel'), ('input_projection_wrapper/biases', 'input_projection_wrapper/bias'), ############################################################################ # contrib/rnn/python/ops/lstm_ops.py # LSTMBlockFusedCell ?? ('lstm_block_wrapper/weights', 'lstm_block_wrapper/kernel'), ('lstm_block_wrapper/biases', 'lstm_block_wrapper/bias'), ############################################################################ # contrib/rnn/python/ops/rnn_cell.py # LayerNormBasicLSTMCell ('layer_norm_basic_lstm_cell/weights', 'layer_norm_basic_lstm_cell/kernel'), ('layer_norm_basic_lstm_cell/biases', 'layer_norm_basic_lstm_cell/bias'), # UGRNNCell, not found in g3, but still need it? 
('ugrnn_cell/weights', 'ugrnn_cell/kernel'), ('ugrnn_cell/biases', 'ugrnn_cell/bias'), # NASCell ('nas_rnn/weights', 'nas_rnn/kernel'), ('nas_rnn/recurrent_weights', 'nas_rnn/recurrent_kernel'), # IntersectionRNNCell ('intersection_rnn_cell/weights', 'intersection_rnn_cell/kernel'), ('intersection_rnn_cell/biases', 'intersection_rnn_cell/bias'), ('intersection_rnn_cell/in_projection/weights', 'intersection_rnn_cell/in_projection/kernel'), ('intersection_rnn_cell/in_projection/biases', 'intersection_rnn_cell/in_projection/bias'), # PhasedLSTMCell ('phased_lstm_cell/mask_gates/weights', 'phased_lstm_cell/mask_gates/kernel'), ('phased_lstm_cell/mask_gates/biases', 'phased_lstm_cell/mask_gates/bias'), ('phased_lstm_cell/new_input/weights', 'phased_lstm_cell/new_input/kernel'), ('phased_lstm_cell/new_input/biases', 'phased_lstm_cell/new_input/bias'), ('phased_lstm_cell/output_gate/weights', 'phased_lstm_cell/output_gate/kernel'), ('phased_lstm_cell/output_gate/biases', 'phased_lstm_cell/output_gate/bias'), # AttentionCellWrapper ('attention_cell_wrapper/weights', 'attention_cell_wrapper/kernel'), ('attention_cell_wrapper/biases', 'attention_cell_wrapper/bias'), ('attention_cell_wrapper/attn_output_projection/weights', 'attention_cell_wrapper/attn_output_projection/kernel'), ('attention_cell_wrapper/attn_output_projection/biases', 'attention_cell_wrapper/attn_output_projection/bias'), ('attention_cell_wrapper/attention/weights', 'attention_cell_wrapper/attention/kernel'), ('attention_cell_wrapper/attention/biases', 'attention_cell_wrapper/attention/bias'), ############################################################################ # contrib/legacy_seq2seq/python/ops/seq2seq.py ('attention_decoder/weights', 'attention_decoder/kernel'), ('attention_decoder/biases', 'attention_decoder/bias'), ('attention_decoder/Attention_0/weights', 'attention_decoder/Attention_0/kernel'), ('attention_decoder/Attention_0/biases', 'attention_decoder/Attention_0/bias'), ('attention_decoder/AttnOutputProjection/weights', 'attention_decoder/AttnOutputProjection/kernel'), ('attention_decoder/AttnOutputProjection/biases', 'attention_decoder/AttnOutputProjection/bias'), # contrib/legacy_seq2seq/python/ops/seq2seq.py before cl/140060366 ('attention_decoder/Attention_0/Linear/Bias', 'attention_decoder/Attention_0/bias'), ('attention_decoder/Attention_0/Linear/Matrix', 'attention_decoder/Attention_0/kernel'), ('attention_decoder/AttnOutputProjection/Linear/Bias', 'attention_decoder/AttnOutputProjection/bias'), ('attention_decoder/AttnOutputProjection/Linear/Matrix', 'attention_decoder/AttnOutputProjection/kernel'), ('attention_decoder/LSTMCell/B', 'attention_decoder/lstm_cell/bias'), ('attention_decoder/LSTMCell/W_0', 'attention_decoder/lstm_cell/kernel'), ('attention_decoder/Linear/Bias', 'attention_decoder/bias'), ('attention_decoder/Linear/Matrix', 'attention_decoder/kernel') ]) _RNN_SHARDED_NAME_REPLACEMENTS = collections.OrderedDict([ ('LSTMCell/W_', 'lstm_cell/weights/part_'), ('BasicLSTMCell/Linear/Matrix_', 'basic_lstm_cell/weights/part_'), ('GRUCell/W_', 'gru_cell/weights/part_'), ('MultiRNNCell/Cell', 'multi_rnn_cell/cell_'), ]) def _rnn_name_replacement(var_name): for pattern in RNN_NAME_REPLACEMENTS: if pattern in var_name: old_var_name = var_name var_name = var_name.replace(pattern, RNN_NAME_REPLACEMENTS[pattern]) logging.info('Converted: %s --> %s' % (old_var_name, var_name)) break return var_name def _rnn_name_replacement_sharded(var_name): for pattern in _RNN_SHARDED_NAME_REPLACEMENTS: if pattern in 
var_name: old_var_name = var_name var_name = var_name.replace(pattern, _RNN_SHARDED_NAME_REPLACEMENTS[pattern]) logging.info('Converted: %s --> %s' % (old_var_name, var_name)) return var_name def _split_sharded_vars(name_shape_map): """Split shareded variables. Args: name_shape_map: A dict from variable name to variable shape. Returns: not_sharded: Names of the non-sharded variables. sharded: Names of the sharded variables. """ sharded = [] not_sharded = [] for name in name_shape_map: if re.match(name, '_[0-9]+$'): if re.sub('_[0-9]+$', '_1', name) in name_shape_map: sharded.append(name) else: not_sharded.append(name) else: not_sharded.append(name) return not_sharded, sharded def convert_names(checkpoint_from_path, checkpoint_to_path, write_v1_checkpoint=False): """Migrates the names of variables within a checkpoint. Args: checkpoint_from_path: Path to source checkpoint to be read in. checkpoint_to_path: Path to checkpoint to be written out. write_v1_checkpoint: Whether the output checkpoint will be in V1 format. Returns: A dictionary that maps the new variable names to the Variable objects. A dictionary that maps the old variable names to the new variable names. """ with ops.Graph().as_default(): logging.info('Reading checkpoint_from_path %s' % checkpoint_from_path) reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_from_path) name_shape_map = reader.get_variable_to_shape_map() not_sharded, sharded = _split_sharded_vars(name_shape_map) new_variable_map = {} conversion_map = {} for var_name in not_sharded: new_var_name = _rnn_name_replacement(var_name) tensor = reader.get_tensor(var_name) var = variables.Variable(tensor, name=var_name) new_variable_map[new_var_name] = var if new_var_name != var_name: conversion_map[var_name] = new_var_name for var_name in sharded: new_var_name = _rnn_name_replacement_sharded(var_name) var = variables.Variable(tensor, name=var_name) new_variable_map[new_var_name] = var if new_var_name != var_name: conversion_map[var_name] = new_var_name write_version = (saver_pb2.SaverDef.V1 if write_v1_checkpoint else saver_pb2.SaverDef.V2) saver = saver_lib.Saver(new_variable_map, write_version=write_version) with session.Session() as sess: sess.run(variables.global_variables_initializer()) logging.info('Writing checkpoint_to_path %s' % checkpoint_to_path) saver.save(sess, checkpoint_to_path) logging.info('Summary:') logging.info(' Converted %d variable name(s).' % len(new_variable_map)) return new_variable_map, conversion_map def main(_): convert_names( FLAGS.checkpoint_from_path, FLAGS.checkpoint_to_path, write_v1_checkpoint=FLAGS.write_v1_checkpoint) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.register('type', 'bool', lambda v: v.lower() == 'true') parser.add_argument('checkpoint_from_path', type=str, help='Path to source checkpoint to be read in.') parser.add_argument('checkpoint_to_path', type=str, help='Path to checkpoint to be written out.') parser.add_argument('--write_v1_checkpoint', action='store_true', help='Write v1 checkpoint') FLAGS, unparsed = parser.parse_known_args() app.run(main=main, argv=[sys.argv[0]] + unparsed)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/tools/checkpoint_convert.py
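# The module docstring of checkpoint_convert.py above documents command-line usage;
# the same conversion can also be driven from Python through convert_names(), which
# returns the new-variable map and the old-name to new-name conversion map. A minimal
# sketch follows; the checkpoint paths are placeholders taken from the docstring example.
from tensorflow.contrib.rnn.python.tools import checkpoint_convert

new_var_map, conversion_map = checkpoint_convert.convert_names(
    '/tmp/my_checkpoint/model.ckpt',            # source checkpoint prefix (V2)
    '/tmp/my_converted_checkpoint/model.ckpt',  # destination checkpoint prefix
    write_v1_checkpoint=False)                  # set True to emit a V1-format checkpoint

for old_name, new_name in conversion_map.items():
  print('%s -> %s' % (old_name, new_name))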
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for checkpoint converter.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import glob import os import tempfile from tensorflow.contrib.rnn.python.tools import checkpoint_convert from tensorflow.python.client import session from tensorflow.python.framework import ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import saver as saver_lib class CheckpointConvertTest(test.TestCase): def setUp(self): self._old_ckpt_path = tempfile.mktemp() self._new_ckpt_path = tempfile.mktemp() ops.reset_default_graph() def tearDown(self): for file_name in glob.glob(self._old_ckpt_path + "*"): os.remove(file_name) for file_name in glob.glob(self._new_ckpt_path + "*"): os.remove(file_name) def testReplacementDictsContainUniqueAndNonEmptyVariableNames(self): for old_name in checkpoint_convert.RNN_NAME_REPLACEMENTS: new_name = checkpoint_convert.RNN_NAME_REPLACEMENTS[old_name] self.assertTrue(old_name) self.assertTrue(new_name) self.assertNotEqual(old_name, new_name) for old_name in checkpoint_convert._RNN_SHARDED_NAME_REPLACEMENTS: new_name = checkpoint_convert._RNN_SHARDED_NAME_REPLACEMENTS[old_name] self.assertTrue(old_name) self.assertTrue(new_name) self.assertNotEqual(old_name, new_name) def testConversionFromV2WithConvertedVariableNamesSucceeds(self): variables.Variable(10.0, name="a") for old_name in checkpoint_convert.RNN_NAME_REPLACEMENTS: variables.Variable(20.0, name=old_name) with session.Session() as sess: saver = saver_lib.Saver() sess.run(variables.global_variables_initializer()) saver.save(sess, self._old_ckpt_path) new_var_map, conversion_map = checkpoint_convert.convert_names( self._old_ckpt_path, self._new_ckpt_path) self.assertTrue(glob.glob(self._new_ckpt_path + "*")) self.assertItemsEqual( set(checkpoint_convert.RNN_NAME_REPLACEMENTS.values()).union(["a"]), new_var_map.keys()) self.assertEqual(checkpoint_convert.RNN_NAME_REPLACEMENTS, conversion_map) def testConversionFromV2WithoutConvertedVariableNamesSucceeds(self): variables.Variable(10.0, name="a") with session.Session() as sess: saver = saver_lib.Saver() sess.run(variables.global_variables_initializer()) saver.save(sess, self._old_ckpt_path) new_var_map, conversion_map = checkpoint_convert.convert_names( self._old_ckpt_path, self._new_ckpt_path) self.assertItemsEqual(["a"], new_var_map.keys()) self.assertFalse(conversion_map) def testConversionToV1Succeeds(self): variables.Variable(10.0, name="a") variables.Variable( 20.0, name=list(checkpoint_convert.RNN_NAME_REPLACEMENTS.keys())[-1]) with session.Session() as sess: saver = saver_lib.Saver() sess.run(variables.global_variables_initializer()) saver.save(sess, self._old_ckpt_path) new_var_map, conversion_map = checkpoint_convert.convert_names( self._old_ckpt_path, 
self._new_ckpt_path, write_v1_checkpoint=True) self.assertItemsEqual( ["a", list(checkpoint_convert.RNN_NAME_REPLACEMENTS.values())[-1]], new_var_map.keys()) self.assertEqual( {list(checkpoint_convert.RNN_NAME_REPLACEMENTS.keys())[-1]: list(checkpoint_convert.RNN_NAME_REPLACEMENTS.values())[-1]}, conversion_map) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/tools/checkpoint_convert_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Block GRU module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.rnn.python.kernel_tests import benchmarking from tensorflow.contrib.rnn.python.ops import gru_ops from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent class GRUBlockCellTest(test.TestCase): def testNoneDimsWithDynamicRNN(self): with self.session(use_gpu=True, graph=ops.Graph()) as sess: batch_size = 4 cell_size = 5 input_size = 6 num_steps = 7 cell = gru_ops.GRUBlockCell(cell_size) x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_size)) _, output = rnn.dynamic_rnn( cell, x, time_major=True, dtype=dtypes.float32) sess.run(variables.global_variables_initializer()) feed = {} feed[x] = np.random.randn(num_steps, batch_size, input_size) sess.run(output, feed) def testBlockGRUToGRUCellSingleStep(self): with self.session(use_gpu=True, graph=ops.Graph()) as sess: batch_size = 4 cell_size = 5 input_size = 6 seed = 1994 initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed) # Inputs x = array_ops.zeros([batch_size, input_size]) h = array_ops.zeros([batch_size, cell_size]) # Values for the inputs. x_value = np.random.rand(batch_size, input_size) h_value = np.random.rand(batch_size, cell_size) # Output from the basic GRU cell implementation. with vs.variable_scope("basic", initializer=initializer): output = rnn_cell.GRUCell(cell_size)(x, h) sess.run([variables.global_variables_initializer()]) basic_res = sess.run([output], {x: x_value, h: h_value}) # Output from the block GRU cell implementation. with vs.variable_scope("block", initializer=initializer): output = gru_ops.GRUBlockCell(cell_size)(x, h) sess.run([variables.global_variables_initializer()]) block_res = sess.run([output], {x: x_value, h: h_value}) self.assertEqual(len(block_res), len(basic_res)) for block, basic in zip(block_res, basic_res): self.assertAllClose(block, basic) def testBlockGRUToGRUCellMultiStep(self): with self.session(use_gpu=True, graph=ops.Graph()) as sess: batch_size = 2 cell_size = 3 input_size = 3 time_steps = 4 # Random initializers. 
seed = 1994 initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed) np.random.seed(seed) # Inputs concat_x = array_ops.placeholder( dtypes.float32, shape=(time_steps, batch_size, input_size)) h = array_ops.zeros([batch_size, cell_size]) # Values for the inputs. x_values = np.random.rand(time_steps, batch_size, input_size) h_value = np.random.rand(batch_size, cell_size) # Output from the block GRU cell implementation. with vs.variable_scope("block", initializer=initializer): cell = gru_ops.GRUBlockCell(cell_size) outputs_dynamic, state_dynamic = rnn.dynamic_rnn( cell, inputs=concat_x, initial_state=h, time_major=True, dtype=dtypes.float32) feeds = {concat_x: x_values, h: h_value} sess.run([variables.global_variables_initializer()]) block_res = sess.run([outputs_dynamic, state_dynamic], feeds) # Output from the basic GRU cell implementation. with vs.variable_scope("basic", initializer=initializer): cell = rnn_cell.GRUCell(cell_size) outputs_dynamic, state_dynamic = rnn.dynamic_rnn( cell, inputs=concat_x, initial_state=h, time_major=True, dtype=dtypes.float32) feeds = {concat_x: x_values, h: h_value} sess.run([variables.global_variables_initializer()]) basic_res = sess.run([outputs_dynamic, state_dynamic], feeds) # Check the lengths of the outputs_dynamic, and states. self.assertEqual(len(block_res), len(basic_res)) self.assertEqual(len(block_res[0]), len(basic_res[0])) self.assertEqual(len(block_res[1]), len(basic_res[1])) # Check the outputs_dynamic values. for block_output, basic_output in zip(block_res[0], basic_res[0]): self.assertAllClose(block_output, basic_output) # Check the state_dynamic value. self.assertAllClose(block_res[1], block_res[1]) def testDerivativeOfBlockGRUToGRUCellSingleStep(self): with self.session(use_gpu=True, graph=ops.Graph()) as sess: batch_size = 2 cell_size = 3 input_size = 4 seed = 1994 initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed) np.random.seed(seed) # Inputs x = array_ops.zeros([batch_size, input_size]) h = array_ops.zeros([batch_size, cell_size]) # Values for the inputs. x_value = np.random.rand(batch_size, input_size) h_value = np.random.rand(batch_size, cell_size) # Gradients from the block GRU cell implementation. with vs.variable_scope("block", initializer=initializer): output = gru_ops.GRUBlockCell(cell_size)(x, h) sess.run([variables.global_variables_initializer()]) all_variables = variables.global_variables()[0:4] [w_ru, b_ru, w_c, b_c] = all_variables d_new_h_wrt_x = gradients_impl.gradients([output], x) d_new_h_wrt_h = gradients_impl.gradients([output], h) d_new_h_wrt_w_ru = gradients_impl.gradients([output], w_ru) d_new_h_wrt_w_c = gradients_impl.gradients([output], w_c) d_new_h_wrt_b_ru = gradients_impl.gradients([output], b_ru) d_new_h_wrt_b_c = gradients_impl.gradients([output], b_c) d_block_res = sess.run([ d_new_h_wrt_x, d_new_h_wrt_h, d_new_h_wrt_w_ru, d_new_h_wrt_w_c, d_new_h_wrt_b_ru, d_new_h_wrt_b_c ], {x: x_value, h: h_value}) # Gradients from the basic GRU cell implementation. 
with vs.variable_scope("basic", initializer=initializer): output = rnn_cell.GRUCell(cell_size)(x, h) sess.run([variables.global_variables_initializer()]) all_variables = variables.global_variables()[4:8] [w_ru, b_ru, w_c, b_c] = all_variables d_new_h_wrt_x = gradients_impl.gradients([output], x) d_new_h_wrt_h = gradients_impl.gradients([output], h) d_new_h_wrt_w_ru = gradients_impl.gradients([output], w_ru) d_new_h_wrt_w_c = gradients_impl.gradients([output], w_c) d_new_h_wrt_b_ru = gradients_impl.gradients([output], b_ru) d_new_h_wrt_b_c = gradients_impl.gradients([output], b_c) d_basic_res = sess.run([ d_new_h_wrt_x, d_new_h_wrt_h, d_new_h_wrt_w_ru, d_new_h_wrt_w_c, d_new_h_wrt_b_ru, d_new_h_wrt_b_c ], {x: x_value, h: h_value}) # Check lengths of derivative results. self.assertEqual(len(d_block_res), len(d_basic_res)) # Check the value of every derivative result. for block, basic in zip(d_block_res, d_basic_res): self.assertAllClose(block, basic) def testDerivativeOfBlockGRUToGRUCellMultiSteps(self): batch_size = 2 cell_size = 3 input_size = 4 time_steps = 2 with self.session(use_gpu=True, graph=ops.Graph()) as sess: # Random initializers. seed = 1994 initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed) np.random.seed(seed) # Inputs concat_x = array_ops.placeholder( dtypes.float32, shape=(time_steps, batch_size, input_size)) h = array_ops.zeros([batch_size, cell_size]) # Values for the inputs. x_values = np.random.rand(time_steps, batch_size, input_size) h_value = np.random.rand(batch_size, cell_size) feeds = {concat_x: x_values, h: h_value} # Gradients from the block GRU cell implementation. with vs.variable_scope("block", initializer=initializer): cell = gru_ops.GRUBlockCell(cell_size) outputs_dynamic, _ = rnn.dynamic_rnn( cell, inputs=concat_x, initial_state=h, time_major=True, dtype=dtypes.float32) grad_output_wrt_x = gradients_impl.gradients([outputs_dynamic[0]], concat_x) grad_output_wrt_h = gradients_impl.gradients([outputs_dynamic[0]], h) sess.run([variables.global_variables_initializer()]) block_grad_res_x, block_grad_res_h = sess.run( [grad_output_wrt_x, grad_output_wrt_h], feeds) # Gradients from the basic GRU cell implementation. with vs.variable_scope("basic", initializer=initializer): cell = rnn_cell.GRUCell(cell_size) outputs_dynamic, _ = rnn.dynamic_rnn( cell, inputs=concat_x, initial_state=h, time_major=True, dtype=dtypes.float32) grad_output_wrt_x = gradients_impl.gradients([outputs_dynamic[0]], concat_x) grad_output_wrt_h = gradients_impl.gradients([outputs_dynamic[0]], h) sess.run([variables.global_variables_initializer()]) basic_grad_res_x, basic_grad_res_h = sess.run( [grad_output_wrt_x, grad_output_wrt_h], feeds) # Check derivatives values of the outputs wrt to x. self.assertEqual(len(block_grad_res_x), len(basic_grad_res_x)) # Check derivatives values of the outputs wrt to h. for block, basic in zip(block_grad_res_x, basic_grad_res_x): self.assertAllClose(block, basic) # Check derivatives values of the outputs wrt to x. self.assertEqual(len(block_grad_res_h), len(basic_grad_res_h)) # Check derivatives values of the outputs wrt to h. 
for block, basic in zip(block_grad_res_h, basic_grad_res_h): self.assertAllClose(block, basic) def testGradient(self): with self.session(use_gpu=True, graph=ops.Graph()) as sess: batch_size = 1 cell_size = 3 input_size = 2 # Inputs x = array_ops.zeros([batch_size, input_size]) h = array_ops.zeros([batch_size, cell_size]) output = gru_ops.GRUBlockCell(cell_size)(x, h) sess.run([variables.global_variables_initializer()]) all_variables = variables.global_variables() [w_ru, b_ru, w_c, b_c] = all_variables[:4] error_x = gradient_checker.compute_gradient_error( x, (batch_size, input_size), output[0], (batch_size, cell_size)) error_h = gradient_checker.compute_gradient_error(h, (batch_size, cell_size), output[0], (batch_size, cell_size)) error_w_ru = gradient_checker.compute_gradient_error( w_ru, (input_size + cell_size, 2 * cell_size), output[0], (batch_size, cell_size)) error_w_c = gradient_checker.compute_gradient_error( w_c, (input_size + cell_size, cell_size), output[0], (batch_size, cell_size)) error_b_ru = gradient_checker.compute_gradient_error( b_ru, (2 * cell_size,), output[0], (batch_size, cell_size)) error_b_c = gradient_checker.compute_gradient_error( b_c, (cell_size,), output[0], (batch_size, cell_size)) eps = 1e-4 self.assertLess(error_x, eps) self.assertLess(error_h, eps) self.assertLess(error_w_ru, eps) self.assertLess(error_w_c, eps) self.assertLess(error_b_ru, eps) self.assertLess(error_b_c, eps) #### Benchmarking GRUBlockCell vs GRUCell. def training_gru_block_vs_gru_cell(batch_size, cell_size, input_size, time_steps, use_gpu=False, iters=30): """Benchmark training speed between GRUBlockCell vs GRUCell.""" ops.reset_default_graph() with session.Session(graph=ops.Graph()) as sess: # Specify the device which is been used. with benchmarking.device(use_gpu): # Random initializers. seed = 1994 initializer = init_ops.random_uniform_initializer(-1, 1, seed=seed) np.random.seed(seed) # Inputs concat_x = vs.get_variable("concat_x", [time_steps, batch_size, input_size]) h = vs.get_variable("h", [batch_size, cell_size]) y = vs.get_variable("y", [time_steps, batch_size, cell_size]) # Output from the basic GRU cell implementation. with vs.variable_scope("basic", initializer=initializer): cell = rnn_cell.GRUCell(cell_size) outputs_dynamic, _ = rnn.dynamic_rnn( cell, inputs=concat_x, initial_state=h, time_major=True, dtype=dtypes.float32) sess.run([variables.global_variables_initializer()]) cost = math_ops.reduce_mean(math_ops.square(outputs_dynamic - y)) learning_rate = 0.01 optimizer = gradient_descent.GradientDescentOptimizer( learning_rate).minimize(cost) # time for a training step. basic_time_training = benchmarking.seconds_per_run( optimizer, sess, iters) # Output from the basic GRU cell implementation. with vs.variable_scope("block", initializer=initializer): cell = gru_ops.GRUBlockCell(cell_size) outputs_dynamic, _ = rnn.dynamic_rnn( cell, inputs=concat_x, initial_state=h, time_major=True, dtype=dtypes.float32) sess.run([variables.global_variables_initializer()]) cost = math_ops.reduce_mean(math_ops.square(outputs_dynamic - y)) learning_rate = 0.01 optimizer = gradient_descent.GradientDescentOptimizer( learning_rate).minimize(cost) # time for a training step. 
block_time_training = benchmarking.seconds_per_run( optimizer, sess, iters) performance_training = ( basic_time_training - block_time_training) * 100 / basic_time_training print(",".join([ str(batch_size), str(cell_size), str(input_size), str(time_steps), str( use_gpu), str(basic_time_training), str(block_time_training), str( performance_training) ])) return basic_time_training, block_time_training def inference_gru_block_vs_gru_cell(batch_size, cell_size, input_size, time_steps, use_gpu=False, iters=30): """Benchmark inference speed between GRUBlockCell vs GRUCell.""" ops.reset_default_graph() with session.Session(graph=ops.Graph()) as sess: with benchmarking.device(use_gpu): # Random initializers. seed = 1994 initializer = init_ops.random_uniform_initializer(-1, 1, seed=seed) np.random.seed(seed) # Inputs concat_x = vs.get_variable("concat_x", [time_steps, batch_size, input_size]) h = vs.get_variable("h", [batch_size, cell_size]) # Output from the basic GRU cell implementation. with vs.variable_scope("basic", initializer=initializer): cell = rnn_cell.GRUCell(cell_size) outputs_dynamic, _ = rnn.dynamic_rnn( cell, inputs=concat_x, initial_state=h, time_major=True, dtype=dtypes.float32) sess.run([variables.global_variables_initializer()]) basic_time_inference = benchmarking.seconds_per_run( outputs_dynamic, sess, iters) # Output from the block GRU cell implementation. with vs.variable_scope("block", initializer=initializer): cell = gru_ops.GRUBlockCell(cell_size) outputs_dynamic, _ = rnn.dynamic_rnn( cell, inputs=concat_x, initial_state=h, time_major=True, dtype=dtypes.float32) sess.run([variables.global_variables_initializer()]) block_time_inference = benchmarking.seconds_per_run( outputs_dynamic, sess, iters) performance_inference = (basic_time_inference - block_time_inference ) * 100 / basic_time_inference print(",".join([ str(batch_size), str(cell_size), str(input_size), str(time_steps), str( use_gpu), str(basic_time_inference), str(block_time_inference), str( performance_inference) ])) return basic_time_inference, block_time_inference def single_bprop_step_gru_block_vs_gru_cell(batch_size, cell_size, input_size, use_gpu=False, iters=30): """Benchmark single bprop step speed between GRUBlockCell vs GRUCell.""" ops.reset_default_graph() with session.Session(graph=ops.Graph()) as sess: with benchmarking.device(use_gpu): initializer = init_ops.random_uniform_initializer(-1, 1, seed=1989) # Inputs x = vs.get_variable("x", [batch_size, input_size]) h = vs.get_variable("h", [batch_size, cell_size]) # Output from the basic GRU cell implementation. with vs.variable_scope("basic", initializer=initializer): output = rnn_cell.GRUCell(cell_size)(array_ops.identity(x), array_ops.identity(h)) sess.run([variables.global_variables_initializer()]) grad_output_wrt_input = gradients_impl.gradients([output], h) basic_time_bprop = benchmarking.seconds_per_run(grad_output_wrt_input, sess, iters) # Output from the block GRU cell implementation. 
with vs.variable_scope("block", initializer=initializer): output = gru_ops.GRUBlockCell(cell_size)(array_ops.identity(x), array_ops.identity(h)) sess.run([variables.global_variables_initializer()]) grad_output_wrt_input = gradients_impl.gradients([output], h) block_time_bprop = benchmarking.seconds_per_run(grad_output_wrt_input, sess, iters) performance_inference = ( basic_time_bprop - block_time_bprop) * 100 / basic_time_bprop print(",".join([ str(batch_size), str(cell_size), str(input_size), str(use_gpu), str( basic_time_bprop), str(block_time_bprop), str(performance_inference) ])) return basic_time_bprop, block_time_bprop class BenchmarkGRUBlock(test.Benchmark): def benchmarkTrainingBlockGRUVsGRUCell(self): print("Comparison GRUBlockCell vs GRUCell") print("--------------------------------------------------------------") print("Training speed GRUBlockCell vs GRUCell") print("batch_size, cell_size, input_size, time_steps, GPU, " "basic_time_training, block_time_training, performance_training[%]") iters = 10 for config in benchmarking.dict_product({ "use_gpu": [True, False], "batch_size": [1, 32, 128], "cell_size": [128, 512], "input_size": [128, 512], "time_steps": [50] }): basic_time, block_time = training_gru_block_vs_gru_cell( config["batch_size"], config["cell_size"], config["input_size"], config["time_steps"], config["use_gpu"], iters) self.report_benchmark( name="GRUCell_training_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" % (config["batch_size"], config["cell_size"], config["input_size"], config["time_steps"], config["use_gpu"]), iters=iters, wall_time=basic_time) self.report_benchmark( name="GRUBlockCell_training_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" % (config["batch_size"], config["cell_size"], config["input_size"], config["time_steps"], config["use_gpu"]), iters=iters, wall_time=block_time) def benchmarkInferenceBlockGRUVsGRUCell(self): print("--------------------------------------------------------------") print("Inference speed GRUBlockCell vs GRUCell") print( "batch_size, cell_size, input_size, time_steps, GPU, " "basic_time_inference, block_time_inference, performance_inference[%]") iters = 10 for config in benchmarking.dict_product({ "use_gpu": [True, False], "batch_size": [1, 32, 128], "cell_size": [128, 512], "input_size": [128, 512], "time_steps": [50] }): basic_time, block_time = inference_gru_block_vs_gru_cell( config["batch_size"], config["cell_size"], config["input_size"], config["time_steps"], config["use_gpu"], iters) self.report_benchmark( name="GRUCell_inference_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" % (config["batch_size"], config["cell_size"], config["input_size"], config["time_steps"], config["use_gpu"]), iters=iters, wall_time=basic_time) self.report_benchmark( name="GRUBlockCell_inference_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" % (config["batch_size"], config["cell_size"], config["input_size"], config["time_steps"], config["use_gpu"]), iters=iters, wall_time=block_time) def benchmarkSingleBpropStepBlockGRUVsGRUCell(self): print("--------------------------------------------------------------") print("Single bprop step speed GRUBlockCell vs GRUCell") print("batch_size, cell_size, input_size, GPU, basic_time, " "block_time, performance_inference[%]") iters = 10 for config in benchmarking.dict_product({ "use_gpu": [True, False], "batch_size": [1, 32, 128], "cell_size": [128, 512], "input_size": [128, 512] }): basic_time, block_time = single_bprop_step_gru_block_vs_gru_cell( config["batch_size"], config["cell_size"], config["input_size"], config["use_gpu"], iters) self.report_benchmark( 
name="GRUCell_Bprop_single_step_time_BS%i_CS%i_IS%i_gpu_%s" % (config["batch_size"], config["cell_size"], config["input_size"], config["use_gpu"]), iters=iters, wall_time=basic_time) self.report_benchmark( name="GRUBlockCell_Bprop_single_step_time_BS%i_CS%i_IS%i_gpu_%s" % (config["batch_size"], config["cell_size"], config["input_size"], config["use_gpu"]), iters=iters, wall_time=block_time) print("--------------------------------------------------------------") if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/kernel_tests/gru_ops_test.py
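# The tests and benchmarks above treat GRUBlockCell as a drop-in replacement for
# rnn_cell.GRUCell and check that the two produce numerically close outputs and
# gradients. A minimal sketch of that swap under tf.nn.dynamic_rnn, with sizes
# mirroring testNoneDimsWithDynamicRNN; it is illustrative only, not part of the test file.
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import gru_ops

time_steps, batch_size, input_size, cell_size = 7, 4, 6, 5
x = tf.placeholder(tf.float32, shape=(None, None, input_size))

cell = gru_ops.GRUBlockCell(cell_size)  # fused kernel; same call interface as GRUCell
outputs, state = tf.nn.dynamic_rnn(cell, x, time_major=True, dtype=tf.float32)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  out = sess.run(outputs, {x: np.random.randn(time_steps, batch_size, input_size)})
  print(out.shape)  # (7, 4, 5): (time_steps, batch_size, cell_size)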
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """LSTM Block Cell ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.contrib.rnn.python.kernel_tests import benchmarking from tensorflow.contrib.rnn.python.ops import lstm_ops from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_bitwise_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import init_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test block_lstm = lstm_ops._block_lstm # pylint: disable=protected-access class _MaskedRandomUniformInitializer(init_ops.RandomUniform): """Initializer for uniform dist tensors with trailing bits zeroed-out. Allow returning tensors with last few mantissa bits set to 0. This potentially helps avoid getting into precision issues when testing low precision (float16) computation. """ def __init__(self, minval=0, maxval=None, seed=None, dtype=dtypes.float16, num_valid_mantissa_bits=4): """Constructor. Args: minval: A python scalar or a scalar tensor. Lower bound of the range of random values to generate. maxval: A python scalar or a scalar tensor. Upper bound of the range of random values to generate. Defaults to 1 for float types. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: The data type. Only supports tf.float16 for now. num_valid_mantissa_bits: number of non-zero mantissa bits, default to 4. Raises: ValueError: An error if `dtype` is not tf.float16. """ if dtype not in (dtypes.float16,): raise ValueError("dtype: %s not supported" % dtype.name) super(_MaskedRandomUniformInitializer, self).__init__( minval=minval, maxval=maxval, seed=seed, dtype=dtype) self._num_mantissa_bits = 10 self._num_valid_mantissa_bits = num_valid_mantissa_bits def __call__(self, shape, dtype=dtypes.float16, partition_info=None): if dtype and dtype != dtypes.float16: raise ValueError("dtype: %s not supported" % dtype.name) res = super(_MaskedRandomUniformInitializer, self).__call__( shape, dtype, partition_info) # get uint16 view of the underlying buffer. res = gen_array_ops.bitcast(res, dtypes.uint16) # mask the last `shift` mantissa bits. shift = self._num_mantissa_bits - self._num_valid_mantissa_bits mask = (0xffff >> shift) << shift res = gen_bitwise_ops.bitwise_and(res, mask) # restore float16 view. 
return gen_array_ops.bitcast(res, dtype) def _get_initializer(init_bound, dtype, seed): if dtype == dtypes.float16: return _MaskedRandomUniformInitializer( -init_bound, init_bound, dtype=dtype, seed=seed) else: return init_ops.random_uniform_initializer( -init_bound, init_bound, dtype=dtype, seed=seed) def blocks_match(sess, use_peephole, dtype=dtypes.float32, cell_clip=None): batch_size = 2 input_size = 3 cell_size = 4 sequence_length = 4 inputs = [] for _ in range(sequence_length): inp = ops.convert_to_tensor( np.random.randn(batch_size, input_size), dtype=dtype) inputs.append(inp) stacked_inputs = array_ops.stack(inputs) init_bound = 1e-1 if dtype == dtypes.float16 else 1e-2 initializer = _get_initializer(init_bound, dtype=dtype, seed=19890212) with variable_scope.variable_scope("test", initializer=initializer): # magic naming so that the cells pick up these variables and reuse them if use_peephole: wci = variable_scope.get_variable( "rnn/lstm_cell/w_i_diag", shape=[cell_size], dtype=dtype) wcf = variable_scope.get_variable( "rnn/lstm_cell/w_f_diag", shape=[cell_size], dtype=dtype) wco = variable_scope.get_variable( "rnn/lstm_cell/w_o_diag", shape=[cell_size], dtype=dtype) w = variable_scope.get_variable( "rnn/lstm_cell/kernel", shape=[input_size + cell_size, cell_size * 4], dtype=dtype) b = variable_scope.get_variable( "rnn/lstm_cell/bias", shape=[cell_size * 4], dtype=dtype, initializer=init_ops.zeros_initializer()) basic_cell = rnn_cell.LSTMCell( cell_size, use_peepholes=use_peephole, cell_clip=cell_clip, dtype=dtype, state_is_tuple=True, reuse=True) basic_outputs_op, basic_state_op = rnn.static_rnn( basic_cell, inputs, dtype=dtype) if use_peephole: _, _, _, _, _, _, block_outputs_op = block_lstm( ops.convert_to_tensor(sequence_length, dtype=dtypes.int64), inputs, w, b, wci=wci, wcf=wcf, wco=wco, cell_clip=cell_clip, use_peephole=True) else: _, _, _, _, _, _, block_outputs_op = block_lstm( ops.convert_to_tensor(sequence_length, dtype=dtypes.int64), inputs, w, b, cell_clip=cell_clip) fused_cell = lstm_ops.LSTMBlockFusedCell( cell_size, cell_clip=cell_clip, use_peephole=use_peephole, reuse=True, name="rnn/lstm_cell") fused_outputs_op, fused_state_op = fused_cell(stacked_inputs, dtype=dtype) sess.run([variables.global_variables_initializer()]) basic_outputs, basic_state = sess.run([basic_outputs_op, basic_state_op[0]]) basic_grads = sess.run(gradients_impl.gradients(basic_outputs_op, inputs)) xs = [w, b] if use_peephole: xs += [wci, wcf, wco] basic_wgrads = sess.run(gradients_impl.gradients(basic_outputs_op, xs)) block_outputs = sess.run(block_outputs_op) block_grads = sess.run(gradients_impl.gradients(block_outputs_op, inputs)) block_wgrads = sess.run(gradients_impl.gradients(block_outputs_op, xs)) xs = [w, b] if use_peephole: xs += [wci, wcf, wco] fused_outputs, fused_state = sess.run([fused_outputs_op, fused_state_op[0]]) fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs)) fused_wgrads = sess.run(gradients_impl.gradients(fused_outputs_op, xs)) return (basic_state, fused_state, basic_outputs, block_outputs, fused_outputs, basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads, fused_wgrads) class LSTMBlockCellTest(test.TestCase, parameterized.TestCase): TEST_CASES = ({ "testcase_name": "Fp32", "dtype": dtypes.float32, "rtol": 1e-6, "atol": 1e-6 }, { "testcase_name": "Fp16", "dtype": dtypes.float16, "rtol": 8e-3, "atol": 8e-4 }) def testNoneDimsWithDynamicRNN(self): with self.session(use_gpu=True, graph=ops.Graph()) as sess: batch_size = 4 num_steps = 
5 input_dim = 6 cell_size = 7 cell = lstm_ops.LSTMBlockCell(cell_size) x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_dim)) output, _ = rnn.dynamic_rnn( cell, x, time_major=True, dtype=dtypes.float32) sess.run(variables.global_variables_initializer()) feed = {} feed[x] = np.random.randn(num_steps, batch_size, input_dim) sess.run(output, feed) def testLSTMBlockCell(self): with self.session(use_gpu=True, graph=ops.Graph()) as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 2]) m0 = array_ops.zeros([1, 2]) m1 = array_ops.zeros([1, 2]) m2 = array_ops.zeros([1, 2]) m3 = array_ops.zeros([1, 2]) g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell( [lstm_ops.LSTMBlockCell(2) for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3))) sess.run([variables.global_variables_initializer()]) res = sess.run([g, out_m0, out_m1, out_m2, out_m3], { x.name: np.array([[1., 1.]]), m0.name: 0.1 * np.ones([1, 2]), m1.name: 0.1 * np.ones([1, 2]), m2.name: 0.1 * np.ones([1, 2]), m3.name: 0.1 * np.ones([1, 2]) }) self.assertEqual(len(res), 5) self.assertAllClose(res[0], [[0.24024698, 0.24024698]]) # These numbers are from testBasicLSTMCell and only test c/h. self.assertAllClose(res[1], [[0.68967271, 0.68967271]]) self.assertAllClose(res[2], [[0.44848421, 0.44848421]]) self.assertAllClose(res[3], [[0.39897051, 0.39897051]]) self.assertAllClose(res[4], [[0.24024698, 0.24024698]]) def testCompatibleNames(self): with self.session(use_gpu=True, graph=ops.Graph()): cell = rnn_cell.LSTMCell(10) pcell = rnn_cell.LSTMCell(10, use_peepholes=True) inputs = [array_ops.zeros([4, 5])] * 6 rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic") rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole") basic_names = { v.name: v.get_shape() for v in variables.trainable_variables() } with self.session(use_gpu=True, graph=ops.Graph()): cell = lstm_ops.LSTMBlockCell(10) pcell = lstm_ops.LSTMBlockCell(10, use_peephole=True) inputs = [array_ops.zeros([4, 5])] * 6 rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic") rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole") block_names = { v.name: v.get_shape() for v in variables.trainable_variables() } with self.session(use_gpu=True, graph=ops.Graph()): cell = lstm_ops.LSTMBlockFusedCell(10) pcell = lstm_ops.LSTMBlockFusedCell(10, use_peephole=True) inputs = array_ops.stack([array_ops.zeros([4, 5])] * 6) cell(inputs, dtype=dtypes.float32, scope="basic/lstm_cell") pcell(inputs, dtype=dtypes.float32, scope="peephole/lstm_cell") fused_names = { v.name: v.get_shape() for v in variables.trainable_variables() } self.assertEqual(basic_names, block_names) self.assertEqual(basic_names, fused_names) def testLSTMBasicToBlockCell(self): with self.session(use_gpu=True) as sess: x = array_ops.zeros([1, 2]) x_values = np.random.randn(1, 2) m0_val = 0.1 * np.ones([1, 2]) m1_val = -0.1 * np.ones([1, 2]) m2_val = -0.2 * np.ones([1, 2]) m3_val = 0.2 * np.ones([1, 2]) initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=19890212) with variable_scope.variable_scope("basic", initializer=initializer): m0 = array_ops.zeros([1, 2]) m1 = array_ops.zeros([1, 2]) m2 = array_ops.zeros([1, 2]) m3 = array_ops.zeros([1, 2]) g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell( [rnn_cell.BasicLSTMCell(2, state_is_tuple=True) for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3))) 
sess.run([variables.global_variables_initializer()]) basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], { x.name: x_values, m0.name: m0_val, m1.name: m1_val, m2.name: m2_val, m3.name: m3_val }) with variable_scope.variable_scope("block", initializer=initializer): m0 = array_ops.zeros([1, 2]) m1 = array_ops.zeros([1, 2]) m2 = array_ops.zeros([1, 2]) m3 = array_ops.zeros([1, 2]) g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell( [lstm_ops.LSTMBlockCell(2) for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3))) sess.run([variables.global_variables_initializer()]) block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], { x.name: x_values, m0.name: m0_val, m1.name: m1_val, m2.name: m2_val, m3.name: m3_val }) self.assertEqual(len(basic_res), len(block_res)) for basic, block in zip(basic_res, block_res): self.assertAllClose(basic, block) def testLSTMBasicToBlockCellPeeping(self): with self.session(use_gpu=True) as sess: x = array_ops.zeros([1, 2]) x_values = np.random.randn(1, 2) m0_val = 0.1 * np.ones([1, 2]) m1_val = -0.1 * np.ones([1, 2]) m2_val = -0.2 * np.ones([1, 2]) m3_val = 0.2 * np.ones([1, 2]) initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=19890212) with variable_scope.variable_scope("basic", initializer=initializer): m0 = array_ops.zeros([1, 2]) m1 = array_ops.zeros([1, 2]) m2 = array_ops.zeros([1, 2]) m3 = array_ops.zeros([1, 2]) g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell( [ rnn_cell.LSTMCell(2, use_peepholes=True, state_is_tuple=True) for _ in range(2) ], state_is_tuple=True)(x, ((m0, m1), (m2, m3))) sess.run([variables.global_variables_initializer()]) basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], { x.name: x_values, m0.name: m0_val, m1.name: m1_val, m2.name: m2_val, m3.name: m3_val }) with variable_scope.variable_scope("block", initializer=initializer): m0 = array_ops.zeros([1, 2]) m1 = array_ops.zeros([1, 2]) m2 = array_ops.zeros([1, 2]) m3 = array_ops.zeros([1, 2]) g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell( [lstm_ops.LSTMBlockCell(2, use_peephole=True) for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3))) sess.run([variables.global_variables_initializer()]) block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], { x.name: x_values, m0.name: m0_val, m1.name: m1_val, m2.name: m2_val, m3.name: m3_val }) self.assertEqual(len(basic_res), len(block_res)) for basic, block in zip(basic_res, block_res): self.assertAllClose(basic, block) def LSTMBasicToBlockTestHelper(self, dtype=dtypes.float32, use_peephole=False, cell_clip=None, rtol=1e-6, atol=1e-6): with self.session(use_gpu=True, graph=ops.Graph()) as sess: (basic_state, fused_state, basic_outputs, block_outputs, fused_outputs, basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads, fused_wgrads) = blocks_match( sess, use_peephole=use_peephole, dtype=dtype, cell_clip=cell_clip) self.assertAllClose(basic_outputs, block_outputs, rtol=rtol, atol=atol) self.assertAllClose(basic_grads, block_grads, rtol=rtol, atol=atol) for basic, block in zip(basic_wgrads, block_wgrads): self.assertAllClose(basic, block, rtol=rtol, atol=atol) self.assertAllClose(basic_outputs, fused_outputs, rtol=rtol, atol=atol) self.assertAllClose(basic_state, fused_state, rtol=rtol, atol=atol) self.assertAllClose(basic_grads, fused_grads, rtol=rtol, atol=atol) for basic, fused in zip(basic_wgrads, fused_wgrads): self.assertAllClose(basic, fused, rtol=rtol, atol=atol) @parameterized.named_parameters(*TEST_CASES) def 
testLSTMBasicToBlock(self, dtype, rtol, atol): self.LSTMBasicToBlockTestHelper( dtype, use_peephole=False, rtol=rtol, atol=atol) @parameterized.named_parameters(*TEST_CASES) def testLSTMBasicToBlockPeeping(self, dtype, rtol, atol): self.LSTMBasicToBlockTestHelper( dtype, use_peephole=True, rtol=rtol, atol=atol) @parameterized.named_parameters(*TEST_CASES) def testLSTMBasicToBlockCellClip(self, dtype, rtol, atol): self.LSTMBasicToBlockTestHelper( dtype, use_peephole=True, cell_clip=0.5, rtol=rtol, atol=atol) def testLSTMFusedSequenceLengths(self): """Verify proper support for sequence lengths in LSTMBlockFusedCell.""" with self.session(use_gpu=True) as sess: batch_size = 3 input_size = 4 cell_size = 5 max_sequence_length = 6 inputs = [] for _ in range(max_sequence_length): inp = ops.convert_to_tensor( np.random.randn(batch_size, input_size), dtype=dtypes.float32) inputs.append(inp) seq_lengths = constant_op.constant([3, 4, 5]) cell_inputs = array_ops.stack(inputs) initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=19890213) with variable_scope.variable_scope("lstm_cell", initializer=initializer): # magic naming so that the cells pick up these variables and reuse them variable_scope.get_variable( "kernel", shape=[input_size + cell_size, cell_size * 4], dtype=dtypes.float32) variable_scope.get_variable( "bias", shape=[cell_size * 4], dtype=dtypes.float32, initializer=init_ops.zeros_initializer()) cell = lstm_ops.LSTMBlockFusedCell( cell_size, cell_clip=0, use_peephole=False, reuse=True, name="lstm_cell") fused_outputs_op, fused_state_op = cell( cell_inputs, dtype=dtypes.float32, sequence_length=seq_lengths) cell_vars = [ v for v in variables.trainable_variables() if v.name.endswith("kernel") or v.name.endswith("bias") ] # Verify that state propagation works if we turn our sequence into # tiny (single-time) subsequences, i.e. unfuse the cell unfused_outputs_op = [] state = None with variable_scope.variable_scope( variable_scope.get_variable_scope(), reuse=True): for i, inp in enumerate(inputs): lengths = [int(i < l) for l in seq_lengths.eval()] output, state = cell( array_ops.expand_dims(inp, 0), initial_state=state, dtype=dtypes.float32, sequence_length=lengths) unfused_outputs_op.append(output[0]) unfused_outputs_op = array_ops.stack(unfused_outputs_op) sess.run([variables.global_variables_initializer()]) unfused_outputs, unfused_state = sess.run([unfused_outputs_op, state[0]]) unfused_grads = sess.run( gradients_impl.gradients(unfused_outputs_op, inputs)) unfused_wgrads = sess.run( gradients_impl.gradients(unfused_outputs_op, cell_vars)) fused_outputs, fused_state = sess.run( [fused_outputs_op, fused_state_op[0]]) fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs)) fused_wgrads = sess.run( gradients_impl.gradients(fused_outputs_op, cell_vars)) self.assertAllClose(fused_outputs, unfused_outputs) self.assertAllClose(fused_state, unfused_state) self.assertAllClose(fused_grads, unfused_grads) for fused, unfused in zip(fused_wgrads, unfused_wgrads): self.assertAllClose(fused, unfused, rtol=1e-6, atol=1e-6) #### Benchmarking. 
class BenchmarkLSTMBlock(test.Benchmark):

  def benchmarkLSTMBlockCellFpropWithDynamicRNN(self):
    print("BlockLSTMCell forward propagation via dynamic_rnn().")
    print("--------------------------------------------------------------")
    print("LSTMBlockCell Seconds per inference.")
    print("batch_size,cell_size,input_size,time_steps,use_gpu,wall_time")
    iters = 10
    for config in benchmarking.dict_product({
        "batch_size": [1, 8, 13, 32, 67, 128],
        "cell_size": [128, 250, 512, 650, 1024, 1350],
        "time_steps": [40],
        "use_gpu": [True, False],
        "dtype": ["float32", "float16"],
    }):
      dtype = dtypes.float32 if config["dtype"] == "float32" else dtypes.float16
      with ops.Graph().as_default():
        with benchmarking.device(use_gpu=config["use_gpu"]):
          inputs = variable_scope.get_variable(
              "x",
              dtype=dtype,
              shape=[
                  config["time_steps"], config["batch_size"],
                  config["cell_size"]
              ])
          cell = lstm_ops.LSTMBlockCell(config["cell_size"], dtype=dtype)
          outputs = rnn.dynamic_rnn(cell, inputs, time_major=True, dtype=dtype)
          init_op = variables.global_variables_initializer()

        with session.Session() as sess:
          sess.run(init_op)
          wall_time = benchmarking.seconds_per_run(outputs, sess, iters)

        # Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable
        # is set, this will produce a copy-paste-able CSV file.
        print(",".join(
            map(str, [
                config["dtype"], config["batch_size"], config["cell_size"],
                config["cell_size"], config["time_steps"], config["use_gpu"],
                wall_time
            ])))
        benchmark_name_template = "_".join([
            "LSTMBlockCell_fprop", "DT_%(dtype)s", "BS%(batch_size)i",
            "CS%(cell_size)i", "IS%(cell_size)i", "TS%(time_steps)i",
            "gpu_%(use_gpu)s"
        ])

        self.report_benchmark(
            name=benchmark_name_template % config,
            iters=iters,
            wall_time=wall_time,
            extras=config)

  def benchmarkLSTMBlockCellBpropWithDynamicRNN(self):
    print("BlockLSTMCell backward propagation via dynamic_rnn().")
    print("--------------------------------------------------------------")
    print("LSTMBlockCell Seconds per inference.")
    print("batch_size,cell_size,input_size,time_steps,use_gpu,wall_time")
    iters = 10
    for config in benchmarking.dict_product({
        "batch_size": [1, 8, 13, 32, 67, 128],
        "cell_size": [128, 250, 512, 650, 1024, 1350],
        "time_steps": [40],
        "use_gpu": [True, False],
        "dtype": ["float32", "float16"],
    }):
      dtype = dtypes.float32 if config["dtype"] == "float32" else dtypes.float16
      with ops.Graph().as_default():
        with benchmarking.device(use_gpu=config["use_gpu"]):
          time_steps = config["time_steps"]
          batch_size = config["batch_size"]
          cell_size = input_size = config["cell_size"]
          inputs = variable_scope.get_variable(
              "x", [time_steps, batch_size, cell_size],
              trainable=False,
              dtype=dtype)
          with variable_scope.variable_scope(
              "rnn", reuse=variable_scope.AUTO_REUSE):
            w = variable_scope.get_variable(
                "rnn/lstm_cell/kernel",
                shape=[input_size + cell_size, cell_size * 4],
                dtype=dtype)
            b = variable_scope.get_variable(
                "rnn/lstm_cell/bias",
                shape=[cell_size * 4],
                dtype=dtype,
                initializer=init_ops.zeros_initializer())
            cell = lstm_ops.LSTMBlockCell(cell_size, dtype=dtype)
            outputs = rnn.dynamic_rnn(
                cell, inputs, time_major=True, dtype=dtype)
          grads = gradients_impl.gradients(outputs, [inputs, w, b])
          init_op = variables.global_variables_initializer()

        with session.Session() as sess:
          sess.run(init_op)
          wall_time = benchmarking.seconds_per_run(grads, sess, iters)

        # Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable
        # is set, this will produce a copy-paste-able CSV file.
        print(",".join(
            map(str, [
                config["dtype"], batch_size, cell_size, cell_size, time_steps,
                config["use_gpu"], wall_time
            ])))
        benchmark_name_template = "_".join([
            "LSTMBlockCell_bprop", "DT_%(dtype)s", "BS%(batch_size)i",
            "CS%(cell_size)i", "IS%(cell_size)i", "TS%(time_steps)i",
            "gpu_%(use_gpu)s"
        ])

        self.report_benchmark(
            name=benchmark_name_template % config,
            iters=iters,
            wall_time=wall_time,
            extras=config)


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py
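The tests in the record above all lean on one contract: `lstm_ops.LSTMBlockCell` (and the fused `LSTMBlockFusedCell`) builds variables with the same `rnn/lstm_cell/kernel` and `rnn/lstm_cell/bias` names and gate layout as the canonical `LSTMCell`, so the implementations can share weights and are expected to agree numerically. A minimal stand-alone sketch of that drop-in relationship is below; it assumes the TF 1.x graph-mode API (`tf.contrib.rnn.LSTMBlockCell`, `tf.nn.dynamic_rnn`) and uses illustrative shapes that are not taken from the test file.

# --- Illustrative sketch (not part of the lstm_ops_test.py record above). ---
# Assumes TF 1.x graph mode; the shapes are made up for illustration.
import numpy as np
import tensorflow as tf

batch_size, time_steps, input_size, cell_size = 2, 4, 3, 5
inputs = tf.placeholder(tf.float32, [time_steps, batch_size, input_size])

# Reference: the canonical LSTMCell, unrolled with dynamic_rnn.
with tf.variable_scope("lstm"):
  basic_cell = tf.nn.rnn_cell.LSTMCell(cell_size)
  basic_out, _ = tf.nn.dynamic_rnn(
      basic_cell, inputs, time_major=True, dtype=tf.float32)

# Fused kernel: LSTMBlockCell creates variables under the same
# "rnn/lstm_cell/kernel" / "rnn/lstm_cell/bias" names, so reusing the scope
# makes it run with exactly the same weights as the reference cell.
with tf.variable_scope("lstm", reuse=True):
  block_cell = tf.contrib.rnn.LSTMBlockCell(cell_size)
  block_out, _ = tf.nn.dynamic_rnn(
      block_cell, inputs, time_major=True, dtype=tf.float32)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  feed = {inputs: np.random.randn(time_steps, batch_size, input_size)}
  basic_np, block_np = sess.run([basic_out, block_out], feed)
  # The blocks_match() helper above checks this agreement (plus gradients and
  # the fused cell) much more rigorously; here we only eyeball the forward pass.
  print(np.allclose(basic_np, block_np, atol=1e-5))

`LSTMBlockFusedCell` follows the same naming scheme but consumes the whole time-major sequence in a single op, which is what the `testCompatibleNames` and `blocks_match` checks above exercise.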
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for rnn module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import numpy as np from tensorflow.contrib.rnn.python.ops import rnn as contrib_rnn from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging class StackBidirectionalRNNTest(test.TestCase): def setUp(self): self._seed = 23489 np.random.seed(self._seed) def _createStackBidirectionalRNN(self, use_gpu, use_shape, use_sequence_length, initial_states_fw=None, initial_states_bw=None, scope=None): self.layers = [2, 3] input_size = 5 batch_size = 2 max_length = 8 initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) sequence_length = array_ops.placeholder( dtypes.int64) if use_sequence_length else None self.cells_fw = [ rnn_cell.LSTMCell( num_units, input_size, initializer=initializer, state_is_tuple=False) for num_units in self.layers ] self.cells_bw = [ rnn_cell.LSTMCell( num_units, input_size, initializer=initializer, state_is_tuple=False) for num_units in self.layers ] inputs = max_length * [ array_ops.placeholder( dtypes.float32, shape=(batch_size, input_size) if use_shape else (None, input_size)) ] outputs, state_fw, state_bw = contrib_rnn.stack_bidirectional_rnn( self.cells_fw, self.cells_bw, inputs, initial_states_fw, initial_states_bw, dtype=dtypes.float32, sequence_length=sequence_length, scope=scope) self.assertEqual(len(outputs), len(inputs)) for out in outputs: self.assertAlmostEqual( out.get_shape().as_list(), [batch_size if use_shape else None, 2 * self.layers[-1]]) input_value = np.random.randn(batch_size, input_size) outputs = array_ops.stack(outputs) return input_value, inputs, outputs, state_fw, state_bw, sequence_length def _testStackBidirectionalRNN(self, use_gpu, use_shape): with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess: input_value, inputs, outputs, state_fw, state_bw, sequence_length = ( self._createStackBidirectionalRNN(use_gpu, use_shape, True)) variables.global_variables_initializer().run() # Run with pre-specified sequence lengths of 2, 3. out, s_fw, s_bw = sess.run( [outputs, state_fw, state_bw], feed_dict={inputs[0]: input_value, sequence_length: [2, 3]}) # Since the forward and backward LSTM cells were initialized with the # same parameters, the forward and backward states of the first layer # must be the same. 
# For the next layers, since the input is a concat of forward and backward # outputs of the previous layers the symmetry is broken and the following # states and outputs differ. # We cannot access the intermediate values between layers but we can # check that the forward and backward states of the first layer match. self.assertAllClose(s_fw[0], s_bw[0]) # If outputs are not concat between layers the output of the forward # and backward would be the same but symmetric. # Check that it is not the case. # Due to depth concatenation (as num_units=3 for both RNNs): # - forward output: out[][][depth] for 0 <= depth < 3 # - backward output: out[][][depth] for 4 <= depth < 6 # First sequence in batch is length=2 # Check that the time=0 forward output is not equal to time=1 backward. self.assertNotEqual(out[0][0][0], out[1][0][3]) self.assertNotEqual(out[0][0][1], out[1][0][4]) self.assertNotEqual(out[0][0][2], out[1][0][5]) # Check that the time=1 forward output is not equal to time=0 backward. self.assertNotEqual(out[1][0][0], out[0][0][3]) self.assertNotEqual(out[1][0][1], out[0][0][4]) self.assertNotEqual(out[1][0][2], out[0][0][5]) # Second sequence in batch is length=3 # Check that the time=0 forward output is not equal to time=2 backward. self.assertNotEqual(out[0][1][0], out[2][1][3]) self.assertNotEqual(out[0][1][1], out[2][1][4]) self.assertNotEqual(out[0][1][2], out[2][1][5]) # Check that the time=1 forward output is not equal to time=1 backward. self.assertNotEqual(out[1][1][0], out[1][1][3]) self.assertNotEqual(out[1][1][1], out[1][1][4]) self.assertNotEqual(out[1][1][2], out[1][1][5]) # Check that the time=2 forward output is not equal to time=0 backward. self.assertNotEqual(out[2][1][0], out[0][1][3]) self.assertNotEqual(out[2][1][1], out[0][1][4]) self.assertNotEqual(out[2][1][2], out[0][1][5]) def _testStackBidirectionalRNNStates(self, use_gpu): # Check that the states are correctly initialized. # - Create a net and iterate for 3 states. Keep the state (state_3). # - Reset states, and iterate for 5 steps. Last state is state_5. # - Reset the sets to state_3 and iterate for 2 more steps, # last state will be state_5'. # - Check that the state_5 and state_5' (forward and backward) are the # same for the first layer (it does not apply for the second layer since # it has forward-backward dependencies). with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess: batch_size = 2 # Create states placeholders. initial_states_fw = [ array_ops.placeholder( dtypes.float32, shape=(batch_size, layer * 2)) for layer in self.layers ] initial_states_bw = [ array_ops.placeholder( dtypes.float32, shape=(batch_size, layer * 2)) for layer in self.layers ] # Create the net input_value, inputs, outputs, state_fw, state_bw, sequence_length = ( self._createStackBidirectionalRNN(use_gpu, True, True, initial_states_fw, initial_states_bw)) variables.global_variables_initializer().run() # Run 3 steps. feed_dict = {inputs[0]: input_value, sequence_length: [3, 2]} # Initialize to empty state. for i, layer in enumerate(self.layers): feed_dict[initial_states_fw[i]] = np.zeros( (batch_size, layer * 2), dtype=np.float32) feed_dict[initial_states_bw[i]] = np.zeros( (batch_size, layer * 2), dtype=np.float32) _, st_3_fw, st_3_bw = sess.run([outputs, state_fw, state_bw], feed_dict=feed_dict) # Reset the net and run 5 steps. 
feed_dict = {inputs[0]: input_value, sequence_length: [5, 3]} for i, layer in enumerate(self.layers): feed_dict[initial_states_fw[i]] = np.zeros( (batch_size, layer * 2), dtype=np.float32) feed_dict[initial_states_bw[i]] = np.zeros( (batch_size, layer * 2), dtype=np.float32) _, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw], feed_dict=feed_dict) # Reset the net to state_3 and run 2 more steps. feed_dict = {inputs[0]: input_value, sequence_length: [2, 1]} for i, _ in enumerate(self.layers): feed_dict[initial_states_fw[i]] = st_3_fw[i] feed_dict[initial_states_bw[i]] = st_3_bw[i] out_5p, st_5p_fw, st_5p_bw = sess.run([outputs, state_fw, state_bw], feed_dict=feed_dict) # Check that the 3+2 and 5 first layer states. self.assertAllEqual(st_5_fw[0], st_5p_fw[0]) self.assertAllEqual(st_5_bw[0], st_5p_bw[0]) def testStackBidirectionalRNN(self): self._testStackBidirectionalRNN(use_gpu=False, use_shape=False) self._testStackBidirectionalRNN(use_gpu=True, use_shape=False) self._testStackBidirectionalRNN(use_gpu=False, use_shape=True) self._testStackBidirectionalRNN(use_gpu=True, use_shape=True) self._testStackBidirectionalRNNStates(use_gpu=False) self._testStackBidirectionalRNNStates(use_gpu=True) def _createStackBidirectionalDynamicRNN(self, use_gpu, use_shape, use_state_tuple, initial_states_fw=None, initial_states_bw=None, scope=None): self.layers = [2, 3] input_size = 5 batch_size = 2 max_length = 8 initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) sequence_length = array_ops.placeholder(dtypes.int64) self.cells_fw = [ rnn_cell.LSTMCell( num_units, input_size, initializer=initializer, state_is_tuple=False) for num_units in self.layers ] self.cells_bw = [ rnn_cell.LSTMCell( num_units, input_size, initializer=initializer, state_is_tuple=False) for num_units in self.layers ] inputs = max_length * [ array_ops.placeholder( dtypes.float32, shape=(batch_size, input_size) if use_shape else (None, input_size)) ] inputs_c = array_ops.stack(inputs) inputs_c = array_ops.transpose(inputs_c, [1, 0, 2]) outputs, st_fw, st_bw = contrib_rnn.stack_bidirectional_dynamic_rnn( self.cells_fw, self.cells_bw, inputs_c, initial_states_fw=initial_states_fw, initial_states_bw=initial_states_bw, dtype=dtypes.float32, sequence_length=sequence_length, scope=scope) # Outputs has shape (batch_size, max_length, 2* layer[-1]. output_shape = [None, max_length, 2 * self.layers[-1]] if use_shape: output_shape[0] = batch_size self.assertAllEqual(outputs.get_shape().as_list(), output_shape) input_value = np.random.randn(batch_size, input_size) return input_value, inputs, outputs, st_fw, st_bw, sequence_length def _testStackBidirectionalDynamicRNN(self, use_gpu, use_shape, use_state_tuple): with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess: input_value, inputs, outputs, state_fw, state_bw, sequence_length = ( self._createStackBidirectionalDynamicRNN(use_gpu, use_shape, use_state_tuple)) variables.global_variables_initializer().run() # Run with pre-specified sequence length of 2, 3 out, s_fw, s_bw = sess.run( [outputs, state_fw, state_bw], feed_dict={inputs[0]: input_value, sequence_length: [2, 3]}) # Since the forward and backward LSTM cells were initialized with the # same parameters, the forward and backward states of the first layer has # to be the same. # For the next layers, since the input is a concat of forward and backward # outputs of the previous layers the symmetry is broken and the following # states and outputs differ. 
# We cannot access the intermediate values between layers but we can # check that the forward and backward states of the first layer match. self.assertAllClose(s_fw[0], s_bw[0]) out = np.swapaxes(out, 0, 1) # If outputs are not concat between layers the output of the forward # and backward would be the same but symmetric. # Check that is not the case. # Due to depth concatenation (as num_units=3 for both RNNs): # - forward output: out[][][depth] for 0 <= depth < 3 # - backward output: out[][][depth] for 4 <= depth < 6 # First sequence in batch is length=2 # Check that the time=0 forward output is not equal to time=1 backward. self.assertNotEqual(out[0][0][0], out[1][0][3]) self.assertNotEqual(out[0][0][1], out[1][0][4]) self.assertNotEqual(out[0][0][2], out[1][0][5]) # Check that the time=1 forward output is not equal to time=0 backward. self.assertNotEqual(out[1][0][0], out[0][0][3]) self.assertNotEqual(out[1][0][1], out[0][0][4]) self.assertNotEqual(out[1][0][2], out[0][0][5]) # Second sequence in batch is length=3 # Check that the time=0 forward output is not equal to time=2 backward. self.assertNotEqual(out[0][1][0], out[2][1][3]) self.assertNotEqual(out[0][1][1], out[2][1][4]) self.assertNotEqual(out[0][1][2], out[2][1][5]) # Check that the time=1 forward output is not equal to time=1 backward. self.assertNotEqual(out[1][1][0], out[1][1][3]) self.assertNotEqual(out[1][1][1], out[1][1][4]) self.assertNotEqual(out[1][1][2], out[1][1][5]) # Check that the time=2 forward output is not equal to time=0 backward. self.assertNotEqual(out[2][1][0], out[0][1][3]) self.assertNotEqual(out[2][1][1], out[0][1][4]) self.assertNotEqual(out[2][1][2], out[0][1][5]) def _testStackBidirectionalDynamicRNNStates(self, use_gpu): # Check that the states are correctly initialized. # - Create a net and iterate for 3 states. Keep the state (state_3). # - Reset states, and iterate for 5 steps. Last state is state_5. # - Reset the sets to state_3 and iterate for 2 more steps, # last state will be state_5'. # - Check that the state_5 and state_5' (forward and backward) are the # same for the first layer (it does not apply for the second layer since # it has forward-backward dependencies). with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess: batch_size = 2 # Create states placeholders. initial_states_fw = [ array_ops.placeholder( dtypes.float32, shape=(batch_size, layer * 2)) for layer in self.layers ] initial_states_bw = [ array_ops.placeholder( dtypes.float32, shape=(batch_size, layer * 2)) for layer in self.layers ] # Create the net input_value, inputs, outputs, state_fw, state_bw, sequence_length = ( self._createStackBidirectionalDynamicRNN( use_gpu, use_shape=True, use_state_tuple=False, initial_states_fw=initial_states_fw, initial_states_bw=initial_states_bw)) variables.global_variables_initializer().run() # Run 3 steps. feed_dict = {inputs[0]: input_value, sequence_length: [3, 2]} # Initialize to empty state. for i, layer in enumerate(self.layers): feed_dict[initial_states_fw[i]] = np.zeros( (batch_size, layer * 2), dtype=np.float32) feed_dict[initial_states_bw[i]] = np.zeros( (batch_size, layer * 2), dtype=np.float32) _, st_3_fw, st_3_bw = sess.run([outputs, state_fw, state_bw], feed_dict=feed_dict) # Reset the net and run 5 steps. 
      feed_dict = {inputs[0]: input_value, sequence_length: [5, 3]}
      for i, layer in enumerate(self.layers):
        feed_dict[initial_states_fw[i]] = np.zeros(
            (batch_size, layer * 2), dtype=np.float32)
        feed_dict[initial_states_bw[i]] = np.zeros(
            (batch_size, layer * 2), dtype=np.float32)
      _, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
                                     feed_dict=feed_dict)

      # Reset the net to state_3 and run 2 more steps.
      feed_dict = {inputs[0]: input_value, sequence_length: [2, 1]}
      for i, _ in enumerate(self.layers):
        feed_dict[initial_states_fw[i]] = st_3_fw[i]
        feed_dict[initial_states_bw[i]] = st_3_bw[i]
      out_5p, st_5p_fw, st_5p_bw = sess.run([outputs, state_fw, state_bw],
                                            feed_dict=feed_dict)

      # Check that the 3+2-step and 5-step first-layer states match.
      self.assertAllEqual(st_5_fw[0], st_5p_fw[0])
      self.assertAllEqual(st_5_bw[0], st_5p_bw[0])

  def testBidirectionalRNN(self):
    # Generate 2^3 option values
    # from [True, True, True] to [False, False, False]
    options = itertools.product([True, False], repeat=3)
    for option in options:
      self._testStackBidirectionalDynamicRNN(
          use_gpu=option[0], use_shape=option[1], use_state_tuple=option[2])
    # Check states.
    self._testStackBidirectionalDynamicRNNStates(use_gpu=False)
    self._testStackBidirectionalDynamicRNNStates(use_gpu=True)

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    # REMARKS: factory(scope) is a function accepting a scope as an argument;
    # the scope can be None, a string, or a VariableScope instance.
    with self.session(use_gpu=True, graph=ops.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)

      # Check that all the variable names start with the proper scope.
      variables.global_variables_initializer()
      all_vars = variables.global_variables()
      prefix = prefix or "stack_bidirectional_rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("StackRNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testStackBidirectionalRNNScope(self):

    def factory(scope):
      return self._createStackBidirectionalRNN(
          use_gpu=True, use_shape=True, use_sequence_length=True, scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)

  def testBidirectionalDynamicRNNScope(self):

    def factory(scope):
      return self._createStackBidirectionalDynamicRNN(
          use_gpu=True, use_shape=True, use_state_tuple=True, scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/kernel_tests/rnn_test.py
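The stacked-bidirectional tests in the record above repeatedly assert one shape contract: every layer concatenates its forward and backward outputs along the depth axis, so the final output depth is `2 * layers[-1]`, and `sequence_length` masks the padded time steps. The short sketch below shows that contract outside the test harness; it assumes the TF 1.x `tf.contrib.rnn.stack_bidirectional_dynamic_rnn` API, and the layer sizes and placeholder shapes are illustrative only.

# --- Illustrative sketch (not part of the rnn_test.py record above). ---
# Assumes the TF 1.x contrib API; layer sizes and shapes are made up.
import numpy as np
import tensorflow as tf

batch_size, max_time, input_size = 2, 8, 5
layers = [2, 3]  # per-layer cell sizes, mirroring self.layers in the tests

inputs = tf.placeholder(tf.float32, [batch_size, max_time, input_size])
seq_len = tf.placeholder(tf.int64, [batch_size])

cells_fw = [tf.nn.rnn_cell.LSTMCell(n) for n in layers]
cells_bw = [tf.nn.rnn_cell.LSTMCell(n) for n in layers]

# Each layer concatenates its forward and backward outputs along depth, so
# the final output depth is 2 * layers[-1]; sequence_length masks padding.
outputs, state_fw, state_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
    cells_fw, cells_bw, inputs, sequence_length=seq_len, dtype=tf.float32)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  out = sess.run(outputs, {
      inputs: np.random.randn(batch_size, max_time, input_size),
      seq_len: [2, 3],
  })
  print(out.shape)  # (2, 8, 6) == (batch, time, 2 * layers[-1])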
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for RNN cells.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import numpy as np from tensorflow.contrib.rnn.python.ops import core_rnn_cell as legacy_rnn_cell from tensorflow.contrib.rnn.python.ops import rnn_cell as contrib_rnn_cell from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.keras import initializers from tensorflow.python.keras import layers as keras_layers from tensorflow.python.keras import testing_utils from tensorflow.python.keras import utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.ops.losses import losses from tensorflow.python.platform import test from tensorflow.python.training import training from tensorflow.python.util import nest class RNNCellTest(test.TestCase): def _assert_cell_builds(self, cell_class, dtype, batch_size, in_size, out_size): cell = cell_class(out_size, dtype=dtype) in_shape = tensor_shape.TensorShape((batch_size, in_size)) cell.build(in_shape) state_output = cell.get_initial_state( inputs=None, batch_size=batch_size, dtype=dtype) cell_output, _ = cell(array_ops.zeros(in_shape, dtype), state_output) self.assertAllEqual([batch_size, out_size], cell_output.shape.as_list()) def testCellsBuild(self): f32 = dtypes.float32 f64 = dtypes.float64 self._assert_cell_builds(contrib_rnn_cell.IndRNNCell, f32, 5, 7, 3) self._assert_cell_builds(contrib_rnn_cell.IndRNNCell, f64, 5, 7, 3) self._assert_cell_builds(contrib_rnn_cell.IndyGRUCell, f32, 5, 7, 3) self._assert_cell_builds(contrib_rnn_cell.IndyGRUCell, f64, 5, 7, 3) self._assert_cell_builds(contrib_rnn_cell.IndyLSTMCell, f32, 5, 7, 3) self._assert_cell_builds(contrib_rnn_cell.IndyLSTMCell, f64, 5, 7, 3) def testIndRNNCell(self): with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 2]) m = array_ops.zeros([1, 2]) cell = contrib_rnn_cell.IndRNNCell(2) g, _ = cell(x, m) self.assertEqual([ "root/ind_rnn_cell/%s_w:0" % 
rnn_cell_impl._WEIGHTS_VARIABLE_NAME, "root/ind_rnn_cell/%s_u:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, "root/ind_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME ], [v.name for v in cell.trainable_variables]) self.assertFalse(cell.non_trainable_variables) sess.run([variables.global_variables_initializer()]) res = sess.run([g], { x.name: np.array([[1., 1.]]), m.name: np.array([[0.1, 0.1]]) }) self.assertEqual(res[0].shape, (1, 2)) def testIndyGRUCell(self): with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 2]) m = array_ops.zeros([1, 2]) g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g], { x.name: np.array([[1., 1.]]), m.name: np.array([[0.1, 0.1]]) }) # Smoke test self.assertAllClose(res[0], [[0.185265, 0.17704]]) with variable_scope.variable_scope( "other", initializer=init_ops.constant_initializer(0.5)): # Test IndyGRUCell with input_size != num_units. x = array_ops.zeros([1, 3]) m = array_ops.zeros([1, 2]) g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g], { x.name: np.array([[1., 1., 1.]]), m.name: np.array([[0.1, 0.1]]) }) # Smoke test self.assertAllClose(res[0], [[0.155127, 0.157328]]) def testIndyLSTMCell(self): for dtype in [dtypes.float16, dtypes.float32]: np_dtype = dtype.as_numpy_dtype with self.session(graph=ops.Graph()) as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 2], dtype=dtype) state_0 = (array_ops.zeros([1, 2], dtype=dtype),) * 2 state_1 = (array_ops.zeros([1, 2], dtype=dtype),) * 2 cell = rnn_cell_impl.MultiRNNCell( [contrib_rnn_cell.IndyLSTMCell(2) for _ in range(2)]) self.assertEqual(cell.dtype, None) self.assertEqual("cell-0", cell._checkpoint_dependencies[0].name) self.assertEqual("cell-1", cell._checkpoint_dependencies[1].name) cell.get_config() # Should not throw an error g, (out_state_0, out_state_1) = cell(x, (state_0, state_1)) # Layer infers the input type. self.assertEqual(cell.dtype, dtype.name) expected_variable_names = [ "root/multi_rnn_cell/cell_0/indy_lstm_cell/%s_w:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, "root/multi_rnn_cell/cell_0/indy_lstm_cell/%s_u:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, "root/multi_rnn_cell/cell_0/indy_lstm_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME, "root/multi_rnn_cell/cell_1/indy_lstm_cell/%s_w:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, "root/multi_rnn_cell/cell_1/indy_lstm_cell/%s_u:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, "root/multi_rnn_cell/cell_1/indy_lstm_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME ] self.assertEqual(expected_variable_names, [v.name for v in cell.trainable_variables]) self.assertFalse(cell.non_trainable_variables) sess.run([variables.global_variables_initializer()]) res = sess.run( [g, out_state_0, out_state_1], { x.name: np.array([[1., 1.]]), state_0[0].name: 0.1 * np.ones([1, 2]), state_0[1].name: 0.1 * np.ones([1, 2]), state_1[0].name: 0.1 * np.ones([1, 2]), state_1[1].name: 0.1 * np.ones([1, 2]), }) self.assertEqual(len(res), 3) global_variables = variables.global_variables() self.assertEqual(expected_variable_names, [v.name for v in global_variables]) # Only check the range of outputs as this is just a smoke test. 
self.assertAllInRange(res[0], -1.0, 1.0) self.assertAllInRange(res[1], -1.0, 1.0) self.assertAllInRange(res[2], -1.0, 1.0) with variable_scope.variable_scope( "other", initializer=init_ops.constant_initializer(0.5)): # Test IndyLSTMCell with input_size != num_units. x = array_ops.zeros([1, 3], dtype=dtype) state = (array_ops.zeros([1, 2], dtype=dtype),) * 2 g, out_state = contrib_rnn_cell.IndyLSTMCell(2)(x, state) sess.run([variables.global_variables_initializer()]) res = sess.run( [g, out_state], { x.name: np.array([[1., 1., 1.]], dtype=np_dtype), state[0].name: 0.1 * np.ones([1, 2], dtype=np_dtype), state[1].name: 0.1 * np.ones([1, 2], dtype=np_dtype), }) self.assertEqual(len(res), 2) def testLSTMCellLayerNorm(self): with self.cached_session() as sess: num_units = 2 num_proj = 3 batch_size = 1 input_size = 4 with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([batch_size, input_size]) c = array_ops.zeros([batch_size, num_units]) h = array_ops.zeros([batch_size, num_proj]) state = rnn_cell_impl.LSTMStateTuple(c, h) cell = contrib_rnn_cell.LayerNormLSTMCell( num_units=num_units, num_proj=num_proj, forget_bias=1.0, layer_norm=True, norm_gain=1.0, norm_shift=0.0) g, out_m = cell(x, state) sess.run([variables.global_variables_initializer()]) res = sess.run( [g, out_m], { x.name: np.ones((batch_size, input_size)), c.name: 0.1 * np.ones((batch_size, num_units)), h.name: 0.1 * np.ones((batch_size, num_proj)) }) self.assertEqual(len(res), 2) # The numbers in results were not calculated, this is mostly just a # smoke test. self.assertEqual(res[0].shape, (batch_size, num_proj)) self.assertEqual(res[1][0].shape, (batch_size, num_units)) self.assertEqual(res[1][1].shape, (batch_size, num_proj)) # Different inputs so different outputs and states for i in range(1, batch_size): self.assertTrue( float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) < 1e-6) self.assertTrue( float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) < 1e-6) def testOutputProjectionWrapper(self): with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 3]) m = array_ops.zeros([1, 3]) cell = legacy_rnn_cell.OutputProjectionWrapper( rnn_cell_impl.GRUCell(3), 2) g, new_m = cell(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g, new_m], { x.name: np.array([[1., 1., 1.]]), m.name: np.array([[0.1, 0.1, 0.1]]) }) self.assertEqual(res[1].shape, (1, 3)) # The numbers in results were not calculated, this is just a smoke test. self.assertAllClose(res[0], [[0.231907, 0.231907]]) def testInputProjectionWrapper(self): with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 2]) m = array_ops.zeros([1, 3]) cell = legacy_rnn_cell.InputProjectionWrapper( rnn_cell_impl.GRUCell(3), num_proj=3) g, new_m = cell(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g, new_m], { x.name: np.array([[1., 1.]]), m.name: np.array([[0.1, 0.1, 0.1]]) }) self.assertEqual(res[1].shape, (1, 3)) # The numbers in results were not calculated, this is just a smoke test. 
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]]) def testEmbeddingWrapper(self): with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 1], dtype=dtypes.int32) m = array_ops.zeros([1, 2]) embedding_cell = legacy_rnn_cell.EmbeddingWrapper( rnn_cell_impl.GRUCell(2), embedding_classes=3, embedding_size=2) self.assertEqual(embedding_cell.output_size, 2) g, new_m = embedding_cell(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g, new_m], { x.name: np.array([[1]]), m.name: np.array([[0.1, 0.1]]) }) self.assertEqual(res[1].shape, (1, 2)) # The numbers in results were not calculated, this is just a smoke test. self.assertAllClose(res[0], [[0.17139, 0.17139]]) def testEmbeddingWrapperWithDynamicRnn(self): with self.cached_session() as sess: with variable_scope.variable_scope("root"): inputs = ops.convert_to_tensor([[[0], [0]]], dtype=dtypes.int64) input_lengths = ops.convert_to_tensor([2], dtype=dtypes.int64) embedding_cell = legacy_rnn_cell.EmbeddingWrapper( rnn_cell_impl.BasicLSTMCell(1, state_is_tuple=True), embedding_classes=1, embedding_size=2) outputs, _ = rnn.dynamic_rnn( cell=embedding_cell, inputs=inputs, sequence_length=input_lengths, dtype=dtypes.float32) sess.run([variables.global_variables_initializer()]) # This will fail if output's dtype is inferred from input's. sess.run(outputs) def testSRUCell(self): with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 2]) m = array_ops.zeros([1, 2]) g, _ = contrib_rnn_cell.SRUCell(2)(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g], { x.name: np.array([[1., 1.]]), m.name: np.array([[0.1, 0.1]]) }) # Smoke test self.assertAllClose(res[0], [[0.509682, 0.509682]]) def testSRUCellKerasRNN(self): """Tests that SRUCell works with keras RNN layer.""" cell = contrib_rnn_cell.SRUCell(10) seq_input = ops.convert_to_tensor( np.random.rand(2, 3, 5), name="seq_input", dtype=dtypes.float32) rnn_layer = keras_layers.RNN(cell=cell) rnn_outputs_keras = rnn_layer(seq_input) with self.cached_session() as sess: sess.run([variables.global_variables_initializer()]) self.assertEqual(sess.run(rnn_outputs_keras).shape, (2, 10)) def testSRUCellBiasType(self): """Tests that the bias' dtype is properly set.""" cell = contrib_rnn_cell.SRUCell(10) cell.build((2, 3, 5)) self.assertEqual(cell._bias.dtype, dtypes.float32_ref) cell = contrib_rnn_cell.SRUCell(10, dtype=dtypes.int32) cell.build((2, 3, 5)) self.assertEqual(cell._bias.dtype, dtypes.int32_ref) cell_input = ops.convert_to_tensor( np.random.rand(2, 5), name="cell_input", dtype=dtypes.float16) cell_state = ops.convert_to_tensor( np.random.rand(2, 10), name="cell_state", dtype=dtypes.float16) cell = contrib_rnn_cell.SRUCell(10) cell(cell_input, [cell_state]) self.assertEqual(cell._bias.dtype, dtypes.float16_ref) def testSRUCellWithDiffSize(self): with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 3]) m = array_ops.zeros([1, 2]) g, _ = contrib_rnn_cell.SRUCell(2)(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g], { x.name: np.array([[1., 1., 1.]]), m.name: np.array([[0.1, 0.1]]) }) # Smoke test self.assertAllClose(res[0], [[0.55255556, 0.55255556]]) def testCoupledInputForgetGateLSTMCell(self): with self.cached_session() as sess: 
num_units = 2 state_size = num_units * 2 batch_size = 3 input_size = 4 expected_output = np.array( [[0.121753, 0.121753], [0.103349, 0.103349], [0.100178, 0.100178]], dtype=np.float32) expected_state = np.array( [[0.137523, 0.137523, 0.121753, 0.121753], [ 0.105450, 0.105450, 0.103349, 0.103349 ], [0.100742, 0.100742, 0.100178, 0.100178]], dtype=np.float32) with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([batch_size, input_size]) m = array_ops.zeros([batch_size, state_size]) output, state = contrib_rnn_cell.CoupledInputForgetGateLSTMCell( num_units=num_units, forget_bias=1.0, state_is_tuple=False)(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run( [output, state], { x.name: np.array([[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]]), m.name: 0.1 * np.ones((batch_size, state_size)) }) # This is a smoke test: Only making sure expected values didn't change. self.assertEqual(len(res), 2) self.assertAllClose(res[0], expected_output) self.assertAllClose(res[1], expected_state) def testTimeFreqLSTMCell(self): with self.cached_session() as sess: num_units = 8 state_size = num_units * 2 batch_size = 3 input_size = 4 feature_size = 2 frequency_skip = 1 num_shifts = (input_size - feature_size) // frequency_skip + 1 with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([batch_size, input_size]) m = array_ops.zeros([batch_size, state_size * num_shifts]) output, state = contrib_rnn_cell.TimeFreqLSTMCell( num_units=num_units, feature_size=feature_size, frequency_skip=frequency_skip, forget_bias=1.0)(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run( [output, state], { x.name: np.array([[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]]), m.name: 0.1 * np.ones((batch_size, int(state_size * (num_shifts)))) }) self.assertEqual(len(res), 2) # The numbers in results were not calculated, this is mostly just a # smoke test. self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts)) self.assertEqual(res[1].shape, (batch_size, state_size * num_shifts)) # Different inputs so different outputs and states for i in range(1, batch_size): self.assertTrue( float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6) self.assertTrue( float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6) def testGridLSTMCell(self): with self.cached_session() as sess: num_units = 8 batch_size = 3 input_size = 4 feature_size = 2 frequency_skip = 1 num_shifts = int((input_size - feature_size) / frequency_skip + 1) with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): cell = contrib_rnn_cell.GridLSTMCell( num_units=num_units, feature_size=feature_size, frequency_skip=frequency_skip, forget_bias=1.0, num_frequency_blocks=[num_shifts], couple_input_forget_gates=True, state_is_tuple=True) inputs = constant_op.constant( np.array( [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32), dtype=dtypes.float32) state_value = constant_op.constant( 0.1 * np.ones((batch_size, num_units), dtype=np.float32), dtype=dtypes.float32) init_state = cell.state_tuple_type(*( [state_value, state_value] * num_shifts)) output, state = cell(inputs, init_state) sess.run([variables.global_variables_initializer()]) res = sess.run([output, state]) self.assertEqual(len(res), 2) # The numbers in results were not calculated, this is mostly just a # smoke test. 
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 2)) for ss in res[1]: self.assertEqual(ss.shape, (batch_size, num_units)) # Different inputs so different outputs and states for i in range(1, batch_size): self.assertTrue( float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6) self.assertTrue( float( np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1] .state_f00_b00_c[i, :]))) > 1e-6) def testGridLSTMCellWithFrequencyBlocks(self): with self.cached_session() as sess: num_units = 8 batch_size = 3 feature_size = 2 frequency_skip = 1 num_frequency_blocks = [1, 1] total_blocks = num_frequency_blocks[0] + num_frequency_blocks[1] start_freqindex_list = [0, 2] end_freqindex_list = [2, 4] with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): cell = contrib_rnn_cell.GridLSTMCell( num_units=num_units, feature_size=feature_size, frequency_skip=frequency_skip, forget_bias=1.0, num_frequency_blocks=num_frequency_blocks, start_freqindex_list=start_freqindex_list, end_freqindex_list=end_freqindex_list, couple_input_forget_gates=True, state_is_tuple=True) inputs = constant_op.constant( np.array( [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32), dtype=dtypes.float32) state_value = constant_op.constant( 0.1 * np.ones((batch_size, num_units), dtype=np.float32), dtype=dtypes.float32) init_state = cell.state_tuple_type(*( [state_value, state_value] * total_blocks)) output, state = cell(inputs, init_state) sess.run([variables.global_variables_initializer()]) res = sess.run([output, state]) self.assertEqual(len(res), 2) # The numbers in results were not calculated, this is mostly just a # smoke test. self.assertEqual(res[0].shape, (batch_size, num_units * total_blocks * 2)) for ss in res[1]: self.assertEqual(ss.shape, (batch_size, num_units)) # Different inputs so different outputs and states for i in range(1, batch_size): self.assertTrue( float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6) self.assertTrue( float( np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1] .state_f00_b00_c[i, :]))) > 1e-6) def testGridLstmCellWithCoupledInputForgetGates(self): num_units = 2 batch_size = 3 input_size = 4 feature_size = 2 frequency_skip = 1 num_shifts = int((input_size - feature_size) / frequency_skip + 1) expected_output = np.array( [[ 0.416383, 0.416383, 0.403238, 0.403238, 0.524020, 0.524020, 0.565425, 0.565425, 0.557865, 0.557865, 0.609699, 0.609699 ], [ 0.627331, 0.627331, 0.622393, 0.622393, 0.688342, 0.688342, 0.708078, 0.708078, 0.694245, 0.694245, 0.715171, 0.715171 ], [ 0.711050, 0.711050, 0.709197, 0.709197, 0.736533, 0.736533, 0.744264, 0.744264, 0.737390, 0.737390, 0.745250, 0.745250 ]], dtype=np.float32) expected_state = np.array( [[ 0.625556, 0.625556, 0.416383, 0.416383, 0.759134, 0.759134, 0.524020, 0.524020, 0.798795, 0.798795, 0.557865, 0.557865 ], [ 0.875488, 0.875488, 0.627331, 0.627331, 0.936432, 0.936432, 0.688342, 0.688342, 0.941961, 0.941961, 0.694245, 0.694245 ], [ 0.957327, 0.957327, 0.711050, 0.711050, 0.979522, 0.979522, 0.736533, 0.736533, 0.980245, 0.980245, 0.737390, 0.737390 ]], dtype=np.float32) for state_is_tuple in [False, True]: with self.cached_session() as sess: with variable_scope.variable_scope( "state_is_tuple" + str(state_is_tuple), initializer=init_ops.constant_initializer(0.5)): cell = contrib_rnn_cell.GridLSTMCell( num_units=num_units, feature_size=feature_size, frequency_skip=frequency_skip, forget_bias=1.0, num_frequency_blocks=[num_shifts], couple_input_forget_gates=True, 
state_is_tuple=state_is_tuple) inputs = constant_op.constant( np.array( [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32), dtype=dtypes.float32) if state_is_tuple: state_value = constant_op.constant( 0.1 * np.ones((batch_size, num_units), dtype=np.float32), dtype=dtypes.float32) init_state = cell.state_tuple_type(*( [state_value, state_value] * num_shifts)) else: init_state = constant_op.constant( 0.1 * np.ones( (batch_size, num_units * num_shifts * 2), dtype=np.float32), dtype=dtypes.float32) output, state = cell(inputs, init_state) sess.run([variables.global_variables_initializer()]) res = sess.run([output, state]) # This is a smoke test: Only making sure expected values not change. self.assertEqual(len(res), 2) self.assertAllClose(res[0], expected_output) if not state_is_tuple: self.assertAllClose(res[1], expected_state) else: # There should be num_shifts * 2 states in the tuple. self.assertEqual(len(res[1]), num_shifts * 2) # Checking the shape of each state to be batch_size * num_units for ss in res[1]: self.assertEqual(ss.shape[0], batch_size) self.assertEqual(ss.shape[1], num_units) self.assertAllClose(np.concatenate(res[1], axis=1), expected_state) def testBidirectionGridLSTMCell(self): with self.cached_session() as sess: num_units = 2 batch_size = 3 input_size = 4 feature_size = 2 frequency_skip = 1 num_shifts = int((input_size - feature_size) / frequency_skip + 1) expected_output = np.array( [[ 0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283, 0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774, 0.520789, 0.520789, 0.476968, 0.476968, 0.604341, 0.604341, 0.760207, 0.760207, 0.635773, 0.635773, 0.850218, 0.850218 ], [ 0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057, 0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359, 0.692621, 0.692621, 0.652363, 0.652363, 0.737517, 0.737517, 0.899558, 0.899558, 0.745984, 0.745984, 0.946840, 0.946840 ], [ 0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357, 0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604, 0.759940, 0.759940, 0.720652, 0.720652, 0.778552, 0.778552, 0.941606, 0.941606, 0.781035, 0.781035, 0.977731, 0.977731 ]], dtype=np.float32) expected_state = np.array( [[ 0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293, 0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638, 0.785405, 0.785405, 0.520789, 0.520789, 0.890836, 0.890836, 0.604341, 0.604341, 0.928512, 0.928512, 0.635773, 0.635773 ], [ 0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811, 0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559, 0.993088, 0.993088, 0.692621, 0.692621, 1.040288, 1.040288, 0.737517, 0.737517, 1.048773, 1.048773, 0.745984, 0.745984 ], [ 1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919, 0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530, 1.062455, 1.062455, 0.759940, 0.759940, 1.080101, 1.080101, 0.778552, 0.778552, 1.082402, 1.082402, 0.781035, 0.781035 ]], dtype=np.float32) with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): cell = contrib_rnn_cell.BidirectionalGridLSTMCell( num_units=num_units, feature_size=feature_size, share_time_frequency_weights=True, frequency_skip=frequency_skip, forget_bias=1.0, num_frequency_blocks=[num_shifts]) inputs = constant_op.constant( np.array( [[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3], [3.0, 3.1, 3.2, 3.3]], dtype=np.float32), dtype=dtypes.float32) state_value = constant_op.constant( 0.1 * np.ones((batch_size, num_units), dtype=np.float32), 
dtype=dtypes.float32) init_state = cell.state_tuple_type(*( [state_value, state_value] * num_shifts * 2)) output, state = cell(inputs, init_state) sess.run([variables.global_variables_initializer()]) res = sess.run([output, state]) self.assertEqual(len(res), 2) # The numbers in results were not calculated, this is mostly just a # smoke test. self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4)) self.assertAllClose(res[0], expected_output) # There should be num_shifts * 4 states in the tuple. self.assertEqual(len(res[1]), num_shifts * 4) # Checking the shape of each state to be batch_size * num_units for ss in res[1]: self.assertEqual(ss.shape[0], batch_size) self.assertEqual(ss.shape[1], num_units) self.assertAllClose(np.concatenate(res[1], axis=1), expected_state) def testBidirectionGridLSTMCellWithSliceOffset(self): with self.cached_session() as sess: num_units = 2 batch_size = 3 input_size = 4 feature_size = 2 frequency_skip = 1 num_shifts = int((input_size - feature_size) / frequency_skip + 1) expected_output = np.array( [[ 0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283, 0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774, 0.322645, 0.322645, 0.276068, 0.276068, 0.584654, 0.584654, 0.690292, 0.690292, 0.640446, 0.640446, 0.840071, 0.840071 ], [ 0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057, 0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359, 0.493625, 0.493625, 0.449236, 0.449236, 0.730828, 0.730828, 0.865996, 0.865996, 0.749429, 0.749429, 0.944958, 0.944958 ], [ 0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357, 0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604, 0.608587, 0.608587, 0.566683, 0.566683, 0.777345, 0.777345, 0.925820, 0.925820, 0.782597, 0.782597, 0.976858, 0.976858 ]], dtype=np.float32) expected_state = np.array( [[ 0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293, 0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638, 0.516575, 0.516575, 0.322645, 0.322645, 0.866628, 0.866628, 0.584654, 0.584654, 0.934002, 0.934002, 0.640446, 0.640446 ], [ 0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811, 0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559, 0.749836, 0.749836, 0.493625, 0.493625, 1.033488, 1.033488, 0.730828, 0.730828, 1.052186, 1.052186, 0.749429, 0.749429 ], [ 1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919, 0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530, 0.895999, 0.895999, 0.608587, 0.608587, 1.078978, 1.078978, 0.777345, 0.777345, 1.083843, 1.083843, 0.782597, 0.782597 ]], dtype=np.float32) with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): cell = contrib_rnn_cell.BidirectionalGridLSTMCell( num_units=num_units, feature_size=feature_size, share_time_frequency_weights=True, frequency_skip=frequency_skip, forget_bias=1.0, num_frequency_blocks=[num_shifts], backward_slice_offset=1) inputs = constant_op.constant( np.array( [[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3], [3.0, 3.1, 3.2, 3.3]], dtype=np.float32), dtype=dtypes.float32) state_value = constant_op.constant( 0.1 * np.ones((batch_size, num_units), dtype=np.float32), dtype=dtypes.float32) init_state = cell.state_tuple_type(*( [state_value, state_value] * num_shifts * 2)) output, state = cell(inputs, init_state) sess.run([variables.global_variables_initializer()]) res = sess.run([output, state]) self.assertEqual(len(res), 2) # The numbers in results were not calculated, this is mostly just a # smoke test. 
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4)) self.assertAllClose(res[0], expected_output) # There should be num_shifts * 4 states in the tuple. self.assertEqual(len(res[1]), num_shifts * 4) # Checking the shape of each state to be batch_size * num_units for ss in res[1]: self.assertEqual(ss.shape[0], batch_size) self.assertEqual(ss.shape[1], num_units) self.assertAllClose(np.concatenate(res[1], axis=1), expected_state) def testAttentionCellWrapperFailures(self): with self.assertRaisesRegexp( TypeError, rnn_cell_impl.ASSERT_LIKE_RNNCELL_ERROR_REGEXP): contrib_rnn_cell.AttentionCellWrapper(None, 0) num_units = 8 for state_is_tuple in [False, True]: with ops.Graph().as_default(): lstm_cell = rnn_cell.BasicLSTMCell( num_units, state_is_tuple=state_is_tuple) with self.assertRaisesRegexp( ValueError, "attn_length should be greater than zero, got 0"): contrib_rnn_cell.AttentionCellWrapper( lstm_cell, 0, state_is_tuple=state_is_tuple) with self.assertRaisesRegexp( ValueError, "attn_length should be greater than zero, got -1"): contrib_rnn_cell.AttentionCellWrapper( lstm_cell, -1, state_is_tuple=state_is_tuple) with ops.Graph().as_default(): lstm_cell = rnn_cell.BasicLSTMCell(num_units, state_is_tuple=True) with self.assertRaisesRegexp( ValueError, "Cell returns tuple of states, but the flag " "state_is_tuple is not set. State size is: *"): contrib_rnn_cell.AttentionCellWrapper( lstm_cell, 4, state_is_tuple=False) def testAttentionCellWrapperZeros(self): num_units = 8 attn_length = 16 batch_size = 3 input_size = 4 for state_is_tuple in [False, True]: with ops.Graph().as_default(): with self.cached_session() as sess: with variable_scope.variable_scope( "state_is_tuple_" + str(state_is_tuple)): lstm_cell = rnn_cell.BasicLSTMCell( num_units, state_is_tuple=state_is_tuple) cell = contrib_rnn_cell.AttentionCellWrapper( lstm_cell, attn_length, state_is_tuple=state_is_tuple) if state_is_tuple: zeros = array_ops.zeros([batch_size, num_units], dtype=np.float32) attn_state_zeros = array_ops.zeros( [batch_size, attn_length * num_units], dtype=np.float32) zero_state = ((zeros, zeros), zeros, attn_state_zeros) else: zero_state = array_ops.zeros( [ batch_size, num_units * 2 + attn_length * num_units + num_units ], dtype=np.float32) inputs = array_ops.zeros( [batch_size, input_size], dtype=dtypes.float32) output, state = cell(inputs, zero_state) self.assertEquals(output.get_shape(), [batch_size, num_units]) if state_is_tuple: self.assertEquals(len(state), 3) self.assertEquals(len(state[0]), 2) self.assertEquals(state[0][0].get_shape(), [batch_size, num_units]) self.assertEquals(state[0][1].get_shape(), [batch_size, num_units]) self.assertEquals(state[1].get_shape(), [batch_size, num_units]) self.assertEquals(state[2].get_shape(), [batch_size, attn_length * num_units]) tensors = [output] + list(state) else: self.assertEquals(state.get_shape(), [ batch_size, num_units * 2 + num_units + attn_length * num_units ]) tensors = [output, state] zero_result = sum( [math_ops.reduce_sum(math_ops.abs(x)) for x in tensors]) sess.run(variables.global_variables_initializer()) self.assertTrue(sess.run(zero_result) < 1e-6) def testAttentionCellWrapperValues(self): num_units = 8 attn_length = 16 batch_size = 3 for state_is_tuple in [False, True]: with ops.Graph().as_default(): with self.cached_session() as sess: with variable_scope.variable_scope( "state_is_tuple_" + str(state_is_tuple)): lstm_cell = rnn_cell.BasicLSTMCell( num_units, state_is_tuple=state_is_tuple) cell = 
contrib_rnn_cell.AttentionCellWrapper( lstm_cell, attn_length, state_is_tuple=state_is_tuple) if state_is_tuple: zeros = constant_op.constant( 0.1 * np.ones([batch_size, num_units], dtype=np.float32), dtype=dtypes.float32) attn_state_zeros = constant_op.constant( 0.1 * np.ones( [batch_size, attn_length * num_units], dtype=np.float32), dtype=dtypes.float32) zero_state = ((zeros, zeros), zeros, attn_state_zeros) else: zero_state = constant_op.constant( 0.1 * np.ones( [ batch_size, num_units * 2 + num_units + attn_length * num_units ], dtype=np.float32), dtype=dtypes.float32) inputs = constant_op.constant( np.array( [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32), dtype=dtypes.float32) output, state = cell(inputs, zero_state) if state_is_tuple: concat_state = array_ops.concat( [state[0][0], state[0][1], state[1], state[2]], 1) else: concat_state = state sess.run(variables.global_variables_initializer()) output, state = sess.run([output, concat_state]) # Different inputs so different outputs and states for i in range(1, batch_size): self.assertTrue( float(np.linalg.norm((output[0, :] - output[i, :]))) > 1e-6) self.assertTrue( float(np.linalg.norm((state[0, :] - state[i, :]))) > 1e-6) def _testAttentionCellWrapperCorrectResult(self): num_units = 4 attn_length = 6 batch_size = 2 expected_output = np.array( [[1.068372, 0.45496, -0.678277, 0.340538], [1.018088, 0.378983, -0.572179, 0.268591]], dtype=np.float32) expected_state = np.array( [[ 0.74946702, 0.34681597, 0.26474735, 1.06485605, 0.38465962, 0.11420801, 0.10272158, 0.30925757, 0.63899988, 0.7181077, 0.47534478, 0.33715725, 0.58086717, 0.49446869, 0.7641536, 0.12814975, 0.92231739, 0.89857256, 0.21889746, 0.38442063, 0.53481543, 0.8876909, 0.45823169, 0.5905602, 0.78038228, 0.56501579, 0.03971386, 0.09870267, 0.8074435, 0.66821432, 0.99211812, 0.12295902, 1.14606023, 0.34370938, -0.79251152, 0.51843399 ], [ 0.5179342, 0.48682183, -0.25426468, 0.96810579, 0.28809637, 0.13607743, -0.11446252, 0.26792109, 0.78047138, 0.63460857, 0.49122369, 0.52007174, 0.73000264, 0.66986895, 0.73576689, 0.86301267, 0.87887371, 0.35185754, 0.93417215, 0.64732957, 0.63173044, 0.66627824, 0.53644657, 0.20477486, 0.98458421, 0.38277245, 0.03746676, 0.92510188, 0.57714164, 0.84932971, 0.36127412, 0.12125921, 1.1362772, 0.34361625, -0.78150457, 0.70582712 ]], dtype=np.float32) seed = 12345 random_seed.set_random_seed(seed) rnn_scope = None for state_is_tuple in [False, True]: with session.Session() as sess: with variable_scope.variable_scope( "state_is_tuple", reuse=state_is_tuple, initializer=init_ops.glorot_uniform_initializer()): lstm_cell = rnn_cell.BasicLSTMCell( num_units, state_is_tuple=state_is_tuple) cell = contrib_rnn_cell.AttentionCellWrapper( lstm_cell, attn_length, state_is_tuple=state_is_tuple) # This is legacy behavior to preserve the test. Weight # sharing no longer works by creating a new RNNCell in the # same variable scope; so here we restore the scope of the # RNNCells after the first use below. 
if rnn_scope is not None: (cell._scope, lstm_cell._scope) = rnn_scope # pylint: disable=protected-access,unpacking-non-sequence zeros1 = random_ops.random_uniform( (batch_size, num_units), 0.0, 1.0, seed=seed + 1) zeros2 = random_ops.random_uniform( (batch_size, num_units), 0.0, 1.0, seed=seed + 2) zeros3 = random_ops.random_uniform( (batch_size, num_units), 0.0, 1.0, seed=seed + 3) attn_state_zeros = random_ops.random_uniform( (batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4) zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros) if not state_is_tuple: zero_state = array_ops.concat([ zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2] ], 1) inputs = random_ops.random_uniform( (batch_size, num_units), 0.0, 1.0, seed=seed + 5) output, state = cell(inputs, zero_state) # This is legacy behavior to preserve the test. Weight # sharing no longer works by creating a new RNNCell in the # same variable scope; so here we store the scope of the # first RNNCell for reuse above. if rnn_scope is None: rnn_scope = (cell._scope, lstm_cell._scope) # pylint: disable=protected-access if state_is_tuple: state = array_ops.concat( [state[0][0], state[0][1], state[1], state[2]], 1) sess.run(variables.global_variables_initializer()) self.assertAllClose(sess.run(output), expected_output) self.assertAllClose(sess.run(state), expected_state) def testNASCell(self): num_units = 6 batch_size = 3 expected_output = np.array( [[0.576751, 0.576751, 0.576751, 0.576751, 0.576751, 0.576751], [0.618936, 0.618936, 0.618936, 0.618936, 0.618936, 0.618936], [0.627393, 0.627393, 0.627393, 0.627393, 0.627393, 0.627393]]) expected_state = np.array([[ 0.71579772, 0.71579772, 0.71579772, 0.71579772, 0.71579772, 0.71579772, 0.57675087, 0.57675087, 0.57675087, 0.57675087, 0.57675087, 0.57675087 ], [ 0.78041625, 0.78041625, 0.78041625, 0.78041625, 0.78041625, 0.78041625, 0.6189357, 0.6189357, 0.61893570, 0.6189357, 0.6189357, 0.6189357 ], [ 0.79457647, 0.79457647, 0.79457647, 0.79457647, 0.79457653, 0.79457653, 0.62739348, 0.62739348, 0.62739348, 0.62739348, 0.62739348, 0.62739348 ]]) with self.cached_session() as sess: with variable_scope.variable_scope( "nas_test", initializer=init_ops.constant_initializer(0.5)): cell = contrib_rnn_cell.NASCell(num_units=num_units) inputs = constant_op.constant( np.array( [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32), dtype=dtypes.float32) state_value = constant_op.constant( 0.1 * np.ones((batch_size, num_units), dtype=np.float32), dtype=dtypes.float32) init_state = rnn_cell.LSTMStateTuple(state_value, state_value) output, state = cell(inputs, init_state) sess.run([variables.global_variables_initializer()]) res = sess.run([output, state]) # This is a smoke test: Only making sure expected values not change. self.assertEqual(len(res), 2) self.assertAllClose(res[0], expected_output) # There should be 2 states in the tuple. 
self.assertEqual(len(res[1]), 2) # Checking the shape of each state to be batch_size * num_units new_c, new_h = res[1] self.assertEqual(new_c.shape[0], batch_size) self.assertEqual(new_c.shape[1], num_units) self.assertEqual(new_h.shape[0], batch_size) self.assertEqual(new_h.shape[1], num_units) self.assertAllClose(np.concatenate(res[1], axis=1), expected_state) def testNASCellProj(self): num_units = 6 batch_size = 3 num_proj = 5 expected_output = np.array( [[1.697418, 1.697418, 1.697418, 1.697418, 1.697418], [1.840037, 1.840037, 1.840037, 1.840037, 1.840037], [1.873985, 1.873985, 1.873985, 1.873985, 1.873985]]) expected_state = np.array([[ 0.69855207, 0.69855207, 0.69855207, 0.69855207, 0.69855207, 0.69855207, 1.69741797, 1.69741797, 1.69741797, 1.69741797, 1.69741797 ], [ 0.77073824, 0.77073824, 0.77073824, 0.77073824, 0.77073824, 0.77073824, 1.84003687, 1.84003687, 1.84003687, 1.84003687, 1.84003687 ], [ 0.78973997, 0.78973997, 0.78973997, 0.78973997, 0.78973997, 0.78973997, 1.87398517, 1.87398517, 1.87398517, 1.87398517, 1.87398517 ]]) with self.cached_session() as sess: with variable_scope.variable_scope( "nas_proj_test", initializer=init_ops.constant_initializer(0.5)): cell = contrib_rnn_cell.NASCell(num_units=num_units, num_proj=num_proj) inputs = constant_op.constant( np.array( [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32), dtype=dtypes.float32) state_value_c = constant_op.constant( 0.1 * np.ones((batch_size, num_units), dtype=np.float32), dtype=dtypes.float32) state_value_h = constant_op.constant( 0.1 * np.ones((batch_size, num_proj), dtype=np.float32), dtype=dtypes.float32) init_state = rnn_cell.LSTMStateTuple(state_value_c, state_value_h) output, state = cell(inputs, init_state) sess.run([variables.global_variables_initializer()]) res = sess.run([output, state]) # This is a smoke test: Only making sure expected values not change. self.assertEqual(len(res), 2) self.assertAllClose(res[0], expected_output) # There should be 2 states in the tuple. 
self.assertEqual(len(res[1]), 2) # Checking the shape of each state to be batch_size * num_units new_c, new_h = res[1] self.assertEqual(new_c.shape[0], batch_size) self.assertEqual(new_c.shape[1], num_units) self.assertEqual(new_h.shape[0], batch_size) self.assertEqual(new_h.shape[1], num_proj) self.assertAllClose(np.concatenate(res[1], axis=1), expected_state) @test_util.run_in_graph_and_eager_modes def testNASCellKerasRNN(self): """Tests that NASCell works with keras RNN layer.""" cell = contrib_rnn_cell.NASCell(10) seq_input = ops.convert_to_tensor( np.random.rand(2, 3, 5), name="seq_input", dtype=dtypes.float32) rnn_layer = keras_layers.RNN(cell=cell) rnn_outputs = rnn_layer(seq_input) self.evaluate([variables.global_variables_initializer()]) self.assertEqual(self.evaluate(rnn_outputs).shape, (2, 10)) def testUGRNNCell(self): num_units = 2 batch_size = 3 expected_state_and_output = np.array( [[0.13752282, 0.13752282], [0.10545051, 0.10545051], [0.10074195, 0.10074195]], dtype=np.float32) with self.cached_session() as sess: with variable_scope.variable_scope( "ugrnn_cell_test", initializer=init_ops.constant_initializer(0.5)): cell = contrib_rnn_cell.UGRNNCell(num_units=num_units) inputs = constant_op.constant( np.array( [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32), dtype=dtypes.float32) init_state = constant_op.constant( 0.1 * np.ones((batch_size, num_units), dtype=np.float32), dtype=dtypes.float32) output, state = cell(inputs, init_state) sess.run([variables.global_variables_initializer()]) res = sess.run([output, state]) # This is a smoke test: Only making sure expected values didn't change. self.assertEqual(len(res), 2) self.assertAllClose(res[0], expected_state_and_output) self.assertAllClose(res[1], expected_state_and_output) def testIntersectionRNNCell(self): num_units = 2 batch_size = 3 expected_state = np.array( [[0.13752282, 0.13752282], [0.10545051, 0.10545051], [0.10074195, 0.10074195]], dtype=np.float32) expected_output = np.array( [[2.00431061, 2.00431061], [4.00060606, 4.00060606], [6.00008249, 6.00008249]], dtype=np.float32) with self.cached_session() as sess: with variable_scope.variable_scope( "intersection_rnn_cell_test", initializer=init_ops.constant_initializer(0.5)): cell = contrib_rnn_cell.IntersectionRNNCell( num_units=num_units, num_in_proj=num_units) inputs = constant_op.constant( np.array( [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32), dtype=dtypes.float32) init_state = constant_op.constant( 0.1 * np.ones((batch_size, num_units), dtype=np.float32), dtype=dtypes.float32) output, state = cell(inputs, init_state) sess.run([variables.global_variables_initializer()]) res = sess.run([output, state]) # This is a smoke test: Only making sure expected values didn't change. self.assertEqual(len(res), 2) self.assertAllClose(res[0], expected_output) self.assertAllClose(res[1], expected_state) def testIntersectionRNNCellFailure(self): num_units = 2 batch_size = 3 cell = contrib_rnn_cell.IntersectionRNNCell(num_units=num_units) inputs = constant_op.constant( np.array( [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32), dtype=dtypes.float32) init_state = constant_op.constant( 0.1 * np.ones((batch_size, num_units), dtype=np.float32), dtype=dtypes.float32) with self.assertRaisesRegexp(ValueError, "Must have input size == output size for " "Intersection RNN. 
To fix, num_in_proj should " "be set to num_units at cell init."): cell(inputs, init_state) def testPhasedLSTMCell(self): with self.cached_session() as sess: num_units = 2 batch_size = 3 input_size = 4 expected_state_c = np.array( [[0.00072015, 0.00036633], [0.00083481, 0.00047266], [0.00085111, 0.00053054]], dtype=np.float32) expected_state_h = np.array( [[0.0005159, 0.00026243], [0.00062958, 0.00035646], [0.00064732, 0.00040351]], dtype=np.float32) with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): t = array_ops.zeros([batch_size, 1], dtype=dtypes.float64) x = array_ops.zeros([batch_size, input_size]) c0 = array_ops.zeros([batch_size, 2]) h0 = array_ops.zeros([batch_size, 2]) state0 = rnn_cell.LSTMStateTuple(c0, h0) output, state = contrib_rnn_cell.PhasedLSTMCell(num_units=num_units)( (t, x), state0) sess.run([variables.global_variables_initializer()]) res = sess.run( [output, state], { t.name: np.array([[1.], [2.], [3.]]), x.name: np.array([[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]]), }) # This is a smoke test, making sure expected values are unchanged. self.assertEqual(len(res), 2) self.assertAllClose(res[0], res[1].h) self.assertAllClose(res[1].c, expected_state_c) self.assertAllClose(res[1].h, expected_state_h) def testConv1DLSTMCell(self): with self.cached_session() as sess: shape = [2, 1] filter_size = [3] num_features = 1 expected_state_c = np.array( [[[1.4375670191], [1.4375670191]], [[2.7542609292], [2.7542609292]]], dtype=np.float32) expected_state_h = np.array( [[[0.6529865603], [0.6529865603]], [[0.8736877431], [0.8736877431]]], dtype=np.float32) with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(1.0 / 2.0)): x = array_ops.placeholder(dtypes.float32, [None, None, 1]) cell = contrib_rnn_cell.Conv1DLSTMCell( input_shape=shape, kernel_shape=filter_size, output_channels=num_features) hidden = cell.zero_state(array_ops.shape(x)[0], dtypes.float32) output, state = cell(x, hidden) sess.run([variables.global_variables_initializer()]) res = sess.run( [output, state], { hidden[0].name: np.array([[[1.], [1.]], [[2.], [2.]]]), x.name: np.array([[[1.], [1.]], [[2.], [2.]]]), }) # This is a smoke test, making sure expected values are unchanged. 
self.assertEqual(len(res), 2) self.assertAllClose(res[0], res[1].h) self.assertAllClose(res[1].c, expected_state_c) self.assertAllClose(res[1].h, expected_state_h) def testConv2DLSTMCell(self): with self.cached_session() as sess: shape = [2, 2, 1] filter_size = [3, 3] num_features = 1 expected_state_c = np.array( [[[[1.4375670191], [1.4375670191]], [[1.4375670191], [1.4375670191]]], [[[2.7542609292], [2.7542609292]], [[2.7542609292], [2.7542609292]] ]], dtype=np.float32) expected_state_h = np.array( [[[[0.6529865603], [0.6529865603]], [[0.6529865603], [0.6529865603]]], [[[0.8736877431], [0.8736877431]], [[0.8736877431], [0.8736877431]] ]], dtype=np.float32) with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(1.0 / 4.0)): x = array_ops.placeholder(dtypes.float32, [None, None, None, 1]) cell = contrib_rnn_cell.Conv2DLSTMCell( input_shape=shape, kernel_shape=filter_size, output_channels=num_features) hidden = cell.zero_state(array_ops.shape(x)[0], dtypes.float32) output, state = cell(x, hidden) sess.run([variables.global_variables_initializer()]) res = sess.run( [output, state], { hidden[0].name: np.array([[[[1.], [1.]], [[1.], [1.]]], [[[2.], [2.]], [[2.], [2.]]]]), x.name: np.array([[[[1.], [1.]], [[1.], [1.]]], [[[2.], [2.]], [[2.], [2.]]]]), }) # This is a smoke test, making sure expected values are unchanged. self.assertEqual(len(res), 2) self.assertAllClose(res[0], res[1].h) self.assertAllClose(res[1].c, expected_state_c) self.assertAllClose(res[1].h, expected_state_h) def testConv3DLSTMCell(self): with self.cached_session() as sess: shape = [2, 2, 2, 1] filter_size = [3, 3, 3] num_features = 1 expected_state_c = np.array( [[[[[1.4375670191], [1.4375670191]], [[1.4375670191], [1.4375670191]] ], [[[1.4375670191], [1.4375670191]], [[1.4375670191], [1.4375670191]]]], [[[[2.7542609292], [2.7542609292]], [[2.7542609292], [2.7542609292]] ], [[[2.7542609292], [2.7542609292]], [[2.7542609292], [2.7542609292]]]]], dtype=np.float32) expected_state_h = np.array( [[[[[0.6529865603], [0.6529865603]], [[0.6529865603], [0.6529865603]] ], [[[0.6529865603], [0.6529865603]], [[0.6529865603], [0.6529865603]]]], [[[[0.8736877431], [0.8736877431]], [[0.8736877431], [0.8736877431]] ], [[[0.8736877431], [0.8736877431]], [[0.8736877431], [0.8736877431]]]]], dtype=np.float32) with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(1.0 / 8.0)): x = array_ops.placeholder(dtypes.float32, [None, None, None, None, 1]) cell = contrib_rnn_cell.Conv3DLSTMCell( input_shape=shape, kernel_shape=filter_size, output_channels=num_features) hidden = cell.zero_state(array_ops.shape(x)[0], dtypes.float32) output, state = cell(x, hidden) sess.run([variables.global_variables_initializer()]) res = sess.run( [output, state], { hidden[0].name: np.array([[[[[1.], [1.]], [[1.], [1.]]], [[[1.], [1.]], [[ 1. ], [1.]]]], [[[[2.], [2.]], [[2.], [2.]]], [[[2.], [2.]], [[2.], [2.]]]]]), x.name: np.array([[[[[1.], [1.]], [[1.], [1.]]], [[[1.], [1.]], [[ 1. ], [1.]]]], [[[[2.], [2.]], [[2.], [2.]]], [[[2.], [2.]], [[2.], [2.]]]]]) }) # This is a smoke test, making sure expected values are unchanged. 
self.assertEqual(len(res), 2) self.assertAllClose(res[0], res[1].h) self.assertAllClose(res[1].c, expected_state_c) self.assertAllClose(res[1].h, expected_state_h) def testHighwayWrapper(self): with self.cached_session() as sess: with variable_scope.variable_scope( "base_cell", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 3]) m = array_ops.zeros([1, 3]) base_cell = rnn_cell.GRUCell(3) g, m_new = base_cell(x, m) with variable_scope.variable_scope( "hw_cell", initializer=init_ops.constant_initializer(0.5)): hw_cell = contrib_rnn_cell.HighwayWrapper( rnn_cell.GRUCell(3), carry_bias_init=-100.0) g_res, m_new_res = hw_cell(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g, g_res, m_new, m_new_res], { x: np.array([[1., 1., 1.]]), m: np.array([[0.1, 0.1, 0.1]]) }) # As carry_bias_init is very negative, the carry gate is 'open' and the # transform gate is 'closed'. This means the output equals the input. self.assertAllClose(res[1], res[0]) # States are left untouched self.assertAllClose(res[2], res[3]) def testGLSTMCell(self): # Ensure that G-LSTM matches LSTM when number_of_groups = 1 batch_size = 2 num_units = 4 number_of_groups = 1 # Try with input dimension equal to num_units or not. for num_inputs in [num_units, num_units + number_of_groups]: with self.cached_session() as sess: with variable_scope.variable_scope( "root1_%d" % num_inputs, initializer=init_ops.constant_initializer(0.5)): x = array_ops.ones([batch_size, num_inputs]) # When number_of_groups = 1, G-LSTM is equivalent to regular LSTM gcell = contrib_rnn_cell.GLSTMCell( num_units=num_units, number_of_groups=number_of_groups) cell = rnn_cell.LSTMCell(num_units=num_units) self.assertTrue(isinstance(gcell.state_size, tuple)) zero_state = gcell.zero_state( batch_size=batch_size, dtype=dtypes.float32) gh, gs = gcell(x, zero_state) h, g = cell(x, zero_state) sess.run([variables.global_variables_initializer()]) glstm_result = sess.run([gh, gs]) lstm_result = sess.run([h, g]) self.assertAllClose(glstm_result[0], lstm_result[0], 1e-5) self.assertAllClose(glstm_result[1], lstm_result[1], 1e-5) # Test that G-LSTM subgroup act like corresponding sub-LSTMs batch_size = 2 num_units = 4 number_of_groups = 2 # Try with num_inputs equal to or not equal to num_units. for num_inputs in [num_units, num_units + number_of_groups]: with self.cached_session() as sess: with variable_scope.variable_scope( "root2_%d" % num_inputs, initializer=init_ops.constant_initializer(0.5)): # input for G-LSTM with 2 groups glstm_input = array_ops.ones([batch_size, num_inputs]) gcell = contrib_rnn_cell.GLSTMCell( num_units=num_units, number_of_groups=number_of_groups) gcell_zero_state = gcell.zero_state( batch_size=batch_size, dtype=dtypes.float32) gh, gs = gcell(glstm_input, gcell_zero_state) # input for LSTM cell simulating single G-LSTM group lstm_input = array_ops.ones( [batch_size, num_inputs / number_of_groups]) # note division by number_of_groups. 
This cell one simulates G-LSTM # group cell = rnn_cell.LSTMCell(num_units=int(num_units / number_of_groups)) cell_zero_state = cell.zero_state( batch_size=batch_size, dtype=dtypes.float32) h, g = cell(lstm_input, cell_zero_state) sess.run([variables.global_variables_initializer()]) [gh_res, h_res] = sess.run([gh, h]) self.assertAllClose(gh_res[:, 0:int(num_units / number_of_groups)], h_res, 1e-5) self.assertAllClose(gh_res[:, int(num_units / number_of_groups):], h_res, 1e-5) def testGLSTMCellFailure(self): batch_size = 2 num_units = 4 number_of_groups = 2 with self.cached_session(): with variable_scope.variable_scope( "glstm_failure", initializer=init_ops.constant_initializer(0.5)): gcell = contrib_rnn_cell.GLSTMCell( num_units=num_units, number_of_groups=number_of_groups) gcell_zero_state = gcell.zero_state( batch_size=batch_size, dtype=dtypes.float32) # Try an input with statically-unknown innermost dimension. glstm_input = array_ops.placeholder( dtypes.float32, shape=[batch_size, None]) with self.assertRaisesRegexp(ValueError, "input size must be statically known"): gcell(glstm_input, gcell_zero_state) # Try an input whose innermost dimension isn't divisible into groups. glstm_input = array_ops.placeholder( dtypes.float32, shape=[batch_size, 3]) with self.assertRaisesRegexp( ValueError, r"input size \(3\) must be divisible by number_of_groups \(2\)"): gcell(glstm_input, gcell_zero_state) def testCFNCell(self): with self.cached_session() as sess: with variable_scope.variable_scope("root"): x = array_ops.zeros([1, 2]) m = array_ops.zeros([1, 2]) cell = contrib_rnn_cell.CFNCell( units=2, kernel_initializer=initializers.Constant(0.5)) g, _ = cell(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g], { x.name: np.array([[1., 1.]]), m.name: np.array([[0.1, 0.1]]) }) # Smoke test self.assertAllClose(res[0], [[0.17188203, 0.17188203]]) with variable_scope.variable_scope("other"): # Test CFN with input_size != num_units. 
x = array_ops.zeros([1, 3]) m = array_ops.zeros([1, 2]) cell = contrib_rnn_cell.CFNCell( units=2, kernel_initializer=initializers.Constant(0.5)) g, _ = cell(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g], { x.name: np.array([[1., 1., 1.]]), m.name: np.array([[0.1, 0.1]]) }) # Smoke test self.assertAllClose(res[0], [[0.15535763, 0.15535763]]) def testCFNCellEndToEnd(self): with self.cached_session() as sess: input_shape = 10 output_shape = 5 timestep = 4 batch = 100 (x_train, y_train), _ = testing_utils.get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) y_train = utils.to_categorical(y_train) cell = contrib_rnn_cell.CFNCell(output_shape) inputs = array_ops.placeholder( dtypes.float32, shape=(None, timestep, input_shape)) predict = array_ops.placeholder( dtypes.float32, shape=(None, output_shape)) outputs, state = rnn.dynamic_rnn( cell, inputs, dtype=dtypes.float32) self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape]) self.assertEqual(state.shape.as_list(), [None, output_shape]) loss = losses.softmax_cross_entropy(predict, state) train_op = training.GradientDescentOptimizer(0.001).minimize(loss) sess.run([variables.global_variables_initializer()]) _, outputs, state = sess.run( [train_op, outputs, state], {inputs: x_train, predict: y_train}) self.assertEqual(len(outputs), batch) self.assertEqual(len(state), batch) def testMinimalRNNCell(self): with self.cached_session() as sess: with variable_scope.variable_scope( "root"): x = array_ops.zeros([1, 2]) m = array_ops.zeros([1, 2]) cell = contrib_rnn_cell.MinimalRNNCell( units=2, kernel_initializer=initializers.Constant(0.5)) g, _ = cell(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g], { x.name: np.array([[1., 1.]]), m.name: np.array([[0.1, 0.1]]) }) # Smoke test self.assertAllClose(res[0], [[0.18899589, 0.18899589]]) with variable_scope.variable_scope( "other"): # Test MinimalRNN with input_size != num_units. 
x = array_ops.zeros([1, 3]) m = array_ops.zeros([1, 2]) cell = contrib_rnn_cell.MinimalRNNCell( units=2, kernel_initializer=initializers.Constant(0.5)) g, _ = cell(x, m) sess.run([variables.global_variables_initializer()]) res = sess.run([g], { x.name: np.array([[1., 1., 1.]]), m.name: np.array([[0.1, 0.1]]) }) # Smoke test self.assertAllClose(res[0], [[0.19554167, 0.19554167]]) def testMinimalRNNCellEndToEnd(self): with self.cached_session() as sess: input_shape = 10 output_shape = 5 timestep = 4 batch = 100 (x_train, y_train), _ = testing_utils.get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) y_train = utils.to_categorical(y_train) cell = contrib_rnn_cell.MinimalRNNCell(output_shape) inputs = array_ops.placeholder( dtypes.float32, shape=(None, timestep, input_shape)) predict = array_ops.placeholder( dtypes.float32, shape=(None, output_shape)) outputs, state = rnn.dynamic_rnn( cell, inputs, dtype=dtypes.float32) self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape]) self.assertEqual(state.shape.as_list(), [None, output_shape]) loss = losses.softmax_cross_entropy(predict, state) train_op = training.GradientDescentOptimizer(0.001).minimize(loss) sess.run([variables.global_variables_initializer()]) _, outputs, state = sess.run( [train_op, outputs, state], {inputs: x_train, predict: y_train}) self.assertEqual(len(outputs), batch) self.assertEqual(len(state), batch) def testNTMCell(self): expected_output = np.array( [[-0.04973561, -0.00020032, -0.09586009, -0.05049511], [-0.02199885, 0.02302885, -0.05558189, -0.02051288], [-0.01399924, 0.02543444, -0.06975862, -0.03782758], [-0.02238393, 0.0135776, -0.09102941, -0.05594013]], dtype=np.float32) expected_read_vector_list = np.array( [[1e-6, 1e-6, 1e-6, 1e-6], [1e-6, 1e-6, 1e-6, 1e-6], [1e-6, 1e-6, 1e-6, 1e-6], [1e-6, 1e-6, 1e-6, 1e-6]], dtype=np.float32) expected_w_list = np.array( [[[0.15837428, 0.21354634, 0.22115856, 0.21117255, 0.19574821], [0.15826838, 0.2150458, 0.2228198, 0.20747298, 0.19639312], [0.15750293, 0.21550071, 0.22280747, 0.20737495, 0.19681393], [0.15763053, 0.21473582, 0.22187267, 0.20920397, 0.19655706]], [[0.21703579, 0.19425659, 0.22143759, 0.18024713, 0.18702294], [0.2164267, 0.19451937, 0.22112325, 0.18051708, 0.18741359], [0.21567065, 0.1947548, 0.22107735, 0.18058982, 0.18790732], [0.2163743, 0.194361, 0.22131558, 0.18042919, 0.1875199]]], dtype=np.float32) expected_M_0 = np.array( [[-0.00553495, -0.01089884, 0.00683121, -0.00273276], [-0.00495392, -0.00975483, 0.00611433, -0.00244583], [-0.00564722, -0.0111199, 0.00696973, -0.0027882], [-0.00459658, -0.00905126, 0.00567345, -0.00226937], [-0.00476941, -0.00939155, 0.00588669, -0.00235472]], dtype=np.float32) with session.Session() as sess: with variable_scope.variable_scope("root"): seed = 1234 random_seed.set_random_seed(seed) batch_size = 4 inputs = random_ops.random_uniform((batch_size, 4), 0.0, 1.0, seed=seed + 1) cell = contrib_rnn_cell.NTMCell( controller=rnn_cell_impl.LSTMCell(num_units=4), memory_size=5, memory_vector_dim=4, read_head_num=1, write_head_num=1) output, state = cell(inputs, cell.zero_state(batch_size, dtypes.float32)) sess.run([variables.global_variables_initializer()]) res, read_vector_list, w_list, M = sess.run( [output, state.read_vector_list, state.w_list, state.M]) # Smoke test self.assertAllClose(res, expected_output) self.assertAllClose(read_vector_list[0], expected_read_vector_list) self.assertAllClose(w_list, expected_w_list) self.assertAllClose(M[0], 
expected_M_0) class LayerNormBasicLSTMCellTest(test.TestCase): # NOTE: all the values in the current test case have been calculated. def testBasicLSTMCell(self): with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 2]) c0 = array_ops.zeros([1, 2]) h0 = array_ops.zeros([1, 2]) state0 = rnn_cell.LSTMStateTuple(c0, h0) c1 = array_ops.zeros([1, 2]) h1 = array_ops.zeros([1, 2]) state1 = rnn_cell.LSTMStateTuple(c1, h1) state = (state0, state1) single_cell = lambda: contrib_rnn_cell.LayerNormBasicLSTMCell(2) cell = rnn_cell.MultiRNNCell([single_cell() for _ in range(2)]) g, out_m = cell(x, state) sess.run([variables.global_variables_initializer()]) res = sess.run( [g, out_m], { x.name: np.array([[1., 1.]]), c0.name: 0.1 * np.asarray([[0, 1]]), h0.name: 0.1 * np.asarray([[2, 3]]), c1.name: 0.1 * np.asarray([[4, 5]]), h1.name: 0.1 * np.asarray([[6, 7]]), }) expected_h = np.array([[-0.38079708, 0.38079708]]) expected_state0_c = np.array([[-1.0, 1.0]]) expected_state0_h = np.array([[-0.38079708, 0.38079708]]) expected_state1_c = np.array([[-1.0, 1.0]]) expected_state1_h = np.array([[-0.38079708, 0.38079708]]) actual_h = res[0] actual_state0_c = res[1][0].c actual_state0_h = res[1][0].h actual_state1_c = res[1][1].c actual_state1_h = res[1][1].h self.assertAllClose(actual_h, expected_h, 1e-5) self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5) self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5) self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5) self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5) with variable_scope.variable_scope( "other", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros( [1, 3]) # Test BasicLSTMCell with input_size != num_units. 
c = array_ops.zeros([1, 2]) h = array_ops.zeros([1, 2]) state = rnn_cell.LSTMStateTuple(c, h) cell = contrib_rnn_cell.LayerNormBasicLSTMCell(2) g, out_m = cell(x, state) sess.run([variables.global_variables_initializer()]) res = sess.run( [g, out_m], { x.name: np.array([[1., 1., 1.]]), c.name: 0.1 * np.asarray([[0, 1]]), h.name: 0.1 * np.asarray([[2, 3]]), }) expected_h = np.array([[-0.38079708, 0.38079708]]) expected_c = np.array([[-1.0, 1.0]]) self.assertEqual(len(res), 2) self.assertAllClose(res[0], expected_h, 1e-5) self.assertAllClose(res[1].c, expected_c, 1e-5) self.assertAllClose(res[1].h, expected_h, 1e-5) def testBasicLSTMCellWithoutNorm(self): """Tests that BasicLSTMCell with layer_norm=False.""" with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 2]) c0 = array_ops.zeros([1, 2]) h0 = array_ops.zeros([1, 2]) state0 = rnn_cell.LSTMStateTuple(c0, h0) c1 = array_ops.zeros([1, 2]) h1 = array_ops.zeros([1, 2]) state1 = rnn_cell.LSTMStateTuple(c1, h1) state = (state0, state1) single_cell = lambda: contrib_rnn_cell.LayerNormBasicLSTMCell(2, layer_norm=False) # pylint: disable=line-too-long cell = rnn_cell.MultiRNNCell([single_cell() for _ in range(2)]) g, out_m = cell(x, state) sess.run([variables.global_variables_initializer()]) res = sess.run( [g, out_m], { x.name: np.array([[1., 1.]]), c0.name: 0.1 * np.asarray([[0, 1]]), h0.name: 0.1 * np.asarray([[2, 3]]), c1.name: 0.1 * np.asarray([[4, 5]]), h1.name: 0.1 * np.asarray([[6, 7]]), }) expected_h = np.array([[0.70230919, 0.72581059]]) expected_state0_c = np.array([[0.8020075, 0.89599884]]) expected_state0_h = np.array([[0.56668288, 0.60858738]]) expected_state1_c = np.array([[1.17500675, 1.26892781]]) expected_state1_h = np.array([[0.70230919, 0.72581059]]) actual_h = res[0] actual_state0_c = res[1][0].c actual_state0_h = res[1][0].h actual_state1_c = res[1][1].c actual_state1_h = res[1][1].h self.assertAllClose(actual_h, expected_h, 1e-5) self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5) self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5) self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5) self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5) with variable_scope.variable_scope( "other", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros( [1, 3]) # Test BasicLSTMCell with input_size != num_units. 
c = array_ops.zeros([1, 2]) h = array_ops.zeros([1, 2]) state = rnn_cell.LSTMStateTuple(c, h) cell = contrib_rnn_cell.LayerNormBasicLSTMCell(2, layer_norm=False) g, out_m = cell(x, state) sess.run([variables.global_variables_initializer()]) res = sess.run( [g, out_m], { x.name: np.array([[1., 1., 1.]]), c.name: 0.1 * np.asarray([[0, 1]]), h.name: 0.1 * np.asarray([[2, 3]]), }) expected_h = np.array([[0.64121795, 0.68166804]]) expected_c = np.array([[0.88477188, 0.98103917]]) self.assertEqual(len(res), 2) self.assertAllClose(res[0], expected_h, 1e-5) self.assertAllClose(res[1].c, expected_c, 1e-5) self.assertAllClose(res[1].h, expected_h, 1e-5) def testBasicLSTMCellWithStateTuple(self): with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 2]) c0 = array_ops.zeros([1, 2]) h0 = array_ops.zeros([1, 2]) state0 = rnn_cell.LSTMStateTuple(c0, h0) c1 = array_ops.zeros([1, 2]) h1 = array_ops.zeros([1, 2]) state1 = rnn_cell.LSTMStateTuple(c1, h1) cell = rnn_cell.MultiRNNCell( [contrib_rnn_cell.LayerNormBasicLSTMCell(2) for _ in range(2)]) h, (s0, s1) = cell(x, (state0, state1)) sess.run([variables.global_variables_initializer()]) res = sess.run( [h, s0, s1], { x.name: np.array([[1., 1.]]), c0.name: 0.1 * np.asarray([[0, 1]]), h0.name: 0.1 * np.asarray([[2, 3]]), c1.name: 0.1 * np.asarray([[4, 5]]), h1.name: 0.1 * np.asarray([[6, 7]]), }) expected_h = np.array([[-0.38079708, 0.38079708]]) expected_h0 = np.array([[-0.38079708, 0.38079708]]) expected_c0 = np.array([[-1.0, 1.0]]) expected_h1 = np.array([[-0.38079708, 0.38079708]]) expected_c1 = np.array([[-1.0, 1.0]]) self.assertEqual(len(res), 3) self.assertAllClose(res[0], expected_h, 1e-5) self.assertAllClose(res[1].c, expected_c0, 1e-5) self.assertAllClose(res[1].h, expected_h0, 1e-5) self.assertAllClose(res[2].c, expected_c1, 1e-5) self.assertAllClose(res[2].h, expected_h1, 1e-5) def testBasicLSTMCellWithStateTupleLayerNorm(self): """The results of LSTMCell and LayerNormBasicLSTMCell should be the same.""" with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 2]) c0 = array_ops.zeros([1, 2]) h0 = array_ops.zeros([1, 2]) state0 = rnn_cell_impl.LSTMStateTuple(c0, h0) c1 = array_ops.zeros([1, 2]) h1 = array_ops.zeros([1, 2]) state1 = rnn_cell_impl.LSTMStateTuple(c1, h1) cell = rnn_cell_impl.MultiRNNCell([ contrib_rnn_cell.LayerNormLSTMCell( 2, layer_norm=True, norm_gain=1.0, norm_shift=0.0) for _ in range(2) ]) h, (s0, s1) = cell(x, (state0, state1)) sess.run([variables.global_variables_initializer()]) res = sess.run( [h, s0, s1], { x.name: np.array([[1., 1.]]), c0.name: 0.1 * np.asarray([[0, 1]]), h0.name: 0.1 * np.asarray([[2, 3]]), c1.name: 0.1 * np.asarray([[4, 5]]), h1.name: 0.1 * np.asarray([[6, 7]]), }) expected_h = np.array([[-0.38079708, 0.38079708]]) expected_h0 = np.array([[-0.38079708, 0.38079708]]) expected_c0 = np.array([[-1.0, 1.0]]) expected_h1 = np.array([[-0.38079708, 0.38079708]]) expected_c1 = np.array([[-1.0, 1.0]]) self.assertEqual(len(res), 3) self.assertAllClose(res[0], expected_h, 1e-5) self.assertAllClose(res[1].c, expected_c0, 1e-5) self.assertAllClose(res[1].h, expected_h0, 1e-5) self.assertAllClose(res[2].c, expected_c1, 1e-5) self.assertAllClose(res[2].h, expected_h1, 1e-5) def testBasicLSTMCellWithDropout(self): def _is_close(x, y, digits=4): delta = x - y return delta < 10**(-digits) def _is_close_in(x, items, digits=4): for 
i in items: if _is_close(x, i, digits): return True return False keep_prob = 0.5 c_high = 2.9998924946 c_low = 0.999983298578 h_low = 0.761552567265 h_high = 0.995008519604 num_units = 5 allowed_low = [1, 2, 3] with self.cached_session() as sess: with variable_scope.variable_scope( "other", initializer=init_ops.constant_initializer(1)): x = array_ops.zeros([1, 5]) c = array_ops.zeros([1, 5]) h = array_ops.zeros([1, 5]) state = rnn_cell.LSTMStateTuple(c, h) cell = contrib_rnn_cell.LayerNormBasicLSTMCell( num_units, layer_norm=False, dropout_keep_prob=keep_prob) g, s = cell(x, state) sess.run([variables.global_variables_initializer()]) res = sess.run( [g, s], { x.name: np.ones([1, 5]), c.name: np.ones([1, 5]), h.name: np.ones([1, 5]), }) # Since the returned tensors are of size [1,n] # get the first component right now. actual_h = res[0][0] actual_state_c = res[1].c[0] actual_state_h = res[1].h[0] # For each item in `c` (the cell inner state) check that # it is equal to one of the allowed values `c_high` (not # dropped out) or `c_low` (dropped out) and verify that the # corresponding item in `h` (the cell activation) is coherent. # Count the dropped activations and check that their number is # coherent with the dropout probability. dropped_count = 0 self.assertTrue((actual_h == actual_state_h).all()) for citem, hitem in zip(actual_state_c, actual_state_h): self.assertTrue(_is_close_in(citem, [c_low, c_high])) if _is_close(citem, c_low): self.assertTrue(_is_close(hitem, h_low)) dropped_count += 1 elif _is_close(citem, c_high): self.assertTrue(_is_close(hitem, h_high)) self.assertIn(dropped_count, allowed_low) def _create_multi_lstm_cell_ops(batch_size, num_units, input_depth, num_layers, max_time, compiled): with variable_scope.variable_scope( "root", initializer=init_ops.random_uniform_initializer(-0.1, 0.1, seed=2)): inputs = variable_scope.get_variable( "inputs", initializer=random_ops.random_uniform( (max_time, batch_size, input_depth), seed=1)) maybe_xla = lambda c: contrib_rnn_cell.CompiledWrapper(c) if compiled else c cell = rnn_cell.MultiRNNCell( [maybe_xla(rnn_cell.LSTMCell(num_units)) for _ in range(num_layers)]) initial_state = cell.zero_state(batch_size=batch_size, dtype=dtypes.float32) outputs, final_state = rnn.dynamic_rnn( cell=cell, inputs=inputs, initial_state=initial_state, time_major=True) flat_final_state = nest.flatten(final_state) trainable_variables = variables.trainable_variables() outputs_grad = gradients_impl.gradients( [outputs], trainable_variables + [inputs] + nest.flatten(initial_state)) final_state_grad = gradients_impl.gradients( flat_final_state, trainable_variables + [inputs] + nest.flatten(initial_state)) return { "outputs": outputs, "final_state": flat_final_state, "outputs_grad": outputs_grad, "final_state_grad": final_state_grad } class CompiledWrapperTest(test.TestCase): def testMultiRNNCellWithLSTMCellAndXLA(self): # TODO(b/34735319): Don't run this test if XLA is not available. 
batch_size = 16 num_units = 32 input_depth = 12 num_layers = 2 max_time = 20 atol = 1e-5 random_seed.set_random_seed(1234) with self.session(graph=ops.Graph()) as sess: xla_ops = _create_multi_lstm_cell_ops( batch_size=batch_size, num_units=num_units, input_depth=input_depth, num_layers=num_layers, max_time=max_time, compiled=True) sess.run([variables.global_variables_initializer()]) xla_results = sess.run(xla_ops) random_seed.set_random_seed(1234) with self.session(graph=ops.Graph()) as sess: non_xla_ops = _create_multi_lstm_cell_ops( batch_size=batch_size, num_units=num_units, input_depth=input_depth, num_layers=num_layers, max_time=max_time, compiled=False) sess.run([variables.global_variables_initializer()]) non_xla_results = sess.run(non_xla_ops) self.assertAllClose( non_xla_results["outputs"], xla_results["outputs"], atol=atol) for xla_value, non_xla_value in zip(xla_results["final_state"], non_xla_results["final_state"]): self.assertAllClose(xla_value, non_xla_value, atol=atol) for xla_g, non_xla_g in zip(xla_results["outputs_grad"], non_xla_results["outputs_grad"]): self.assertAllClose(xla_g, non_xla_g, atol=atol) for xla_g, non_xla_g in zip(xla_results["final_state_grad"], non_xla_results["final_state_grad"]): self.assertAllClose(xla_g, non_xla_g, atol=atol) def testMultiRNNCellWithStateTuple(self): with self.cached_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 2]) m_bad = array_ops.zeros([1, 4]) m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2])) # Test incorrectness of state with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"): rnn_cell.MultiRNNCell( [rnn_cell.GRUCell(2) for _ in range(2)], state_is_tuple=True)(x, m_bad) _, ml = rnn_cell.MultiRNNCell( [rnn_cell.GRUCell(2) for _ in range(2)], state_is_tuple=True)(x, m_good) sess.run([variables.global_variables_initializer()]) res = sess.run( ml, { x.name: np.array([[1., 1.]]), m_good[0].name: np.array([[0.1, 0.1]]), m_good[1].name: np.array([[0.1, 0.1]]) }) # The numbers in results were not calculated, this is just a # smoke test. However, these numbers should match those of # the test testMultiRNNCell. self.assertAllClose(res[0], [[0.175991, 0.175991]]) self.assertAllClose(res[1], [[0.13248, 0.13248]]) class BenchmarkLSTMCellXLA(test.Benchmark): def benchmarkDynamicRNNWithMultiLSTMCell(self): num_layers = 3 max_time = 50 print("benchmarkDynamicRNNWithMultiLSTMCell") print("\t" + "\t".join([ "inter_th", "intra_th", "batch_size", "num_units", "input_depth", "device", "compiled", "wall_time" ])) warmup_run = True for (threads, device, num_units, batch_size, input_depth, compiled) in itertools.product([{ "inter": 0, "intra": 0 }, { "inter": 1, "intra": 4 }], ["cpu", "gpu"], [32, 512], [1, 32, 256], [32, 512], [False, True]): if threads["inter"] != 0: # We only care about testing inter/intra op limitations on # CPU with small batch size, to mimic embedded devices. 
if device != "cpu" or batch_size != 1: continue if device == "cpu" and batch_size > 32: continue random_seed.set_random_seed(1234) config = config_pb2.ConfigProto( inter_op_parallelism_threads=threads["inter"], intra_op_parallelism_threads=threads["intra"], allow_soft_placement=False) with session.Session(config=config, graph=ops.Graph()) as sess: with ops.device("/%s:0" % device): ops_dict = _create_multi_lstm_cell_ops( batch_size=batch_size, num_units=num_units, input_depth=input_depth, num_layers=num_layers, max_time=max_time, compiled=compiled) sess.run([variables.global_variables_initializer()]) all_ops = nest.flatten(ops_dict.values()) all_ops_group = control_flow_ops.group(*all_ops) name_suffix = ("inter_th_%d_intra_th_%d_bs_%d_units_%d_inputdepth_%d" "_device_%s_xla_%s" % (threads["inter"], threads["intra"], batch_size, num_units, input_depth, device, compiled)) if warmup_run: self.run_op_benchmark( sess, all_ops_group, min_iters=30, name="ignore_warmup") warmup_run = False benchmark_results = self.run_op_benchmark( sess, all_ops_group, min_iters=50, name="benchmarkDynamicRNNWithMultiLSTMCell_%s" % name_suffix) print("\t" + "\t".join([ "%s" % x for x in [ threads["inter"], threads["intra"], batch_size, num_units, input_depth, device, compiled, benchmark_results["wall_time"] ] ])) class WeightNormLSTMCellTest(test.TestCase): """Compared cell output with pre-calculated values.""" def _cell_output(self, cell): """Calculates cell output.""" with self.cached_session() as sess: init = init_ops.constant_initializer(0.5) with variable_scope.variable_scope("root", initializer=init): x = array_ops.zeros([1, 2]) c0 = array_ops.zeros([1, 2]) h0 = array_ops.zeros([1, 2]) state0 = rnn_cell.LSTMStateTuple(c0, h0) xout, sout = cell()(x, state0) sess.run([variables.global_variables_initializer()]) res = sess.run([xout, sout], { x.name: np.array([[1., 1.]]), c0.name: 0.1 * np.asarray([[0, 1]]), h0.name: 0.1 * np.asarray([[2, 3]]), }) actual_state_c = res[1].c actual_state_h = res[1].h return actual_state_c, actual_state_h def testBasicCell(self): """Tests cell w/o peepholes and w/o normalisation.""" def cell(): return contrib_rnn_cell.WeightNormLSTMCell(2, norm=False, use_peepholes=False) actual_c, actual_h = self._cell_output(cell) expected_c = np.array([[0.65937078, 0.74983585]]) expected_h = np.array([[0.44923624, 0.49362513]]) self.assertAllClose(expected_c, actual_c, 1e-5) self.assertAllClose(expected_h, actual_h, 1e-5) def testNonbasicCell(self): """Tests cell with peepholes and w/o normalisation.""" def cell(): return contrib_rnn_cell.WeightNormLSTMCell(2, norm=False, use_peepholes=True) actual_c, actual_h = self._cell_output(cell) expected_c = np.array([[0.65937084, 0.7574988]]) expected_h = np.array([[0.4792085, 0.53470564]]) self.assertAllClose(expected_c, actual_c, 1e-5) self.assertAllClose(expected_h, actual_h, 1e-5) def testBasicCellWithNorm(self): """Tests cell w/o peepholes and with normalisation.""" def cell(): return contrib_rnn_cell.WeightNormLSTMCell(2, norm=True, use_peepholes=False) actual_c, actual_h = self._cell_output(cell) expected_c = np.array([[0.50125383, 0.58805949]]) expected_h = np.array([[0.32770363, 0.37397948]]) self.assertAllClose(expected_c, actual_c, 1e-5) self.assertAllClose(expected_h, actual_h, 1e-5) def testNonBasicCellWithNorm(self): """Tests cell with peepholes and with normalisation.""" def cell(): return contrib_rnn_cell.WeightNormLSTMCell(2, norm=True, use_peepholes=True) actual_c, actual_h = self._cell_output(cell) expected_c = np.array([[0.50125383, 
0.59587258]]) expected_h = np.array([[0.35041603, 0.40873795]]) self.assertAllClose(expected_c, actual_c, 1e-5) self.assertAllClose(expected_h, actual_h, 1e-5) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/kernel_tests/rnn_cell_test.py
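The tests in rnn_cell_test.py above repeat one recipe: build a cell inside a variable scope with a constant initializer so the weights are deterministic, feed fixed inputs, and compare outputs against precomputed constants with assertAllClose. The sketch below is a hypothetical, stripped-down version of that recipe using the public tf.nn.rnn_cell API of TensorFlow 1.x rather than the contrib cells under test; the shape checks merely stand in for the precomputed baselines.

# Hypothetical minimal version of the smoke-test recipe used above (TF 1.x
# graph mode, public tf.nn.rnn_cell API; not one of the contrib cells).
import numpy as np
import tensorflow as tf

def run_cell_smoke_test():
  with tf.Graph().as_default(), tf.Session() as sess:
    # constant_initializer(0.5) makes every weight 0.5, so the run is
    # deterministic and outputs could be pinned to precomputed constants.
    with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
      cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=2)
      inputs = tf.constant(
          np.array([[1., 1., 1., 1.],
                    [2., 2., 2., 2.],
                    [3., 3., 3., 3.]], dtype=np.float32))
      init_state = cell.zero_state(batch_size=3, dtype=tf.float32)
      output, state = cell(inputs, init_state)
    sess.run(tf.global_variables_initializer())
    out_val, state_val = sess.run([output, state])
    # The real tests call assertAllClose against stored values; here we only
    # confirm the shapes that those tests also verify.
    assert out_val.shape == (3, 2)
    assert state_val.c.shape == (3, 2) and state_val.h.shape == (3, 2)

run_cell_smoke_test()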
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.contrib.rnn.python.ops.fused_rnn_cell.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.rnn.python.ops import fused_rnn_cell from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import init_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test class FusedRnnCellTest(test.TestCase): def testBasicRNNFusedWrapper(self): """This test checks that using a wrapper for BasicRNN works as expected.""" with self.cached_session() as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=19890212) cell = rnn_cell.BasicRNNCell(10) batch_size = 5 input_size = 20 timelen = 15 inputs = constant_op.constant( np.random.randn(timelen, batch_size, input_size)) with variable_scope.variable_scope("basic", initializer=initializer): unpacked_inputs = array_ops.unstack(inputs) outputs, state = rnn.static_rnn( cell, unpacked_inputs, dtype=dtypes.float64) packed_outputs = array_ops.stack(outputs) basic_vars = [ v for v in variables.trainable_variables() if v.name.startswith("basic/") ] sess.run([variables.global_variables_initializer()]) basic_outputs, basic_state = sess.run([packed_outputs, state]) basic_grads = sess.run(gradients_impl.gradients(packed_outputs, inputs)) basic_wgrads = sess.run( gradients_impl.gradients(packed_outputs, basic_vars)) with variable_scope.variable_scope( "fused_static", initializer=initializer): fused_cell = fused_rnn_cell.FusedRNNCellAdaptor( rnn_cell.BasicRNNCell(10)) outputs, state = fused_cell(inputs, dtype=dtypes.float64) fused_static_vars = [ v for v in variables.trainable_variables() if v.name.startswith("fused_static/") ] sess.run([variables.global_variables_initializer()]) fused_static_outputs, fused_static_state = sess.run([outputs, state]) fused_static_grads = sess.run(gradients_impl.gradients(outputs, inputs)) fused_static_wgrads = sess.run( gradients_impl.gradients(outputs, fused_static_vars)) self.assertAllClose(basic_outputs, fused_static_outputs) self.assertAllClose(basic_state, fused_static_state) self.assertAllClose(basic_grads, fused_static_grads) for basic, fused in zip(basic_wgrads, fused_static_wgrads): self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2) with variable_scope.variable_scope( "fused_dynamic", initializer=initializer): fused_cell = fused_rnn_cell.FusedRNNCellAdaptor( rnn_cell.BasicRNNCell(10), use_dynamic_rnn=True) outputs, state = fused_cell(inputs, dtype=dtypes.float64) fused_dynamic_vars = [ v for v 
in variables.trainable_variables() if v.name.startswith("fused_dynamic/") ] sess.run([variables.global_variables_initializer()]) fused_dynamic_outputs, fused_dynamic_state = sess.run([outputs, state]) fused_dynamic_grads = sess.run( gradients_impl.gradients(outputs, inputs)) fused_dynamic_wgrads = sess.run( gradients_impl.gradients(outputs, fused_dynamic_vars)) self.assertAllClose(basic_outputs, fused_dynamic_outputs) self.assertAllClose(basic_state, fused_dynamic_state) self.assertAllClose(basic_grads, fused_dynamic_grads) for basic, fused in zip(basic_wgrads, fused_dynamic_wgrads): self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2) def testTimeReversedFusedRNN(self): with self.cached_session() as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=19890213) fw_cell = rnn_cell.BasicRNNCell(10) bw_cell = rnn_cell.BasicRNNCell(10) batch_size = 5 input_size = 20 timelen = 15 inputs = constant_op.constant( np.random.randn(timelen, batch_size, input_size)) # test bi-directional rnn with variable_scope.variable_scope("basic", initializer=initializer): unpacked_inputs = array_ops.unstack(inputs) outputs, fw_state, bw_state = rnn.static_bidirectional_rnn( fw_cell, bw_cell, unpacked_inputs, dtype=dtypes.float64) packed_outputs = array_ops.stack(outputs) basic_vars = [ v for v in variables.trainable_variables() if v.name.startswith("basic/") ] sess.run([variables.global_variables_initializer()]) basic_outputs, basic_fw_state, basic_bw_state = sess.run( [packed_outputs, fw_state, bw_state]) basic_grads = sess.run(gradients_impl.gradients(packed_outputs, inputs)) basic_wgrads = sess.run( gradients_impl.gradients(packed_outputs, basic_vars)) with variable_scope.variable_scope("fused", initializer=initializer): fused_cell = fused_rnn_cell.FusedRNNCellAdaptor( rnn_cell.BasicRNNCell(10)) fused_bw_cell = fused_rnn_cell.TimeReversedFusedRNN( fused_rnn_cell.FusedRNNCellAdaptor(rnn_cell.BasicRNNCell(10))) fw_outputs, fw_state = fused_cell( inputs, dtype=dtypes.float64, scope="fw") bw_outputs, bw_state = fused_bw_cell( inputs, dtype=dtypes.float64, scope="bw") outputs = array_ops.concat([fw_outputs, bw_outputs], 2) fused_vars = [ v for v in variables.trainable_variables() if v.name.startswith("fused/") ] sess.run([variables.global_variables_initializer()]) fused_outputs, fused_fw_state, fused_bw_state = sess.run( [outputs, fw_state, bw_state]) fused_grads = sess.run(gradients_impl.gradients(outputs, inputs)) fused_wgrads = sess.run(gradients_impl.gradients(outputs, fused_vars)) self.assertAllClose(basic_outputs, fused_outputs) self.assertAllClose(basic_fw_state, fused_fw_state) self.assertAllClose(basic_bw_state, fused_bw_state) self.assertAllClose(basic_grads, fused_grads) for basic, fused in zip(basic_wgrads, fused_wgrads): self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/kernel_tests/fused_rnn_cell_test.py
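fused_rnn_cell_test.py above checks that FusedRNNCellAdaptor (and its TimeReversedFusedRNN wrapper) reproduces static_rnn results while consuming the whole time-major tensor in one call. Below is a hedged usage sketch under the same assumptions as the test (TensorFlow 1.x with tf.contrib available); the shapes are illustrative, and the assertions replace the test's comparison against static_rnn.

# Usage sketch for FusedRNNCellAdaptor, assuming the same TF 1.x contrib
# environment as the test above; values are random, shapes illustrative.
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell

timelen, batch_size, input_size, num_units = 15, 5, 20, 10
# Time-major input: [time, batch, depth], as in the test.
inputs = tf.constant(np.random.randn(timelen, batch_size, input_size))

with tf.variable_scope("fused_example"):
  # The adaptor wraps an ordinary cell and accepts the full 3-D tensor
  # instead of a Python list of per-step tensors.
  fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
      tf.nn.rnn_cell.BasicRNNCell(num_units), use_dynamic_rnn=True)
  outputs, final_state = fused_cell(inputs, dtype=tf.float64)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  out_val, state_val = sess.run([outputs, final_state])
  assert out_val.shape == (timelen, batch_size, num_units)
  assert state_val.shape == (batch_size, num_units)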
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Library for benchmarking OpKernels.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import time from tensorflow.python.framework import ops def device(use_gpu=False): """TensorFlow device to assign ops to.""" if use_gpu: return ops.device("/gpu:0") return ops.device("/cpu:0") def seconds_per_run(op, sess, num_runs=50): """Number of seconds taken to execute 'op' once on average.""" for _ in range(2): sess.run(op) start_time = time.time() for _ in range(num_runs): sess.run(op) end_time = time.time() time_taken = (end_time - start_time) / num_runs return time_taken def dict_product(dicts): """Constructs iterator over outer product of entries in a dict-of-lists. Example: >>> dict_products({"a": [1,2], "b": [3, 4]}) >>> [{"a": 1, "b": 3}, {"a": 1, "b": 4}, {"a": 2, "b": 3}, {"a": 2, "b": 4}] Args: dicts: dictionary with string keys and list values. Yields: Individual dicts from outer product. """ keys, values = zip(*dicts.items()) for config_values in itertools.product(*values): yield dict(zip(keys, config_values))
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/kernel_tests/benchmarking.py
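benchmarking.py above is a small helper module: device() picks a CPU or GPU device context, seconds_per_run() averages the wall time of an op after two warm-up runs, and dict_product() expands a dict of lists into every configuration combination. One possible way to combine them is sketched below; the matmul op is a stand-in for a real RNN benchmark, and the import path is an assumption, since the module lives under kernel_tests and may not be importable in every build.

# Hypothetical sweep built from the helpers above; the timed op is a stand-in.
import tensorflow as tf
from tensorflow.contrib.rnn.python.kernel_tests import benchmarking  # assumed importable

configs = {"size": [128, 512], "use_gpu": [False]}
for config in benchmarking.dict_product(configs):
  with tf.Graph().as_default():
    with benchmarking.device(use_gpu=config["use_gpu"]):
      x = tf.random_uniform([config["size"], config["size"]])
      op = tf.matmul(x, x)
    with tf.Session() as sess:
      # seconds_per_run does two warm-up runs, then averages num_runs timings.
      print(config, benchmarking.seconds_per_run(op, sess, num_runs=10))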
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python wrapper for the Block GRU Op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.keras.engine import input_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_rnn_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.util.deprecation import deprecated_args LayerRNNCell = rnn_cell_impl.LayerRNNCell # pylint: disable=invalid-name @ops.RegisterGradient("GRUBlockCell") def _GRUBlockCellGrad(op, *grad): r"""Gradient for GRUBlockCell. Args: op: Op for which the gradient is defined. *grad: Gradients of the optimization function wrt output for the Op. Returns: d_x: Gradients wrt to x d_h: Gradients wrt to h d_w_ru: Gradients wrt to w_ru d_w_c: Gradients wrt to w_c d_b_ru: Gradients wrt to b_ru d_b_c: Gradients wrt to b_c Mathematics behind the Gradients below: ``` d_c_bar = d_h \circ (1-u) \circ (1-c \circ c) d_u_bar = d_h \circ (h-c) \circ u \circ (1-u) d_r_bar_u_bar = [d_r_bar d_u_bar] [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T d_x = d_x_component_1 + d_x_component_2 d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + u ``` Below calculation is performed in the python wrapper for the Gradients (not in the gradient kernel.) ``` d_w_ru = x_h_prevr^T * d_c_bar d_w_c = x_h_prev^T * d_r_bar_u_bar d_b_ru = sum of d_r_bar_u_bar along axis = 0 d_b_c = sum of d_c_bar along axis = 0 ``` """ x, h_prev, w_ru, w_c, b_ru, b_c = op.inputs r, u, c, _ = op.outputs _, _, _, d_h = grad d_x, d_h_prev, d_c_bar, d_r_bar_u_bar = gen_rnn_ops.gru_block_cell_grad( x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h) x_h_prev = array_ops.concat([x, h_prev], 1) d_w_ru = math_ops.matmul(x_h_prev, d_r_bar_u_bar, transpose_a=True) d_b_ru = nn_ops.bias_add_grad(d_r_bar_u_bar) x_h_prevr = array_ops.concat([x, h_prev * r], 1) d_w_c = math_ops.matmul(x_h_prevr, d_c_bar, transpose_a=True) d_b_c = nn_ops.bias_add_grad(d_c_bar) return d_x, d_h_prev, d_w_ru, d_w_c, d_b_ru, d_b_c class GRUBlockCell(LayerRNNCell): r"""Block GRU cell implementation. Deprecated: use GRUBlockCellV2 instead. The implementation is based on: http://arxiv.org/abs/1406.1078 Computes the GRU cell forward propagation for 1 time step. 
This kernel op implements the following mathematical equations: Biases are initialized with: * `b_ru` - constant_initializer(1.0) * `b_c` - constant_initializer(0.0) ``` x_h_prev = [x, h_prev] [r_bar u_bar] = x_h_prev * w_ru + b_ru r = sigmoid(r_bar) u = sigmoid(u_bar) h_prevr = h_prev \circ r x_h_prevr = [x h_prevr] c_bar = x_h_prevr * w_c + b_c c = tanh(c_bar) h = (1-u) \circ c + u \circ h_prev ``` """ @deprecated_args(None, "cell_size is deprecated, use num_units instead", "cell_size") def __init__(self, num_units=None, cell_size=None, reuse=None, name="gru_cell"): """Initialize the Block GRU cell. Args: num_units: int, The number of units in the GRU cell. cell_size: int, The old (deprecated) name for `num_units`. reuse: (optional) boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. By default this is "gru_cell", for variable-name compatibility with `tf.compat.v1.nn.rnn_cell.GRUCell`. Raises: ValueError: if both cell_size and num_units are not None; or both are None. """ super(GRUBlockCell, self).__init__(_reuse=reuse, name=name) if (cell_size is None) == (num_units is None): raise ValueError( "Exactly one of num_units or cell_size must be provided.") if num_units is None: num_units = cell_size self._cell_size = num_units # Inputs must be 2-dimensional. self.input_spec = input_spec.InputSpec(ndim=2) @property def state_size(self): return self._cell_size @property def output_size(self): return self._cell_size def build(self, input_shape): # Check if the input size exists. input_size = tensor_shape.dimension_value(input_shape[1]) if input_size is None: raise ValueError("Expecting input_size to be set.") self._gate_kernel = self.add_variable( "w_ru", [input_size + self._cell_size, self._cell_size * 2]) self._gate_bias = self.add_variable( "b_ru", [self._cell_size * 2], initializer=init_ops.constant_initializer(1.0)) self._candidate_kernel = self.add_variable( "w_c", [input_size + self._cell_size, self._cell_size]) self._candidate_bias = self.add_variable( "b_c", [self._cell_size], initializer=init_ops.constant_initializer(0.0)) self.built = True def call(self, inputs, h_prev): """GRU cell.""" # Check cell_size == state_size from h_prev. cell_size = h_prev.get_shape().with_rank(2)[1] if cell_size != self._cell_size: raise ValueError("Shape of h_prev[1] incorrect: cell_size %i vs %s" % (self._cell_size, cell_size)) _gru_block_cell = gen_rnn_ops.gru_block_cell # pylint: disable=invalid-name _, _, _, new_h = _gru_block_cell( x=inputs, h_prev=h_prev, w_ru=self._gate_kernel, w_c=self._candidate_kernel, b_ru=self._gate_bias, b_c=self._candidate_bias) return new_h, new_h class GRUBlockCellV2(GRUBlockCell): """Temporary GRUBlockCell impl with a different variable naming scheme. Only differs from GRUBlockCell by variable names.
""" def build(self, input_shape): """GRU cell.""" input_size = tensor_shape.dimension_value(input_shape[1]) if input_size is None: raise ValueError("Expecting input_size to be set.") self._gate_kernel = self.add_variable( "gates/kernel", [input_size + self._cell_size, self._cell_size * 2]) self._gate_bias = self.add_variable( "gates/bias", [self._cell_size * 2], initializer=init_ops.constant_initializer(1.0)) self._candidate_kernel = self.add_variable( "candidate/kernel", [input_size + self._cell_size, self._cell_size]) self._candidate_bias = self.add_variable( "candidate/bias", [self._cell_size], initializer=init_ops.constant_initializer(0.0)) self.built = True
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/ops/gru_ops.py
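To make the block-GRU equations in the `GRUBlockCell` docstring concrete, here is a small NumPy sketch of one forward step; the weight shapes follow the `build` method above, and all function and variable names are mine:

```python
# Sketch only: one GRU block-cell forward step in NumPy, following the
# equations in the docstring above. Shapes: x [batch, input_size],
# h_prev [batch, cell_size], w_ru [input_size + cell_size, 2 * cell_size],
# w_c [input_size + cell_size, cell_size].
import numpy as np

def sigmoid(z):
  return 1.0 / (1.0 + np.exp(-z))

def gru_block_cell_step(x, h_prev, w_ru, w_c, b_ru, b_c):
  x_h_prev = np.concatenate([x, h_prev], axis=1)
  r_u_bar = x_h_prev @ w_ru + b_ru
  r, u = np.split(sigmoid(r_u_bar), 2, axis=1)      # reset and update gates
  x_h_prevr = np.concatenate([x, h_prev * r], axis=1)
  c = np.tanh(x_h_prevr @ w_c + b_c)                # candidate state
  return (1.0 - u) * c + u * h_prev                 # new hidden state h
```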
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Module implementing RNN Cells that used to be in core. @@EmbeddingWrapper @@InputProjectionWrapper @@OutputProjectionWrapper """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import variable_scope as vs from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest # pylint: disable=protected-access,invalid-name RNNCell = rnn_cell_impl.RNNCell _WEIGHTS_VARIABLE_NAME = rnn_cell_impl._WEIGHTS_VARIABLE_NAME _BIAS_VARIABLE_NAME = rnn_cell_impl._BIAS_VARIABLE_NAME # pylint: enable=protected-access,invalid-name class _Linear(object): """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable. Args: args: a 2D Tensor or a list of 2D, batch, n, Tensors. output_size: int, second dimension of weight variable. dtype: data type for variables. build_bias: boolean, whether to build a bias variable. bias_initializer: starting value to initialize the bias (default is all zeros). kernel_initializer: starting value to initialize the weight. Raises: ValueError: if inputs_shape is wrong. """ def __init__(self, args, output_size, build_bias, bias_initializer=None, kernel_initializer=None): self._build_bias = build_bias if args is None or (nest.is_sequence(args) and not args): raise ValueError("`args` must be specified") if not nest.is_sequence(args): args = [args] self._is_sequence = False else: self._is_sequence = True # Calculate the total size of arguments on dimension 1. 
total_arg_size = 0 shapes = [a.get_shape() for a in args] for shape in shapes: if shape.ndims != 2: raise ValueError("linear is expecting 2D arguments: %s" % shapes) if shape.dims[1].value is None: raise ValueError("linear expects shape[1] to be provided for shape %s, " "but saw %s" % (shape, shape[1])) else: total_arg_size += shape.dims[1].value dtype = [a.dtype for a in args][0] scope = vs.get_variable_scope() with vs.variable_scope(scope) as outer_scope: self._weights = vs.get_variable( _WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype, initializer=kernel_initializer) if build_bias: with vs.variable_scope(outer_scope) as inner_scope: inner_scope.set_partitioner(None) if bias_initializer is None: bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype) self._biases = vs.get_variable( _BIAS_VARIABLE_NAME, [output_size], dtype=dtype, initializer=bias_initializer) def __call__(self, args): if not self._is_sequence: args = [args] if len(args) == 1: res = math_ops.matmul(args[0], self._weights) else: # Explicitly creating a one for a minor performance improvement. one = constant_op.constant(1, dtype=dtypes.int32) res = math_ops.matmul(array_ops.concat(args, one), self._weights) if self._build_bias: res = nn_ops.bias_add(res, self._biases) return res # TODO(xpan): Remove this function in a follow up. def _linear(args, output_size, bias, bias_initializer=None, kernel_initializer=None): """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable. Args: args: a 2D Tensor or a list of 2D, batch, n, Tensors. output_size: int, second dimension of W[i]. bias: boolean, whether to add a bias term or not. bias_initializer: starting value to initialize the bias (default is all zeros). kernel_initializer: starting value to initialize the weight. Returns: A 2D Tensor with shape `[batch, output_size]` equal to sum_i(args[i] * W[i]), where W[i]s are newly created matrices. Raises: ValueError: if some of the arguments has unspecified or wrong shape. """ if args is None or (nest.is_sequence(args) and not args): raise ValueError("`args` must be specified") if not nest.is_sequence(args): args = [args] # Calculate the total size of arguments on dimension 1. total_arg_size = 0 shapes = [a.get_shape() for a in args] for shape in shapes: if shape.ndims != 2: raise ValueError("linear is expecting 2D arguments: %s" % shapes) if shape.dims[1].value is None: raise ValueError("linear expects shape[1] to be provided for shape %s, " "but saw %s" % (shape, shape[1])) else: total_arg_size += shape.dims[1].value dtype = [a.dtype for a in args][0] # Now the computation. scope = vs.get_variable_scope() with vs.variable_scope(scope) as outer_scope: weights = vs.get_variable( _WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype, initializer=kernel_initializer) if len(args) == 1: res = math_ops.matmul(args[0], weights) else: res = math_ops.matmul(array_ops.concat(args, 1), weights) if not bias: return res with vs.variable_scope(outer_scope) as inner_scope: inner_scope.set_partitioner(None) if bias_initializer is None: bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype) biases = vs.get_variable( _BIAS_VARIABLE_NAME, [output_size], dtype=dtype, initializer=bias_initializer) return nn_ops.bias_add(res, biases) class EmbeddingWrapper(RNNCell): """Operator adding input embedding to the given cell. 
Note: in many cases it may be more efficient to not use this wrapper, but instead concatenate the whole sequence of your inputs in time, do the embedding on this batch-concatenated sequence, then split it and feed into your RNN. """ def __init__(self, cell, embedding_classes, embedding_size, initializer=None, reuse=None): """Create a cell with an added input embedding. Args: cell: an RNNCell, an embedding will be put before its inputs. embedding_classes: integer, how many symbols will be embedded. embedding_size: integer, the size of the vectors we embed into. initializer: an initializer to use when creating the embedding; if None, the initializer from variable scope or a default one is used. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. Raises: TypeError: if cell is not an RNNCell. ValueError: if embedding_classes is not positive. """ super(EmbeddingWrapper, self).__init__(_reuse=reuse) rnn_cell_impl.assert_like_rnncell("cell", cell) if embedding_classes <= 0 or embedding_size <= 0: raise ValueError("Both embedding_classes and embedding_size must be > 0: " "%d, %d." % (embedding_classes, embedding_size)) self._cell = cell self._embedding_classes = embedding_classes self._embedding_size = embedding_size self._initializer = initializer @property def state_size(self): return self._cell.state_size @property def output_size(self): return self._cell.output_size def zero_state(self, batch_size, dtype): with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): return self._cell.zero_state(batch_size, dtype) def call(self, inputs, state): """Run the cell on embedded inputs.""" with ops.device("/cpu:0"): if self._initializer: initializer = self._initializer elif vs.get_variable_scope().initializer: initializer = vs.get_variable_scope().initializer else: # Default initializer for embeddings should have variance=1. sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1. initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3) if isinstance(state, tuple): data_type = state[0].dtype else: data_type = state.dtype embedding = vs.get_variable( "embedding", [self._embedding_classes, self._embedding_size], initializer=initializer, dtype=data_type) embedded = embedding_ops.embedding_lookup(embedding, array_ops.reshape(inputs, [-1])) return self._cell(embedded, state) class InputProjectionWrapper(RNNCell): """Operator adding an input projection to the given cell. Note: in many cases it may be more efficient to not use this wrapper, but instead concatenate the whole sequence of your inputs in time, do the projection on this batch-concatenated sequence, then split it. """ def __init__(self, cell, num_proj, activation=None, input_size=None, reuse=None): """Create a cell with input projection. Args: cell: an RNNCell, a projection of inputs is added before it. num_proj: Python integer. The dimension to project to. activation: (optional) an optional activation function. input_size: Deprecated and unused. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. Raises: TypeError: if cell is not an RNNCell. 
""" super(InputProjectionWrapper, self).__init__(_reuse=reuse) if input_size is not None: logging.warn("%s: The input_size parameter is deprecated.", self) rnn_cell_impl.assert_like_rnncell("cell", cell) self._cell = cell self._num_proj = num_proj self._activation = activation self._linear = None @property def state_size(self): return self._cell.state_size @property def output_size(self): return self._cell.output_size def zero_state(self, batch_size, dtype): with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): return self._cell.zero_state(batch_size, dtype) def call(self, inputs, state): """Run the input projection and then the cell.""" # Default scope: "InputProjectionWrapper" if self._linear is None: self._linear = _Linear(inputs, self._num_proj, True) projected = self._linear(inputs) if self._activation: projected = self._activation(projected) return self._cell(projected, state) class OutputProjectionWrapper(RNNCell): """Operator adding an output projection to the given cell. Note: in many cases it may be more efficient to not use this wrapper, but instead concatenate the whole sequence of your outputs in time, do the projection on this batch-concatenated sequence, then split it if needed or directly feed into a softmax. """ def __init__(self, cell, output_size, activation=None, reuse=None): """Create a cell with output projection. Args: cell: an RNNCell, a projection to output_size is added to it. output_size: integer, the size of the output after projection. activation: (optional) an optional activation function. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. Raises: TypeError: if cell is not an RNNCell. ValueError: if output_size is not positive. """ super(OutputProjectionWrapper, self).__init__(_reuse=reuse) rnn_cell_impl.assert_like_rnncell("cell", cell) if output_size < 1: raise ValueError("Parameter output_size must be > 0: %d." % output_size) self._cell = cell self._output_size = output_size self._activation = activation self._linear = None @property def state_size(self): return self._cell.state_size @property def output_size(self): return self._output_size def zero_state(self, batch_size, dtype): with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): return self._cell.zero_state(batch_size, dtype) def call(self, inputs, state): """Run the cell and output projection on inputs, starting from state.""" output, res_state = self._cell(inputs, state) if self._linear is None: self._linear = _Linear(output, self._output_size, True) projected = self._linear(output) if self._activation: projected = self._activation(projected) return projected, res_state
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/ops/core_rnn_cell.py
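A brief usage sketch for the wrappers defined above, assuming TF 1.x graph mode and the usual `tf.contrib.rnn` re-exports of these classes; cell sizes and shapes are illustrative:

```python
# Sketch only: input and output projections around an LSTM cell
# (TF 1.x graph mode; sizes are illustrative).
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 25, 40])  # [batch, time, features]
cell = tf.nn.rnn_cell.BasicLSTMCell(128)
cell = tf.contrib.rnn.InputProjectionWrapper(cell, num_proj=128)
cell = tf.contrib.rnn.OutputProjectionWrapper(cell, output_size=10)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# outputs: [batch, time, 10]; state: LSTMStateTuple of [batch, 128] tensors
```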
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """LSTM Block Cell ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.keras.engine import input_spec from tensorflow.python.layers import base as base_layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_rnn_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import rnn_cell_impl LayerRNNCell = rnn_cell_impl.LayerRNNCell # pylint: disable=invalid-name # pylint: disable=invalid-name def _lstm_block_cell(x, cs_prev, h_prev, w, b, wci=None, wcf=None, wco=None, forget_bias=None, cell_clip=None, use_peephole=None, name=None): r"""Computes the LSTM cell forward propagation for 1 time step. This implementation uses 1 weight matrix and 1 bias vector, and there's an optional peephole connection. This kernel op implements the following mathematical equations: ```python xh = [x, h_prev] [i, ci, f, o] = xh * w + b f = f + forget_bias if not use_peephole: wci = wcf = wco = 0 i = sigmoid(cs_prev * wci + i) f = sigmoid(cs_prev * wcf + f) ci = tanh(ci) cs = ci .* i + cs_prev .* f cs = clip(cs, cell_clip) o = sigmoid(cs * wco + o) co = tanh(cs) h = co .* o ``` Args: x: A `Tensor`. Must be one of the following types: `float32`. The input to the LSTM cell, shape (batch_size, num_inputs). cs_prev: A `Tensor`. Must have the same type as `x`. Value of the cell state at previous time step. h_prev: A `Tensor`. Must have the same type as `x`. Output of the previous cell at previous time step. w: A `Tensor`. Must have the same type as `x`. The weight matrix. b: A `Tensor`. Must have the same type as `x`. The bias vector. wci: A `Tensor`. Must have the same type as `x`. The weight matrix for input gate peephole connection. wcf: A `Tensor`. Must have the same type as `x`. The weight matrix for forget gate peephole connection. wco: A `Tensor`. Must have the same type as `x`. The weight matrix for output gate peephole connection. forget_bias: An optional `float`. Defaults to `1`. The forget gate bias. cell_clip: An optional `float`. Defaults to `-1` (no clipping). Value to clip the 'cs' value to. Disable by setting to negative value. use_peephole: An optional `bool`. Defaults to `False`. Whether to use peephole weights. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (i, cs, f, o, ci, co, h). i: A `Tensor`. Has the same type as `x`. The input gate. cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh. f: A `Tensor`. Has the same type as `x`. The forget gate. o: A `Tensor`. Has the same type as `x`. The output gate. ci: A `Tensor`. Has the same type as `x`. 
The cell input. co: A `Tensor`. Has the same type as `x`. The cell after the tanh. h: A `Tensor`. Has the same type as `x`. The output h vector. Raises: ValueError: If cell_size is None. """ if wci is None: cell_size = cs_prev.get_shape().with_rank(2).dims[1].value if cell_size is None: raise ValueError("cell_size from `cs_prev` should not be None.") wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size]) wcf = wci wco = wci # pylint: disable=protected-access return gen_rnn_ops.lstm_block_cell( x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, forget_bias=forget_bias, cell_clip=cell_clip if cell_clip is not None else -1, use_peephole=use_peephole, name=name) # pylint: enable=protected-access def _block_lstm(seq_len_max, x, w, b, cs_prev=None, h_prev=None, wci=None, wcf=None, wco=None, forget_bias=None, cell_clip=None, use_peephole=None, name=None): r"""TODO(williamchan): add doc. Args: seq_len_max: A `Tensor` of type `int64`. x: A list of at least 1 `Tensor` objects of the same type. w: A `Tensor`. Must have the same type as `x`. b: A `Tensor`. Must have the same type as `x`. cs_prev: A `Tensor`. Must have the same type as `x`. h_prev: A `Tensor`. Must have the same type as `x`. wci: A `Tensor`. Must have the same type as `x`. wcf: A `Tensor`. Must have the same type as `x`. wco: A `Tensor`. Must have the same type as `x`. forget_bias: An optional `float`. Defaults to `1`. cell_clip: An optional `float`. Defaults to `-1` (no clipping). use_peephole: An optional `bool`. Defaults to `False`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (i, cs, f, o, ci, co, h). i: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. cs: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. f: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. o: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. ci: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. co: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. h: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. Raises: ValueError: If `b` does not have a valid shape. 
""" dtype = x[0].dtype batch_size = x[0].get_shape().with_rank(2).dims[0].value cell_size4 = b.get_shape().with_rank(1).dims[0].value if cell_size4 is None: raise ValueError("`b` shape must not be None.") cell_size = cell_size4 / 4 zero_state = None if cs_prev is None or h_prev is None: zero_state = array_ops.constant( 0, dtype=dtype, shape=[batch_size, cell_size]) if cs_prev is None: cs_prev = zero_state if h_prev is None: h_prev = zero_state if wci is None: wci = array_ops.constant(0, dtype=dtype, shape=[cell_size]) wcf = wci wco = wci # pylint: disable=protected-access i, cs, f, o, ci, co, h = gen_rnn_ops.block_lstm( seq_len_max=seq_len_max, x=array_ops.stack(x), cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, forget_bias=forget_bias, cell_clip=cell_clip if cell_clip is not None else -1, name=name, use_peephole=use_peephole) return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack( f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack( co), array_ops.unstack(h) # pylint: enable=protected-access # pylint: enable=invalid-name @ops.RegisterGradient("LSTMBlockCell") def _LSTMBlockCellGrad(op, *grad): """Gradient for LSTMBlockCell.""" (x, cs_prev, h_prev, w, wci, wcf, wco, b) = op.inputs (i, cs, f, o, ci, co, _) = op.outputs (_, cs_grad, _, _, _, _, h_grad) = grad batch_size = x.get_shape().with_rank(2).dims[0].value if batch_size is None: batch_size = -1 input_size = x.get_shape().with_rank(2).dims[1].value if input_size is None: raise ValueError("input_size from `x` should not be None.") cell_size = cs_prev.get_shape().with_rank(2).dims[1].value if cell_size is None: raise ValueError("cell_size from `cs_prev` should not be None.") (cs_prev_grad, dgates, wci_grad, wcf_grad, wco_grad) = gen_rnn_ops.lstm_block_cell_grad( x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f, o=o, ci=ci, co=co, cs_grad=cs_grad, h_grad=h_grad, use_peephole=op.get_attr("use_peephole")) # Backprop from dgates to xh. xh_grad = math_ops.matmul(dgates, w, transpose_b=True) x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size)) x_grad.get_shape().merge_with(x.get_shape()) h_prev_grad = array_ops.slice(xh_grad, (0, input_size), (batch_size, cell_size)) h_prev_grad.get_shape().merge_with(h_prev.get_shape()) # Backprop from dgates to w. xh = array_ops.concat([x, h_prev], 1) w_grad = math_ops.matmul(xh, dgates, transpose_a=True) w_grad.get_shape().merge_with(w.get_shape()) # Backprop from dgates to b. b_grad = nn_ops.bias_add_grad(dgates) b_grad.get_shape().merge_with(b.get_shape()) return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad, b_grad) class LSTMBlockCell(LayerRNNCell): """Basic LSTM recurrent network cell. The implementation is based on: http://arxiv.org/abs/1409.2329. We add `forget_bias` (default: 1) to the biases of the forget gate in order to reduce the scale of forgetting in the beginning of the training. Unlike `rnn_cell_impl.LSTMCell`, this is a monolithic op and should be much faster. The weight and bias matrices should be compatible as long as the variable scope matches. """ def __init__(self, num_units, forget_bias=1.0, cell_clip=None, use_peephole=False, dtype=None, reuse=None, name="lstm_cell"): """Initialize the basic LSTM cell. Args: num_units: int, The number of units in the LSTM cell. forget_bias: float, The bias added to forget gates (see above). cell_clip: An optional `float`. Defaults to `-1` (no clipping). use_peephole: Whether to use peephole connections or not. 
dtype: the variable dtype of this layer. Default to tf.float32. reuse: (optional) boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. By default this is "lstm_cell", for variable-name compatibility with `tf.compat.v1.nn.rnn_cell.LSTMCell`. When restoring from CudnnLSTM-trained checkpoints, must use CudnnCompatibleLSTMBlockCell instead. """ super(LSTMBlockCell, self).__init__(_reuse=reuse, dtype=dtype, name=name) self._num_units = num_units self._forget_bias = forget_bias self._use_peephole = use_peephole self._cell_clip = cell_clip if cell_clip is not None else -1 self._names = { "W": "kernel", "b": "bias", "wci": "w_i_diag", "wcf": "w_f_diag", "wco": "w_o_diag", "scope": "lstm_cell" } # Inputs must be 2-dimensional. self.input_spec = input_spec.InputSpec(ndim=2) @property def state_size(self): return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units) @property def output_size(self): return self._num_units def build(self, inputs_shape): if not inputs_shape.dims[1].value: raise ValueError( "Expecting inputs_shape[1] to be set: %s" % str(inputs_shape)) input_size = inputs_shape.dims[1].value self._kernel = self.add_variable( self._names["W"], [input_size + self._num_units, self._num_units * 4]) self._bias = self.add_variable( self._names["b"], [self._num_units * 4], initializer=init_ops.constant_initializer(0.0)) if self._use_peephole: self._w_i_diag = self.add_variable(self._names["wci"], [self._num_units]) self._w_f_diag = self.add_variable(self._names["wcf"], [self._num_units]) self._w_o_diag = self.add_variable(self._names["wco"], [self._num_units]) self.built = True def call(self, inputs, state): """Long short-term memory cell (LSTM).""" if len(state) != 2: raise ValueError("Expecting state to be a tuple with length 2.") if self._use_peephole: wci = self._w_i_diag wcf = self._w_f_diag wco = self._w_o_diag else: wci = wcf = wco = array_ops.zeros([self._num_units], dtype=self.dtype) (cs_prev, h_prev) = state (_, cs, _, _, _, _, h) = _lstm_block_cell( inputs, cs_prev, h_prev, self._kernel, self._bias, wci=wci, wcf=wcf, wco=wco, forget_bias=self._forget_bias, cell_clip=self._cell_clip, use_peephole=self._use_peephole) new_state = rnn_cell_impl.LSTMStateTuple(cs, h) return h, new_state @six.add_metaclass(abc.ABCMeta) class LSTMBlockWrapper(base_layer.Layer): """This is a helper class that provides housekeeping for LSTM cells. This may be useful for alternative LSTM and similar type of cells. The subclasses must implement `_call_cell` method and `num_units` property. """ @abc.abstractproperty def num_units(self): """Number of units in this cell (output dimension).""" pass @abc.abstractmethod def _call_cell(self, inputs, initial_cell_state, initial_output, dtype, sequence_length): """Run this LSTM on inputs, starting from the given state. This method must be implemented by subclasses and does the actual work of calling the cell. Args: inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]` initial_cell_state: initial value for cell state, shape `[batch_size, self._num_units]` initial_output: initial value of cell output, shape `[batch_size, self._num_units]` dtype: The data type for the initial state and expected output. sequence_length: Specifies the length of each sequence in inputs. 
An int32 or int64 vector (tensor) size [batch_size], values in [0, time_len) or None. Returns: A pair containing: - State: A `3-D` tensor of shape `[time_len, batch_size, output_size]` - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]` """ pass def call(self, inputs, initial_state=None, dtype=None, sequence_length=None): """Run this LSTM on inputs, starting from the given state. Args: inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`. initial_state: a tuple `(initial_cell_state, initial_output)` with tensors of shape `[batch_size, self._num_units]`. If this is not provided, the cell is expected to create a zero initial state of type `dtype`. dtype: The data type for the initial state and expected output. Required if `initial_state` is not provided or RNN state has a heterogeneous dtype. sequence_length: Specifies the length of each sequence in inputs. An `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0, time_len).` Defaults to `time_len` for each element. Returns: A pair containing: - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]` or a list of time_len tensors of shape `[batch_size, output_size]`, to match the type of the `inputs`. - Final state: a tuple `(cell_state, output)` matching `initial_state`. Raises: ValueError: in case of shape mismatches """ is_list = isinstance(inputs, list) if is_list: inputs = array_ops.stack(inputs) inputs_shape = inputs.get_shape().with_rank(3) if not inputs_shape[2]: raise ValueError("Expecting inputs_shape[2] to be set: %s" % inputs_shape) batch_size = inputs_shape.dims[1].value if batch_size is None: batch_size = array_ops.shape(inputs)[1] time_len = inputs_shape.dims[0].value if time_len is None: time_len = array_ops.shape(inputs)[0] # Provide default values for initial_state and dtype if initial_state is None: if dtype is None: raise ValueError("Either initial_state or dtype needs to be specified") z = array_ops.zeros( array_ops.stack([batch_size, self.num_units]), dtype=dtype) initial_state = z, z else: if len(initial_state) != 2: raise ValueError( "Expecting initial_state to be a tuple with length 2 or None") if dtype is None: dtype = initial_state[0].dtype # create the actual cell if sequence_length is not None: sequence_length = ops.convert_to_tensor(sequence_length) initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence cell_states, outputs = self._call_cell( inputs, initial_cell_state, initial_output, dtype, sequence_length) if sequence_length is not None: # Mask out the part beyond sequence_length mask = array_ops.transpose( array_ops.sequence_mask(sequence_length, time_len, dtype=dtype), [1, 0]) mask = array_ops.tile( array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units]) outputs *= mask # Prepend initial states to cell_states and outputs for indexing to work # correctly,since we want to access the last valid state at # sequence_length - 1, which can even be -1, corresponding to the # initial state. 
mod_cell_states = array_ops.concat( [array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0) mod_outputs = array_ops.concat( [array_ops.expand_dims(initial_output, [0]), outputs], 0) final_cell_state = self._gather_states(mod_cell_states, sequence_length, batch_size) final_output = self._gather_states(mod_outputs, sequence_length, batch_size) else: # No sequence_lengths used: final state is the last state final_cell_state = cell_states[-1] final_output = outputs[-1] if is_list: # Input was a list, so return a list outputs = array_ops.unstack(outputs) final_state = rnn_cell_impl.LSTMStateTuple(final_cell_state, final_output) return outputs, final_state def _gather_states(self, data, indices, batch_size): """Produce `out`, s.t. out(i, j) = data(indices(i), i, j).""" return array_ops.gather_nd( data, array_ops.stack([indices, math_ops.range(batch_size)], axis=1)) class LSTMBlockFusedCell(LSTMBlockWrapper): """FusedRNNCell implementation of LSTM. This is an extremely efficient LSTM implementation, that uses a single TF op for the entire LSTM. It should be both faster and more memory-efficient than LSTMBlockCell defined above. The implementation is based on: http://arxiv.org/abs/1409.2329. We add forget_bias (default: 1) to the biases of the forget gate in order to reduce the scale of forgetting in the beginning of the training. The variable naming is consistent with `rnn_cell_impl.LSTMCell`. """ def __init__(self, num_units, forget_bias=1.0, cell_clip=None, use_peephole=False, reuse=None, dtype=None, name="lstm_fused_cell"): """Initialize the LSTM cell. Args: num_units: int, The number of units in the LSTM cell. forget_bias: float, The bias added to forget gates (see above). cell_clip: clip the cell to this value. Defaults is no cell clipping. use_peephole: Whether to use peephole connections or not. reuse: (optional) boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. dtype: the dtype of variables of this layer. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. By default this is "lstm_cell", for variable-name compatibility with `tf.compat.v1.nn.rnn_cell.LSTMCell`. """ super(LSTMBlockFusedCell, self).__init__( _reuse=reuse, name=name, dtype=dtype) self._num_units = num_units self._forget_bias = forget_bias self._cell_clip = cell_clip if cell_clip is not None else -1 self._use_peephole = use_peephole # Inputs must be 3-dimensional. self.input_spec = input_spec.InputSpec(ndim=3) @property def num_units(self): """Number of units in this cell (output dimension).""" return self._num_units def build(self, input_shape): input_size = input_shape.dims[2].value self._kernel = self.add_variable( "kernel", [input_size + self._num_units, self._num_units * 4]) self._bias = self.add_variable( "bias", [self._num_units * 4], initializer=init_ops.constant_initializer(0.0)) if self._use_peephole: self._w_i_diag = self.add_variable("w_i_diag", [self._num_units]) self._w_f_diag = self.add_variable("w_f_diag", [self._num_units]) self._w_o_diag = self.add_variable("w_o_diag", [self._num_units]) self.built = True def _call_cell(self, inputs, initial_cell_state=None, initial_output=None, dtype=None, sequence_length=None): """Run this LSTM on inputs, starting from the given state. 
Args: inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]` initial_cell_state: initial value for cell state, shape `[batch_size, self._num_units]` initial_output: initial value of cell output, shape `[batch_size, self._num_units]` dtype: The data type for the initial state and expected output. sequence_length: Specifies the length of each sequence in inputs. An `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0, time_len)` or None. Returns: A pair containing: - Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size, output_size]` - Output (h): A `3-D` tensor of shape `[time_len, batch_size, output_size]` """ inputs_shape = inputs.get_shape().with_rank(3) time_len = inputs_shape.dims[0].value if time_len is None: time_len = array_ops.shape(inputs)[0] if self._use_peephole: wci = self._w_i_diag wco = self._w_o_diag wcf = self._w_f_diag else: wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype) if sequence_length is None: max_seq_len = math_ops.cast(time_len, dtypes.int64) else: max_seq_len = math_ops.cast(math_ops.reduce_max(sequence_length), dtypes.int64) _, cs, _, _, _, _, h = gen_rnn_ops.block_lstm( seq_len_max=max_seq_len, x=inputs, cs_prev=initial_cell_state, h_prev=initial_output, w=self._kernel, wci=wci, wcf=wcf, wco=wco, b=self._bias, forget_bias=self._forget_bias, cell_clip=self._cell_clip, use_peephole=self._use_peephole) return cs, h
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/ops/lstm_ops.py
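To ground the LSTM block-cell equations documented in `_lstm_block_cell`, here is a NumPy sketch of one forward step with `use_peephole=False` and no cell clipping; the `[i, ci, f, o]` gate ordering follows the docstring, and all names are mine:

```python
# Sketch only: one LSTM block-cell step in NumPy, per the documented equations
# (use_peephole=False, no cell clipping). Shapes: x [batch, input_size],
# cs_prev/h_prev [batch, num_units], w [input_size + num_units, 4 * num_units].
import numpy as np

def sigmoid(z):
  return 1.0 / (1.0 + np.exp(-z))

def lstm_block_cell_step(x, cs_prev, h_prev, w, b, forget_bias=1.0):
  xh = np.concatenate([x, h_prev], axis=1)
  i, ci, f, o = np.split(xh @ w + b, 4, axis=1)  # gate pre-activations
  i = sigmoid(i)
  f = sigmoid(f + forget_bias)
  ci = np.tanh(ci)
  cs = ci * i + cs_prev * f                      # new cell state
  o = sigmoid(o)
  co = np.tanh(cs)
  h = co * o                                     # new output
  return cs, h
```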
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Module for constructing RNN Cells.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math from tensorflow.contrib.compiler import jit from tensorflow.contrib.layers.python.layers import layers from tensorflow.contrib.rnn.python.ops import core_rnn_cell from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import op_def_registry from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import activations from tensorflow.python.keras import initializers from tensorflow.python.keras.engine import input_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_impl # pylint: disable=unused-import from tensorflow.python.ops import nn_ops from tensorflow.python.ops import partitioned_variables # pylint: disable=unused-import from tensorflow.python.ops import random_ops from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import variable_scope as vs from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest def _get_concat_variable(name, shape, dtype, num_shards): """Get a sharded variable concatenated into one tensor.""" sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards) if len(sharded_variable) == 1: return sharded_variable[0] concat_name = name + "/concat" concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0" for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES): if value.name == concat_full_name: return value concat_variable = array_ops.concat(sharded_variable, 0, name=concat_name) ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES, concat_variable) return concat_variable def _get_sharded_variable(name, shape, dtype, num_shards): """Get a list of sharded variables with the given dtype.""" if num_shards > shape[0]: raise ValueError("Too many shards: shape=%s, num_shards=%d" % (shape, num_shards)) unit_shard_size = int(math.floor(shape[0] / num_shards)) remaining_rows = shape[0] - unit_shard_size * num_shards shards = [] for i in range(num_shards): current_size = unit_shard_size if i < remaining_rows: current_size += 1 shards.append( vs.get_variable( name + "_%d" % i, [current_size] + shape[1:], dtype=dtype)) return shards def _norm(g, b, inp, scope): shape = inp.get_shape()[-1:] gamma_init = init_ops.constant_initializer(g) beta_init = init_ops.constant_initializer(b) with vs.variable_scope(scope): # 
Initialize beta and gamma for use by layer_norm. vs.get_variable("gamma", shape=shape, initializer=gamma_init) vs.get_variable("beta", shape=shape, initializer=beta_init) normalized = layers.layer_norm(inp, reuse=True, scope=scope) return normalized class CoupledInputForgetGateLSTMCell(rnn_cell_impl.RNNCell): """Long short-term memory unit (LSTM) recurrent network cell. The default non-peephole implementation is based on: https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf Felix Gers, Jurgen Schmidhuber, and Fred Cummins. "Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999. The peephole implementation is based on: https://research.google.com/pubs/archive/43905.pdf Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory recurrent neural network architectures for large scale acoustic modeling." INTERSPEECH, 2014. The coupling of input and forget gate is based on: http://arxiv.org/pdf/1503.04069.pdf Greff et al. "LSTM: A Search Space Odyssey" The class uses optional peep-hole connections, and an optional projection layer. Layer normalization implementation is based on: https://arxiv.org/abs/1607.06450. "Layer Normalization" Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton and is applied before the internal nonlinearities. """ def __init__(self, num_units, use_peepholes=False, initializer=None, num_proj=None, proj_clip=None, num_unit_shards=1, num_proj_shards=1, forget_bias=1.0, state_is_tuple=True, activation=math_ops.tanh, reuse=None, layer_norm=False, norm_gain=1.0, norm_shift=0.0): """Initialize the parameters for an LSTM cell. Args: num_units: int, The number of units in the LSTM cell use_peepholes: bool, set True to enable diagonal/peephole connections. initializer: (optional) The initializer to use for the weight and projection matrices. num_proj: (optional) int, The output dimensionality for the projection matrices. If None, no projection is performed. proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is provided, then the projected values are clipped elementwise to within `[-proj_clip, proj_clip]`. num_unit_shards: How to split the weight matrix. If >1, the weight matrix is stored across num_unit_shards. num_proj_shards: How to split the projection matrix. If >1, the projection matrix is stored across num_proj_shards. forget_bias: Biases of the forget gate are initialized by default to 1 in order to reduce the scale of forgetting at the beginning of the training. state_is_tuple: If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. By default (False), they are concatenated along the column axis. This default behavior will soon be deprecated. activation: Activation function of the inner states. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. layer_norm: If `True`, layer normalization will be applied. norm_gain: float, The layer normalization gain initial value. If `layer_norm` has been set to `False`, this argument will be ignored. norm_shift: float, The layer normalization shift initial value. If `layer_norm` has been set to `False`, this argument will be ignored. """ super(CoupledInputForgetGateLSTMCell, self).__init__(_reuse=reuse) if not state_is_tuple: logging.warn("%s: Using a concatenated state is slower and will soon be " "deprecated. 
Use state_is_tuple=True.", self) self._num_units = num_units self._use_peepholes = use_peepholes self._initializer = initializer self._num_proj = num_proj self._proj_clip = proj_clip self._num_unit_shards = num_unit_shards self._num_proj_shards = num_proj_shards self._forget_bias = forget_bias self._state_is_tuple = state_is_tuple self._activation = activation self._reuse = reuse self._layer_norm = layer_norm self._norm_gain = norm_gain self._norm_shift = norm_shift if num_proj: self._state_size = ( rnn_cell_impl.LSTMStateTuple(num_units, num_proj) if state_is_tuple else num_units + num_proj) self._output_size = num_proj else: self._state_size = ( rnn_cell_impl.LSTMStateTuple(num_units, num_units) if state_is_tuple else 2 * num_units) self._output_size = num_units @property def state_size(self): return self._state_size @property def output_size(self): return self._output_size def call(self, inputs, state): """Run one step of LSTM. Args: inputs: input Tensor, 2D, batch x num_units. state: if `state_is_tuple` is False, this must be a state Tensor, `2-D, batch x state_size`. If `state_is_tuple` is True, this must be a tuple of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`. Returns: A tuple containing: - A `2-D, [batch x output_dim]`, Tensor representing the output of the LSTM after reading `inputs` when previous state was `state`. Here output_dim is: num_proj if num_proj was set, num_units otherwise. - Tensor(s) representing the new state of LSTM after reading `inputs` when the previous state was `state`. Same type and shape(s) as `state`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. """ sigmoid = math_ops.sigmoid num_proj = self._num_units if self._num_proj is None else self._num_proj if self._state_is_tuple: (c_prev, m_prev) = state else: c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units]) m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj]) dtype = inputs.dtype input_size = inputs.get_shape().with_rank(2).dims[1] if input_size.value is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") concat_w = _get_concat_variable( "W", [input_size.value + num_proj, 3 * self._num_units], dtype, self._num_unit_shards) b = vs.get_variable( "B", shape=[3 * self._num_units], initializer=init_ops.zeros_initializer(), dtype=dtype) # j = new_input, f = forget_gate, o = output_gate cell_inputs = array_ops.concat([inputs, m_prev], 1) lstm_matrix = math_ops.matmul(cell_inputs, concat_w) # If layer nomalization is applied, do not add bias if not self._layer_norm: lstm_matrix = nn_ops.bias_add(lstm_matrix, b) j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=3, axis=1) # Apply layer normalization if self._layer_norm: j = _norm(self._norm_gain, self._norm_shift, j, "transform") f = _norm(self._norm_gain, self._norm_shift, f, "forget") o = _norm(self._norm_gain, self._norm_shift, o, "output") # Diagonal connections if self._use_peepholes: w_f_diag = vs.get_variable( "W_F_diag", shape=[self._num_units], dtype=dtype) w_o_diag = vs.get_variable( "W_O_diag", shape=[self._num_units], dtype=dtype) if self._use_peepholes: f_act = sigmoid(f + self._forget_bias + w_f_diag * c_prev) else: f_act = sigmoid(f + self._forget_bias) c = (f_act * c_prev + (1 - f_act) * self._activation(j)) # Apply layer normalization if self._layer_norm: c = _norm(self._norm_gain, self._norm_shift, c, "state") if self._use_peepholes: m = sigmoid(o + w_o_diag * c) * self._activation(c) else: m = sigmoid(o) * 
self._activation(c) if self._num_proj is not None: concat_w_proj = _get_concat_variable("W_P", [self._num_units, self._num_proj], dtype, self._num_proj_shards) m = math_ops.matmul(m, concat_w_proj) if self._proj_clip is not None: # pylint: disable=invalid-unary-operand-type m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip) # pylint: enable=invalid-unary-operand-type new_state = ( rnn_cell_impl.LSTMStateTuple(c, m) if self._state_is_tuple else array_ops.concat([c, m], 1)) return m, new_state class TimeFreqLSTMCell(rnn_cell_impl.RNNCell): """Time-Frequency Long short-term memory unit (LSTM) recurrent network cell. This implementation is based on: Tara N. Sainath and Bo Li "Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures for LVCSR Tasks." submitted to INTERSPEECH, 2016. It uses peep-hole connections and optional cell clipping. """ def __init__(self, num_units, use_peepholes=False, cell_clip=None, initializer=None, num_unit_shards=1, forget_bias=1.0, feature_size=None, frequency_skip=1, reuse=None): """Initialize the parameters for an LSTM cell. Args: num_units: int, The number of units in the LSTM cell use_peepholes: bool, set True to enable diagonal/peephole connections. cell_clip: (optional) A float value, if provided the cell state is clipped by this value prior to the cell output activation. initializer: (optional) The initializer to use for the weight and projection matrices. num_unit_shards: int, How to split the weight matrix. If >1, the weight matrix is stored across num_unit_shards. forget_bias: float, Biases of the forget gate are initialized by default to 1 in order to reduce the scale of forgetting at the beginning of the training. feature_size: int, The size of the input feature the LSTM spans over. frequency_skip: int, The amount the LSTM filter is shifted by in frequency. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. """ super(TimeFreqLSTMCell, self).__init__(_reuse=reuse) self._num_units = num_units self._use_peepholes = use_peepholes self._cell_clip = cell_clip self._initializer = initializer self._num_unit_shards = num_unit_shards self._forget_bias = forget_bias self._feature_size = feature_size self._frequency_skip = frequency_skip self._state_size = 2 * num_units self._output_size = num_units self._reuse = reuse @property def output_size(self): return self._output_size @property def state_size(self): return self._state_size def call(self, inputs, state): """Run one step of LSTM. Args: inputs: input Tensor, 2D, batch x num_units. state: state Tensor, 2D, batch x state_size. Returns: A tuple containing: - A 2D, batch x output_dim, Tensor representing the output of the LSTM after reading "inputs" when previous state was "state". Here output_dim is num_units. - A 2D, batch x state_size, Tensor representing the new state of LSTM after reading "inputs" when previous state was "state". Raises: ValueError: if an input_size was specified and the provided inputs have a different dimension. 
""" sigmoid = math_ops.sigmoid tanh = math_ops.tanh freq_inputs = self._make_tf_features(inputs) dtype = inputs.dtype actual_input_size = freq_inputs[0].get_shape().as_list()[1] concat_w = _get_concat_variable( "W", [actual_input_size + 2 * self._num_units, 4 * self._num_units], dtype, self._num_unit_shards) b = vs.get_variable( "B", shape=[4 * self._num_units], initializer=init_ops.zeros_initializer(), dtype=dtype) # Diagonal connections if self._use_peepholes: w_f_diag = vs.get_variable( "W_F_diag", shape=[self._num_units], dtype=dtype) w_i_diag = vs.get_variable( "W_I_diag", shape=[self._num_units], dtype=dtype) w_o_diag = vs.get_variable( "W_O_diag", shape=[self._num_units], dtype=dtype) # initialize the first freq state to be zero m_prev_freq = array_ops.zeros( [inputs.shape.dims[0].value or inputs.get_shape()[0], self._num_units], dtype) for fq in range(len(freq_inputs)): c_prev = array_ops.slice(state, [0, 2 * fq * self._num_units], [-1, self._num_units]) m_prev = array_ops.slice(state, [0, (2 * fq + 1) * self._num_units], [-1, self._num_units]) # i = input_gate, j = new_input, f = forget_gate, o = output_gate cell_inputs = array_ops.concat([freq_inputs[fq], m_prev, m_prev_freq], 1) lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b) i, j, f, o = array_ops.split( value=lstm_matrix, num_or_size_splits=4, axis=1) if self._use_peepholes: c = ( sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev + sigmoid(i + w_i_diag * c_prev) * tanh(j)) else: c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j)) if self._cell_clip is not None: # pylint: disable=invalid-unary-operand-type c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip) # pylint: enable=invalid-unary-operand-type if self._use_peepholes: m = sigmoid(o + w_o_diag * c) * tanh(c) else: m = sigmoid(o) * tanh(c) m_prev_freq = m if fq == 0: state_out = array_ops.concat([c, m], 1) m_out = m else: state_out = array_ops.concat([state_out, c, m], 1) m_out = array_ops.concat([m_out, m], 1) return m_out, state_out def _make_tf_features(self, input_feat): """Make the frequency features. Args: input_feat: input Tensor, 2D, batch x num_units. Returns: A list of frequency features, with each element containing: - A 2D, batch x output_dim, Tensor representing the time-frequency feature for that frequency index. Here output_dim is feature_size. Raises: ValueError: if input_size cannot be inferred from static shape inference. """ input_size = input_feat.get_shape().with_rank(2).dims[-1].value if input_size is None: raise ValueError("Cannot infer input_size from static shape inference.") num_feats = int( (input_size - self._feature_size) / (self._frequency_skip)) + 1 freq_inputs = [] for f in range(num_feats): cur_input = array_ops.slice(input_feat, [0, f * self._frequency_skip], [-1, self._feature_size]) freq_inputs.append(cur_input) return freq_inputs class GridLSTMCell(rnn_cell_impl.RNNCell): """Grid Long short-term memory unit (LSTM) recurrent network cell. The default is based on: Nal Kalchbrenner, Ivo Danihelka and Alex Graves "Grid Long Short-Term Memory," Proc. ICLR 2016. http://arxiv.org/abs/1507.01526 When peephole connections are used, the implementation is based on: Tara N. Sainath and Bo Li "Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures for LVCSR Tasks." submitted to INTERSPEECH, 2016. The code uses optional peephole connections, shared_weights and cell clipping. 
""" def __init__(self, num_units, use_peepholes=False, share_time_frequency_weights=False, cell_clip=None, initializer=None, num_unit_shards=1, forget_bias=1.0, feature_size=None, frequency_skip=None, num_frequency_blocks=None, start_freqindex_list=None, end_freqindex_list=None, couple_input_forget_gates=False, state_is_tuple=True, reuse=None): """Initialize the parameters for an LSTM cell. Args: num_units: int, The number of units in the LSTM cell use_peepholes: (optional) bool, default False. Set True to enable diagonal/peephole connections. share_time_frequency_weights: (optional) bool, default False. Set True to enable shared cell weights between time and frequency LSTMs. cell_clip: (optional) A float value, default None, if provided the cell state is clipped by this value prior to the cell output activation. initializer: (optional) The initializer to use for the weight and projection matrices, default None. num_unit_shards: (optional) int, default 1, How to split the weight matrix. If > 1, the weight matrix is stored across num_unit_shards. forget_bias: (optional) float, default 1.0, The initial bias of the forget gates, used to reduce the scale of forgetting at the beginning of the training. feature_size: (optional) int, default None, The size of the input feature the LSTM spans over. frequency_skip: (optional) int, default None, The amount the LSTM filter is shifted by in frequency. num_frequency_blocks: [required] A list of frequency blocks needed to cover the whole input feature splitting defined by start_freqindex_list and end_freqindex_list. start_freqindex_list: [optional], list of ints, default None, The starting frequency index for each frequency block. end_freqindex_list: [optional], list of ints, default None. The ending frequency index for each frequency block. couple_input_forget_gates: (optional) bool, default False, Whether to couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce model parameters and computation cost. state_is_tuple: If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. By default (False), they are concatenated along the column axis. This default behavior will soon be deprecated. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. Raises: ValueError: if the num_frequency_blocks list is not specified """ super(GridLSTMCell, self).__init__(_reuse=reuse) if not state_is_tuple: logging.warn("%s: Using a concatenated state is slower and will soon be " "deprecated. 
Use state_is_tuple=True.", self) self._num_units = num_units self._use_peepholes = use_peepholes self._share_time_frequency_weights = share_time_frequency_weights self._couple_input_forget_gates = couple_input_forget_gates self._state_is_tuple = state_is_tuple self._cell_clip = cell_clip self._initializer = initializer self._num_unit_shards = num_unit_shards self._forget_bias = forget_bias self._feature_size = feature_size self._frequency_skip = frequency_skip self._start_freqindex_list = start_freqindex_list self._end_freqindex_list = end_freqindex_list self._num_frequency_blocks = num_frequency_blocks self._total_blocks = 0 self._reuse = reuse if self._num_frequency_blocks is None: raise ValueError("Must specify num_frequency_blocks") for block_index in range(len(self._num_frequency_blocks)): self._total_blocks += int(self._num_frequency_blocks[block_index]) if state_is_tuple: state_names = "" for block_index in range(len(self._num_frequency_blocks)): for freq_index in range(self._num_frequency_blocks[block_index]): name_prefix = "state_f%02d_b%02d" % (freq_index, block_index) state_names += ("%s_c, %s_m," % (name_prefix, name_prefix)) self._state_tuple_type = collections.namedtuple("GridLSTMStateTuple", state_names.strip(",")) self._state_size = self._state_tuple_type(*( [num_units, num_units] * self._total_blocks)) else: self._state_tuple_type = None self._state_size = num_units * self._total_blocks * 2 self._output_size = num_units * self._total_blocks * 2 @property def output_size(self): return self._output_size @property def state_size(self): return self._state_size @property def state_tuple_type(self): return self._state_tuple_type def call(self, inputs, state): """Run one step of LSTM. Args: inputs: input Tensor, 2D, [batch, feature_size]. state: Tensor or tuple of Tensors, 2D, [batch, state_size], depends on the flag self._state_is_tuple. Returns: A tuple containing: - A 2D, [batch, output_dim], Tensor representing the output of the LSTM after reading "inputs" when previous state was "state". Here output_dim is num_units. - A 2D, [batch, state_size], Tensor representing the new state of LSTM after reading "inputs" when previous state was "state". Raises: ValueError: if an input_size was specified and the provided inputs have a different dimension. """ batch_size = tensor_shape.dimension_value( inputs.shape[0]) or array_ops.shape(inputs)[0] freq_inputs = self._make_tf_features(inputs) m_out_lst = [] state_out_lst = [] for block in range(len(freq_inputs)): m_out_lst_current, state_out_lst_current = self._compute( freq_inputs[block], block, state, batch_size, state_is_tuple=self._state_is_tuple) m_out_lst.extend(m_out_lst_current) state_out_lst.extend(state_out_lst_current) if self._state_is_tuple: state_out = self._state_tuple_type(*state_out_lst) else: state_out = array_ops.concat(state_out_lst, 1) m_out = array_ops.concat(m_out_lst, 1) return m_out, state_out def _compute(self, freq_inputs, block, state, batch_size, state_prefix="state", state_is_tuple=True): """Run the actual computation of one step LSTM. Args: freq_inputs: list of Tensors, 2D, [batch, feature_size]. block: int, current frequency block index to process. state: Tensor or tuple of Tensors, 2D, [batch, state_size], it depends on the flag state_is_tuple. batch_size: int32, batch size. state_prefix: (optional) string, name prefix for states, defaults to "state". state_is_tuple: boolean, indicates whether the state is a tuple or Tensor. 
Returns: A tuple, containing: - A list of [batch, output_dim] Tensors, representing the output of the LSTM given the inputs and state. - A list of [batch, state_size] Tensors, representing the LSTM state values given the inputs and previous state. """ sigmoid = math_ops.sigmoid tanh = math_ops.tanh num_gates = 3 if self._couple_input_forget_gates else 4 dtype = freq_inputs[0].dtype actual_input_size = freq_inputs[0].get_shape().as_list()[1] concat_w_f = _get_concat_variable( "W_f_%d" % block, [actual_input_size + 2 * self._num_units, num_gates * self._num_units], dtype, self._num_unit_shards) b_f = vs.get_variable( "B_f_%d" % block, shape=[num_gates * self._num_units], initializer=init_ops.zeros_initializer(), dtype=dtype) if not self._share_time_frequency_weights: concat_w_t = _get_concat_variable("W_t_%d" % block, [ actual_input_size + 2 * self._num_units, num_gates * self._num_units ], dtype, self._num_unit_shards) b_t = vs.get_variable( "B_t_%d" % block, shape=[num_gates * self._num_units], initializer=init_ops.zeros_initializer(), dtype=dtype) if self._use_peepholes: # Diagonal connections if not self._couple_input_forget_gates: w_f_diag_freqf = vs.get_variable( "W_F_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype) w_f_diag_freqt = vs.get_variable( "W_F_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype) w_i_diag_freqf = vs.get_variable( "W_I_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype) w_i_diag_freqt = vs.get_variable( "W_I_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype) w_o_diag_freqf = vs.get_variable( "W_O_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype) w_o_diag_freqt = vs.get_variable( "W_O_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype) if not self._share_time_frequency_weights: if not self._couple_input_forget_gates: w_f_diag_timef = vs.get_variable( "W_F_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype) w_f_diag_timet = vs.get_variable( "W_F_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype) w_i_diag_timef = vs.get_variable( "W_I_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype) w_i_diag_timet = vs.get_variable( "W_I_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype) w_o_diag_timef = vs.get_variable( "W_O_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype) w_o_diag_timet = vs.get_variable( "W_O_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype) # initialize the first freq state to be zero m_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype) c_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype) for freq_index in range(len(freq_inputs)): if state_is_tuple: name_prefix = "%s_f%02d_b%02d" % (state_prefix, freq_index, block) c_prev_time = getattr(state, name_prefix + "_c") m_prev_time = getattr(state, name_prefix + "_m") else: c_prev_time = array_ops.slice( state, [0, 2 * freq_index * self._num_units], [-1, self._num_units]) m_prev_time = array_ops.slice( state, [0, (2 * freq_index + 1) * self._num_units], [-1, self._num_units]) # i = input_gate, j = new_input, f = forget_gate, o = output_gate cell_inputs = array_ops.concat( [freq_inputs[freq_index], m_prev_time, m_prev_freq], 1) # F-LSTM lstm_matrix_freq = nn_ops.bias_add( math_ops.matmul(cell_inputs, concat_w_f), b_f) if self._couple_input_forget_gates: i_freq, j_freq, o_freq = array_ops.split( value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1) f_freq = None else: i_freq, j_freq, f_freq, o_freq = array_ops.split( value=lstm_matrix_freq, 
num_or_size_splits=num_gates, axis=1) # T-LSTM if self._share_time_frequency_weights: i_time = i_freq j_time = j_freq f_time = f_freq o_time = o_freq else: lstm_matrix_time = nn_ops.bias_add( math_ops.matmul(cell_inputs, concat_w_t), b_t) if self._couple_input_forget_gates: i_time, j_time, o_time = array_ops.split( value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1) f_time = None else: i_time, j_time, f_time, o_time = array_ops.split( value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1) # F-LSTM c_freq # input gate activations if self._use_peepholes: i_freq_g = sigmoid(i_freq + w_i_diag_freqf * c_prev_freq + w_i_diag_freqt * c_prev_time) else: i_freq_g = sigmoid(i_freq) # forget gate activations if self._couple_input_forget_gates: f_freq_g = 1.0 - i_freq_g else: if self._use_peepholes: f_freq_g = sigmoid(f_freq + self._forget_bias + w_f_diag_freqf * c_prev_freq + w_f_diag_freqt * c_prev_time) else: f_freq_g = sigmoid(f_freq + self._forget_bias) # cell state c_freq = f_freq_g * c_prev_freq + i_freq_g * tanh(j_freq) if self._cell_clip is not None: # pylint: disable=invalid-unary-operand-type c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip, self._cell_clip) # pylint: enable=invalid-unary-operand-type # T-LSTM c_freq # input gate activations if self._use_peepholes: if self._share_time_frequency_weights: i_time_g = sigmoid(i_time + w_i_diag_freqf * c_prev_freq + w_i_diag_freqt * c_prev_time) else: i_time_g = sigmoid(i_time + w_i_diag_timef * c_prev_freq + w_i_diag_timet * c_prev_time) else: i_time_g = sigmoid(i_time) # forget gate activations if self._couple_input_forget_gates: f_time_g = 1.0 - i_time_g else: if self._use_peepholes: if self._share_time_frequency_weights: f_time_g = sigmoid(f_time + self._forget_bias + w_f_diag_freqf * c_prev_freq + w_f_diag_freqt * c_prev_time) else: f_time_g = sigmoid(f_time + self._forget_bias + w_f_diag_timef * c_prev_freq + w_f_diag_timet * c_prev_time) else: f_time_g = sigmoid(f_time + self._forget_bias) # cell state c_time = f_time_g * c_prev_time + i_time_g * tanh(j_time) if self._cell_clip is not None: # pylint: disable=invalid-unary-operand-type c_time = clip_ops.clip_by_value(c_time, -self._cell_clip, self._cell_clip) # pylint: enable=invalid-unary-operand-type # F-LSTM m_freq if self._use_peepholes: m_freq = sigmoid(o_freq + w_o_diag_freqf * c_freq + w_o_diag_freqt * c_time) * tanh(c_freq) else: m_freq = sigmoid(o_freq) * tanh(c_freq) # T-LSTM m_time if self._use_peepholes: if self._share_time_frequency_weights: m_time = sigmoid(o_time + w_o_diag_freqf * c_freq + w_o_diag_freqt * c_time) * tanh(c_time) else: m_time = sigmoid(o_time + w_o_diag_timef * c_freq + w_o_diag_timet * c_time) * tanh(c_time) else: m_time = sigmoid(o_time) * tanh(c_time) m_prev_freq = m_freq c_prev_freq = c_freq # Concatenate the outputs for T-LSTM and F-LSTM for each shift if freq_index == 0: state_out_lst = [c_time, m_time] m_out_lst = [m_time, m_freq] else: state_out_lst.extend([c_time, m_time]) m_out_lst.extend([m_time, m_freq]) return m_out_lst, state_out_lst def _make_tf_features(self, input_feat, slice_offset=0): """Make the frequency features. Args: input_feat: input Tensor, 2D, [batch, num_units]. slice_offset: (optional) Python int, default 0, the slicing offset is only used for the backward processing in the BidirectionalGridLSTMCell. It specifies a different starting point instead of always 0 to enable the forward and backward processing look at different frequency blocks. 
Returns: A list of frequency features, with each element containing: - A 2D, [batch, output_dim], Tensor representing the time-frequency feature for that frequency index. Here output_dim is feature_size. Raises: ValueError: if input_size cannot be inferred from static shape inference. """ input_size = input_feat.get_shape().with_rank(2).dims[-1].value if input_size is None: raise ValueError("Cannot infer input_size from static shape inference.") if slice_offset > 0: # Padding to the end inputs = array_ops.pad(input_feat, array_ops.constant( [0, 0, 0, slice_offset], shape=[2, 2], dtype=dtypes.int32), "CONSTANT") elif slice_offset < 0: # Padding to the front inputs = array_ops.pad(input_feat, array_ops.constant( [0, 0, -slice_offset, 0], shape=[2, 2], dtype=dtypes.int32), "CONSTANT") slice_offset = 0 else: inputs = input_feat freq_inputs = [] if not self._start_freqindex_list: if len(self._num_frequency_blocks) != 1: raise ValueError("Length of num_frequency_blocks" " is not 1, but instead is %d" % len(self._num_frequency_blocks)) num_feats = int( (input_size - self._feature_size) / (self._frequency_skip)) + 1 if num_feats != self._num_frequency_blocks[0]: raise ValueError( "Invalid num_frequency_blocks, requires %d but gets %d, please" " check the input size and filter config are correct." % (self._num_frequency_blocks[0], num_feats)) block_inputs = [] for f in range(num_feats): cur_input = array_ops.slice( inputs, [0, slice_offset + f * self._frequency_skip], [-1, self._feature_size]) block_inputs.append(cur_input) freq_inputs.append(block_inputs) else: if len(self._start_freqindex_list) != len(self._end_freqindex_list): raise ValueError("Length of start and end freqindex_list" " does not match %d %d", len(self._start_freqindex_list), len(self._end_freqindex_list)) if len(self._num_frequency_blocks) != len(self._start_freqindex_list): raise ValueError("Length of num_frequency_blocks" " is not equal to start_freqindex_list %d %d", len(self._num_frequency_blocks), len(self._start_freqindex_list)) for b in range(len(self._start_freqindex_list)): start_index = self._start_freqindex_list[b] end_index = self._end_freqindex_list[b] cur_size = end_index - start_index block_feats = int( (cur_size - self._feature_size) / (self._frequency_skip)) + 1 if block_feats != self._num_frequency_blocks[b]: raise ValueError( "Invalid num_frequency_blocks, requires %d but gets %d, please" " check the input size and filter config are correct." % (self._num_frequency_blocks[b], block_feats)) block_inputs = [] for f in range(block_feats): cur_input = array_ops.slice( inputs, [0, start_index + slice_offset + f * self._frequency_skip], [-1, self._feature_size]) block_inputs.append(cur_input) freq_inputs.append(block_inputs) return freq_inputs class BidirectionalGridLSTMCell(GridLSTMCell): """Bidirectional GridLstm cell. The bidirection connection is only used in the frequency direction, which hence doesn't affect the time direction's real-time processing that is required for online recognition systems. The current implementation uses different weights for the two directions. """ def __init__(self, num_units, use_peepholes=False, share_time_frequency_weights=False, cell_clip=None, initializer=None, num_unit_shards=1, forget_bias=1.0, feature_size=None, frequency_skip=None, num_frequency_blocks=None, start_freqindex_list=None, end_freqindex_list=None, couple_input_forget_gates=False, backward_slice_offset=0, reuse=None): """Initialize the parameters for an LSTM cell. 
Args: num_units: int, The number of units in the LSTM cell use_peepholes: (optional) bool, default False. Set True to enable diagonal/peephole connections. share_time_frequency_weights: (optional) bool, default False. Set True to enable shared cell weights between time and frequency LSTMs. cell_clip: (optional) A float value, default None, if provided the cell state is clipped by this value prior to the cell output activation. initializer: (optional) The initializer to use for the weight and projection matrices, default None. num_unit_shards: (optional) int, default 1, How to split the weight matrix. If > 1, the weight matrix is stored across num_unit_shards. forget_bias: (optional) float, default 1.0, The initial bias of the forget gates, used to reduce the scale of forgetting at the beginning of the training. feature_size: (optional) int, default None, The size of the input feature the LSTM spans over. frequency_skip: (optional) int, default None, The amount the LSTM filter is shifted by in frequency. num_frequency_blocks: [required] A list of frequency blocks needed to cover the whole input feature splitting defined by start_freqindex_list and end_freqindex_list. start_freqindex_list: [optional], list of ints, default None, The starting frequency index for each frequency block. end_freqindex_list: [optional], list of ints, default None. The ending frequency index for each frequency block. couple_input_forget_gates: (optional) bool, default False, Whether to couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce model parameters and computation cost. backward_slice_offset: (optional) int32, default 0, the starting offset to slice the feature for backward processing. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. """ super(BidirectionalGridLSTMCell, self).__init__( num_units, use_peepholes, share_time_frequency_weights, cell_clip, initializer, num_unit_shards, forget_bias, feature_size, frequency_skip, num_frequency_blocks, start_freqindex_list, end_freqindex_list, couple_input_forget_gates, True, reuse) self._backward_slice_offset = int(backward_slice_offset) state_names = "" for direction in ["fwd", "bwd"]: for block_index in range(len(self._num_frequency_blocks)): for freq_index in range(self._num_frequency_blocks[block_index]): name_prefix = "%s_state_f%02d_b%02d" % (direction, freq_index, block_index) state_names += ("%s_c, %s_m," % (name_prefix, name_prefix)) self._state_tuple_type = collections.namedtuple( "BidirectionalGridLSTMStateTuple", state_names.strip(",")) self._state_size = self._state_tuple_type(*( [num_units, num_units] * self._total_blocks * 2)) self._output_size = 2 * num_units * self._total_blocks * 2 def call(self, inputs, state): """Run one step of LSTM. Args: inputs: input Tensor, 2D, [batch, num_units]. state: tuple of Tensors, 2D, [batch, state_size]. Returns: A tuple containing: - A 2D, [batch, output_dim], Tensor representing the output of the LSTM after reading "inputs" when previous state was "state". Here output_dim is num_units. - A 2D, [batch, state_size], Tensor representing the new state of LSTM after reading "inputs" when previous state was "state". Raises: ValueError: if an input_size was specified and the provided inputs have a different dimension. 
""" batch_size = tensor_shape.dimension_value( inputs.shape[0]) or array_ops.shape(inputs)[0] fwd_inputs = self._make_tf_features(inputs) if self._backward_slice_offset: bwd_inputs = self._make_tf_features(inputs, self._backward_slice_offset) else: bwd_inputs = fwd_inputs # Forward processing with vs.variable_scope("fwd"): fwd_m_out_lst = [] fwd_state_out_lst = [] for block in range(len(fwd_inputs)): fwd_m_out_lst_current, fwd_state_out_lst_current = self._compute( fwd_inputs[block], block, state, batch_size, state_prefix="fwd_state", state_is_tuple=True) fwd_m_out_lst.extend(fwd_m_out_lst_current) fwd_state_out_lst.extend(fwd_state_out_lst_current) # Backward processing bwd_m_out_lst = [] bwd_state_out_lst = [] with vs.variable_scope("bwd"): for block in range(len(bwd_inputs)): # Reverse the blocks bwd_inputs_reverse = bwd_inputs[block][::-1] bwd_m_out_lst_current, bwd_state_out_lst_current = self._compute( bwd_inputs_reverse, block, state, batch_size, state_prefix="bwd_state", state_is_tuple=True) bwd_m_out_lst.extend(bwd_m_out_lst_current) bwd_state_out_lst.extend(bwd_state_out_lst_current) state_out = self._state_tuple_type(*(fwd_state_out_lst + bwd_state_out_lst)) # Outputs are always concated as it is never used separately. m_out = array_ops.concat(fwd_m_out_lst + bwd_m_out_lst, 1) return m_out, state_out # pylint: disable=protected-access _Linear = core_rnn_cell._Linear # pylint: disable=invalid-name # pylint: enable=protected-access class AttentionCellWrapper(rnn_cell_impl.RNNCell): """Basic attention cell wrapper. Implementation based on https://arxiv.org/abs/1601.06733. """ def __init__(self, cell, attn_length, attn_size=None, attn_vec_size=None, input_size=None, state_is_tuple=True, reuse=None): """Create a cell with attention. Args: cell: an RNNCell, an attention is added to it. attn_length: integer, the size of an attention window. attn_size: integer, the size of an attention vector. Equal to cell.output_size by default. attn_vec_size: integer, the number of convolutional features calculated on attention state and a size of the hidden layer built from base cell state. Equal attn_size to by default. input_size: integer, the size of a hidden linear layer, built from inputs and attention. Derived from the input tensor by default. state_is_tuple: If True, accepted and returned states are n-tuples, where `n = len(cells)`. By default (False), the states are all concatenated along the column axis. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. Raises: TypeError: if cell is not an RNNCell. ValueError: if cell returns a state tuple but the flag `state_is_tuple` is `False` or if attn_length is zero or less. """ super(AttentionCellWrapper, self).__init__(_reuse=reuse) rnn_cell_impl.assert_like_rnncell("cell", cell) if nest.is_sequence(cell.state_size) and not state_is_tuple: raise ValueError( "Cell returns tuple of states, but the flag " "state_is_tuple is not set. State size is: %s" % str(cell.state_size)) if attn_length <= 0: raise ValueError( "attn_length should be greater than zero, got %s" % str(attn_length)) if not state_is_tuple: logging.warn("%s: Using a concatenated state is slower and will soon be " "deprecated. 
Use state_is_tuple=True.", self) if attn_size is None: attn_size = cell.output_size if attn_vec_size is None: attn_vec_size = attn_size self._state_is_tuple = state_is_tuple self._cell = cell self._attn_vec_size = attn_vec_size self._input_size = input_size self._attn_size = attn_size self._attn_length = attn_length self._reuse = reuse self._linear1 = None self._linear2 = None self._linear3 = None @property def state_size(self): size = (self._cell.state_size, self._attn_size, self._attn_size * self._attn_length) if self._state_is_tuple: return size else: return sum(list(size)) @property def output_size(self): return self._attn_size def call(self, inputs, state): """Long short-term memory cell with attention (LSTMA).""" if self._state_is_tuple: state, attns, attn_states = state else: states = state state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size]) attns = array_ops.slice(states, [0, self._cell.state_size], [-1, self._attn_size]) attn_states = array_ops.slice( states, [0, self._cell.state_size + self._attn_size], [-1, self._attn_size * self._attn_length]) attn_states = array_ops.reshape(attn_states, [-1, self._attn_length, self._attn_size]) input_size = self._input_size if input_size is None: input_size = inputs.get_shape().as_list()[1] if self._linear1 is None: self._linear1 = _Linear([inputs, attns], input_size, True) inputs = self._linear1([inputs, attns]) cell_output, new_state = self._cell(inputs, state) if self._state_is_tuple: new_state_cat = array_ops.concat(nest.flatten(new_state), 1) else: new_state_cat = new_state new_attns, new_attn_states = self._attention(new_state_cat, attn_states) with vs.variable_scope("attn_output_projection"): if self._linear2 is None: self._linear2 = _Linear([cell_output, new_attns], self._attn_size, True) output = self._linear2([cell_output, new_attns]) new_attn_states = array_ops.concat( [new_attn_states, array_ops.expand_dims(output, 1)], 1) new_attn_states = array_ops.reshape( new_attn_states, [-1, self._attn_length * self._attn_size]) new_state = (new_state, new_attns, new_attn_states) if not self._state_is_tuple: new_state = array_ops.concat(list(new_state), 1) return output, new_state def _attention(self, query, attn_states): conv2d = nn_ops.conv2d reduce_sum = math_ops.reduce_sum softmax = nn_ops.softmax tanh = math_ops.tanh with vs.variable_scope("attention"): k = vs.get_variable("attn_w", [1, 1, self._attn_size, self._attn_vec_size]) v = vs.get_variable("attn_v", [self._attn_vec_size]) hidden = array_ops.reshape(attn_states, [-1, self._attn_length, 1, self._attn_size]) hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME") if self._linear3 is None: self._linear3 = _Linear(query, self._attn_vec_size, True) y = self._linear3(query) y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size]) s = reduce_sum(v * tanh(hidden_features + y), [2, 3]) a = softmax(s) d = reduce_sum( array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2]) new_attns = array_ops.reshape(d, [-1, self._attn_size]) new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1]) return new_attns, new_attn_states class HighwayWrapper(rnn_cell_impl.RNNCell): """RNNCell wrapper that adds highway connection on cell input and output. Based on: R. K. Srivastava, K. Greff, and J. Schmidhuber, "Highway networks", arXiv preprint arXiv:1505.00387, 2015. https://arxiv.org/abs/1505.00387 """ def __init__(self, cell, couple_carry_transform_gates=True, carry_bias_init=1.0): """Constructs a `HighwayWrapper` for `cell`. 
Args: cell: An instance of `RNNCell`. couple_carry_transform_gates: boolean, should the Carry and Transform gate be coupled. carry_bias_init: float, carry gates bias initialization. """ self._cell = cell self._couple_carry_transform_gates = couple_carry_transform_gates self._carry_bias_init = carry_bias_init @property def state_size(self): return self._cell.state_size @property def output_size(self): return self._cell.output_size def zero_state(self, batch_size, dtype): with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): return self._cell.zero_state(batch_size, dtype) def _highway(self, inp, out): input_size = inp.get_shape().with_rank(2).dims[1].value carry_weight = vs.get_variable("carry_w", [input_size, input_size]) carry_bias = vs.get_variable( "carry_b", [input_size], initializer=init_ops.constant_initializer(self._carry_bias_init)) carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias)) if self._couple_carry_transform_gates: transform = 1 - carry else: transform_weight = vs.get_variable("transform_w", [input_size, input_size]) transform_bias = vs.get_variable( "transform_b", [input_size], initializer=init_ops.constant_initializer(-self._carry_bias_init)) transform = math_ops.sigmoid( nn_ops.xw_plus_b(inp, transform_weight, transform_bias)) return inp * carry + out * transform def __call__(self, inputs, state, scope=None): """Run the cell and add its inputs to its outputs. Args: inputs: cell inputs. state: cell state. scope: optional cell scope. Returns: Tuple of cell outputs and new state. Raises: TypeError: If cell inputs and outputs have different structure (type). ValueError: If cell inputs and outputs have different structure (value). """ outputs, new_state = self._cell(inputs, state, scope=scope) nest.assert_same_structure(inputs, outputs) # Ensure shapes match def assert_shape_match(inp, out): inp.get_shape().assert_is_compatible_with(out.get_shape()) nest.map_structure(assert_shape_match, inputs, outputs) res_outputs = nest.map_structure(self._highway, inputs, outputs) return (res_outputs, new_state) class LayerNormBasicLSTMCell(rnn_cell_impl.RNNCell): """LSTM unit with layer normalization and recurrent dropout. This class adds layer normalization and recurrent dropout to a basic LSTM unit. Layer normalization implementation is based on: https://arxiv.org/abs/1607.06450. "Layer Normalization" Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton and is applied before the internal nonlinearities. Recurrent dropout is base on: https://arxiv.org/abs/1603.05118 "Recurrent Dropout without Memory Loss" Stanislau Semeniuta, Aliaksei Severyn, Erhardt Barth. """ def __init__(self, num_units, forget_bias=1.0, input_size=None, activation=math_ops.tanh, layer_norm=True, norm_gain=1.0, norm_shift=0.0, dropout_keep_prob=1.0, dropout_prob_seed=None, reuse=None): """Initializes the basic LSTM cell. Args: num_units: int, The number of units in the LSTM cell. forget_bias: float, The bias added to forget gates (see above). input_size: Deprecated and unused. activation: Activation function of the inner states. layer_norm: If `True`, layer normalization will be applied. norm_gain: float, The layer normalization gain initial value. If `layer_norm` has been set to `False`, this argument will be ignored. norm_shift: float, The layer normalization shift initial value. If `layer_norm` has been set to `False`, this argument will be ignored. dropout_keep_prob: unit Tensor or float between 0 and 1 representing the recurrent dropout probability value. 
If float and 1.0, no dropout will be applied. dropout_prob_seed: (optional) integer, the randomness seed. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. """ super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse) if input_size is not None: logging.warn("%s: The input_size parameter is deprecated.", self) self._num_units = num_units self._activation = activation self._forget_bias = forget_bias self._keep_prob = dropout_keep_prob self._seed = dropout_prob_seed self._layer_norm = layer_norm self._norm_gain = norm_gain self._norm_shift = norm_shift self._reuse = reuse @property def state_size(self): return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units) @property def output_size(self): return self._num_units def _norm(self, inp, scope, dtype=dtypes.float32): shape = inp.get_shape()[-1:] gamma_init = init_ops.constant_initializer(self._norm_gain) beta_init = init_ops.constant_initializer(self._norm_shift) with vs.variable_scope(scope): # Initialize beta and gamma for use by layer_norm. vs.get_variable("gamma", shape=shape, initializer=gamma_init, dtype=dtype) vs.get_variable("beta", shape=shape, initializer=beta_init, dtype=dtype) normalized = layers.layer_norm(inp, reuse=True, scope=scope) return normalized def _linear(self, args): out_size = 4 * self._num_units proj_size = args.get_shape()[-1] dtype = args.dtype weights = vs.get_variable("kernel", [proj_size, out_size], dtype=dtype) out = math_ops.matmul(args, weights) if not self._layer_norm: bias = vs.get_variable("bias", [out_size], dtype=dtype) out = nn_ops.bias_add(out, bias) return out def call(self, inputs, state): """LSTM cell with layer normalization and recurrent dropout.""" c, h = state args = array_ops.concat([inputs, h], 1) concat = self._linear(args) dtype = args.dtype i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1) if self._layer_norm: i = self._norm(i, "input", dtype=dtype) j = self._norm(j, "transform", dtype=dtype) f = self._norm(f, "forget", dtype=dtype) o = self._norm(o, "output", dtype=dtype) g = self._activation(j) if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1: g = nn_ops.dropout(g, self._keep_prob, seed=self._seed) new_c = ( c * math_ops.sigmoid(f + self._forget_bias) + math_ops.sigmoid(i) * g) if self._layer_norm: new_c = self._norm(new_c, "state", dtype=dtype) new_h = self._activation(new_c) * math_ops.sigmoid(o) new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h) return new_h, new_state class NASCell(rnn_cell_impl.LayerRNNCell): """Neural Architecture Search (NAS) recurrent network cell. This implements the recurrent cell from the paper: https://arxiv.org/abs/1611.01578 Barret Zoph and Quoc V. Le. "Neural Architecture Search with Reinforcement Learning" Proc. ICLR 2017. The class uses an optional projection layer. """ # NAS cell's architecture base. _NAS_BASE = 8 def __init__(self, num_units, num_proj=None, use_bias=False, reuse=None, **kwargs): """Initialize the parameters for a NAS cell. Args: num_units: int, The number of units in the NAS cell. num_proj: (optional) int, The output dimensionality for the projection matrices. If None, no projection is performed. use_bias: (optional) bool, If True then use biases within the cell. This is False by default. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. 
If not `True`, and the existing scope already has the given variables, an error is raised. **kwargs: Additional keyword arguments. """ super(NASCell, self).__init__(_reuse=reuse, **kwargs) self._num_units = num_units self._num_proj = num_proj self._use_bias = use_bias self._reuse = reuse if num_proj is not None: self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj) self._output_size = num_proj else: self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units) self._output_size = num_units @property def state_size(self): return self._state_size @property def output_size(self): return self._output_size def build(self, inputs_shape): input_size = tensor_shape.dimension_value( tensor_shape.TensorShape(inputs_shape).with_rank(2)[1]) if input_size is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") num_proj = self._num_units if self._num_proj is None else self._num_proj # Variables for the NAS cell. `recurrent_kernel` is all matrices multiplying # the hiddenstate and `kernel` is all matrices multiplying the inputs. self.recurrent_kernel = self.add_variable( "recurrent_kernel", [num_proj, self._NAS_BASE * self._num_units]) self.kernel = self.add_variable( "kernel", [input_size, self._NAS_BASE * self._num_units]) if self._use_bias: self.bias = self.add_variable("bias", shape=[self._NAS_BASE * self._num_units], initializer=init_ops.zeros_initializer) # Projection layer if specified if self._num_proj is not None: self.projection_weights = self.add_variable( "projection_weights", [self._num_units, self._num_proj]) self.built = True def call(self, inputs, state): """Run one step of NAS Cell. Args: inputs: input Tensor, 2D, batch x num_units. state: This must be a tuple of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`. Returns: A tuple containing: - A `2-D, [batch x output_dim]`, Tensor representing the output of the NAS Cell after reading `inputs` when previous state was `state`. Here output_dim is: num_proj if num_proj was set, num_units otherwise. - Tensor(s) representing the new state of NAS Cell after reading `inputs` when the previous state was `state`. Same type and shape(s) as `state`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. 
""" sigmoid = math_ops.sigmoid tanh = math_ops.tanh relu = nn_ops.relu (c_prev, m_prev) = state m_matrix = math_ops.matmul(m_prev, self.recurrent_kernel) inputs_matrix = math_ops.matmul(inputs, self.kernel) if self._use_bias: m_matrix = nn_ops.bias_add(m_matrix, self.bias) # The NAS cell branches into 8 different splits for both the hiddenstate # and the input m_matrix_splits = array_ops.split( axis=1, num_or_size_splits=self._NAS_BASE, value=m_matrix) inputs_matrix_splits = array_ops.split( axis=1, num_or_size_splits=self._NAS_BASE, value=inputs_matrix) # First layer layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0]) layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1]) layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2]) layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3]) layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4]) layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5]) layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6]) layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7]) # Second layer l2_0 = tanh(layer1_0 * layer1_1) l2_1 = tanh(layer1_2 + layer1_3) l2_2 = tanh(layer1_4 * layer1_5) l2_3 = sigmoid(layer1_6 + layer1_7) # Inject the cell l2_0 = tanh(l2_0 + c_prev) # Third layer l3_0_pre = l2_0 * l2_1 new_c = l3_0_pre # create new cell l3_0 = l3_0_pre l3_1 = tanh(l2_2 + l2_3) # Final layer new_m = tanh(l3_0 * l3_1) # Projection layer if specified if self._num_proj is not None: new_m = math_ops.matmul(new_m, self.projection_weights) new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_m) return new_m, new_state class UGRNNCell(rnn_cell_impl.RNNCell): """Update Gate Recurrent Neural Network (UGRNN) cell. Compromise between a LSTM/GRU and a vanilla RNN. There is only one gate, and that is to determine whether the unit should be integrating or computing instantaneously. This is the recurrent idea of the feedforward Highway Network. This implements the recurrent cell from the paper: https://arxiv.org/abs/1611.09913 Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo. "Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017. """ def __init__(self, num_units, initializer=None, forget_bias=1.0, activation=math_ops.tanh, reuse=None): """Initialize the parameters for an UGRNN cell. Args: num_units: int, The number of units in the UGRNN cell initializer: (optional) The initializer to use for the weight matrices. forget_bias: (optional) float, default 1.0, The initial bias of the forget gate, used to reduce the scale of forgetting at the beginning of the training. activation: (optional) Activation function of the inner states. Default is `tf.tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. """ super(UGRNNCell, self).__init__(_reuse=reuse) self._num_units = num_units self._initializer = initializer self._forget_bias = forget_bias self._activation = activation self._reuse = reuse self._linear = None @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def call(self, inputs, state): """Run one step of UGRNN. Args: inputs: input Tensor, 2D, batch x input size. state: state Tensor, 2D, batch x num units. Returns: new_output: batch x num units, Tensor representing the output of the UGRNN after reading `inputs` when previous state was `state`. Identical to `new_state`. 
new_state: batch x num units, Tensor representing the state of the UGRNN after reading `inputs` when previous state was `state`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. """ sigmoid = math_ops.sigmoid input_size = inputs.get_shape().with_rank(2).dims[1] if input_size.value is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") with vs.variable_scope( vs.get_variable_scope(), initializer=self._initializer): cell_inputs = array_ops.concat([inputs, state], 1) if self._linear is None: self._linear = _Linear(cell_inputs, 2 * self._num_units, True) rnn_matrix = self._linear(cell_inputs) [g_act, c_act] = array_ops.split( axis=1, num_or_size_splits=2, value=rnn_matrix) c = self._activation(c_act) g = sigmoid(g_act + self._forget_bias) new_state = g * state + (1.0 - g) * c new_output = new_state return new_output, new_state class IntersectionRNNCell(rnn_cell_impl.RNNCell): """Intersection Recurrent Neural Network (+RNN) cell. Architecture with coupled recurrent gate as well as coupled depth gate, designed to improve information flow through stacked RNNs. As the architecture uses depth gating, the dimensionality of the depth output (y) also should not change through depth (input size == output size). To achieve this, the first layer of a stacked Intersection RNN projects the inputs to N (num units) dimensions. Therefore when initializing an IntersectionRNNCell, one should set `num_in_proj = N` for the first layer and use default settings for subsequent layers. This implements the recurrent cell from the paper: https://arxiv.org/abs/1611.09913 Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo. "Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017. The Intersection RNN is built for use in deeply stacked RNNs so it may not achieve best performance with depth 1. """ def __init__(self, num_units, num_in_proj=None, initializer=None, forget_bias=1.0, y_activation=nn_ops.relu, reuse=None): """Initialize the parameters for an +RNN cell. Args: num_units: int, The number of units in the +RNN cell num_in_proj: (optional) int, The input dimensionality for the RNN. If creating the first layer of an +RNN, this should be set to `num_units`. Otherwise, this should be set to `None` (default). If `None`, dimensionality of `inputs` should be equal to `num_units`, otherwise ValueError is thrown. initializer: (optional) The initializer to use for the weight matrices. forget_bias: (optional) float, default 1.0, The initial bias of the forget gates, used to reduce the scale of forgetting at the beginning of the training. y_activation: (optional) Activation function of the states passed through depth. Default is 'tf.nn.relu`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. """ super(IntersectionRNNCell, self).__init__(_reuse=reuse) self._num_units = num_units self._initializer = initializer self._forget_bias = forget_bias self._num_input_proj = num_in_proj self._y_activation = y_activation self._reuse = reuse self._linear1 = None self._linear2 = None @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def call(self, inputs, state): """Run one step of the Intersection RNN. Args: inputs: input Tensor, 2D, batch x input size. state: state Tensor, 2D, batch x num units. 
Returns: new_y: batch x num units, Tensor representing the output of the +RNN after reading `inputs` when previous state was `state`. new_state: batch x num units, Tensor representing the state of the +RNN after reading `inputs` when previous state was `state`. Raises: ValueError: If input size cannot be inferred from `inputs` via static shape inference. ValueError: If input size != output size (these must be equal when using the Intersection RNN). """ sigmoid = math_ops.sigmoid tanh = math_ops.tanh input_size = inputs.get_shape().with_rank(2).dims[1] if input_size.value is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") with vs.variable_scope( vs.get_variable_scope(), initializer=self._initializer): # read-in projections (should be used for first layer in deep +RNN # to transform size of inputs from I --> N) if input_size.value != self._num_units: if self._num_input_proj: with vs.variable_scope("in_projection"): if self._linear1 is None: self._linear1 = _Linear(inputs, self._num_units, True) inputs = self._linear1(inputs) else: raise ValueError("Must have input size == output size for " "Intersection RNN. To fix, num_in_proj should " "be set to num_units at cell init.") n_dim = i_dim = self._num_units cell_inputs = array_ops.concat([inputs, state], 1) if self._linear2 is None: self._linear2 = _Linear(cell_inputs, 2 * n_dim + 2 * i_dim, True) rnn_matrix = self._linear2(cell_inputs) gh_act = rnn_matrix[:, :n_dim] # b x n h_act = rnn_matrix[:, n_dim:2 * n_dim] # b x n gy_act = rnn_matrix[:, 2 * n_dim:2 * n_dim + i_dim] # b x i y_act = rnn_matrix[:, 2 * n_dim + i_dim:2 * n_dim + 2 * i_dim] # b x i h = tanh(h_act) y = self._y_activation(y_act) gh = sigmoid(gh_act + self._forget_bias) gy = sigmoid(gy_act + self._forget_bias) new_state = gh * state + (1.0 - gh) * h # passed thru time new_y = gy * inputs + (1.0 - gy) * y # passed thru depth return new_y, new_state _REGISTERED_OPS = None class CompiledWrapper(rnn_cell_impl.RNNCell): """Wraps step execution in an XLA JIT scope.""" def __init__(self, cell, compile_stateful=False): """Create CompiledWrapper cell. Args: cell: Instance of `RNNCell`. compile_stateful: Whether to compile stateful ops like initializers and random number generators (default: False). """ self._cell = cell self._compile_stateful = compile_stateful @property def state_size(self): return self._cell.state_size @property def output_size(self): return self._cell.output_size def zero_state(self, batch_size, dtype): with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): return self._cell.zero_state(batch_size, dtype) def __call__(self, inputs, state, scope=None): if self._compile_stateful: compile_ops = True else: def compile_ops(node_def): global _REGISTERED_OPS if _REGISTERED_OPS is None: _REGISTERED_OPS = op_def_registry.get_registered_ops() return not _REGISTERED_OPS[node_def.op].is_stateful with jit.experimental_jit_scope(compile_ops=compile_ops): return self._cell(inputs, state, scope=scope) def _random_exp_initializer(minval, maxval, seed=None, dtype=dtypes.float32): """Returns an exponential distribution initializer. Args: minval: float or a scalar float Tensor. With value > 0. Lower bound of the range of random values to generate. maxval: float or a scalar float Tensor. With value > minval. Upper bound of the range of random values to generate. seed: An integer. Used to create random seeds. dtype: The data type. Returns: An initializer that generates tensors with an exponential distribution. 
""" def _initializer(shape, dtype=dtype, partition_info=None): del partition_info # Unused. return math_ops.exp( random_ops.random_uniform( shape, math_ops.log(minval), math_ops.log(maxval), dtype, seed=seed)) return _initializer class PhasedLSTMCell(rnn_cell_impl.RNNCell): """Phased LSTM recurrent network cell. https://arxiv.org/pdf/1610.09513v1.pdf """ def __init__(self, num_units, use_peepholes=False, leak=0.001, ratio_on=0.1, trainable_ratio_on=True, period_init_min=1.0, period_init_max=1000.0, reuse=None): """Initialize the Phased LSTM cell. Args: num_units: int, The number of units in the Phased LSTM cell. use_peepholes: bool, set True to enable peephole connections. leak: float or scalar float Tensor with value in [0, 1]. Leak applied during training. ratio_on: float or scalar float Tensor with value in [0, 1]. Ratio of the period during which the gates are open. trainable_ratio_on: bool, weather ratio_on is trainable. period_init_min: float or scalar float Tensor. With value > 0. Minimum value of the initialized period. The period values are initialized by drawing from the distribution: e^U(log(period_init_min), log(period_init_max)) Where U(.,.) is the uniform distribution. period_init_max: float or scalar float Tensor. With value > period_init_min. Maximum value of the initialized period. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. """ # We pass autocast=False because this layer can accept inputs of different # dtypes, so we do not want to automatically cast them to the same dtype. super(PhasedLSTMCell, self).__init__(_reuse=reuse, autocast=False) self._num_units = num_units self._use_peepholes = use_peepholes self._leak = leak self._ratio_on = ratio_on self._trainable_ratio_on = trainable_ratio_on self._period_init_min = period_init_min self._period_init_max = period_init_max self._reuse = reuse self._linear1 = None self._linear2 = None self._linear3 = None @property def state_size(self): return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units) @property def output_size(self): return self._num_units def _mod(self, x, y): """Modulo function that propagates x gradients.""" return array_ops.stop_gradient(math_ops.mod(x, y) - x) + x def _get_cycle_ratio(self, time, phase, period): """Compute the cycle ratio in the dtype of the time.""" phase_casted = math_ops.cast(phase, dtype=time.dtype) period_casted = math_ops.cast(period, dtype=time.dtype) shifted_time = time - phase_casted cycle_ratio = self._mod(shifted_time, period_casted) / period_casted return math_ops.cast(cycle_ratio, dtype=dtypes.float32) def call(self, inputs, state): """Phased LSTM Cell. Args: inputs: A tuple of 2 Tensor. The first Tensor has shape [batch, 1], and type float32 or float64. It stores the time. The second Tensor has shape [batch, features_size], and type float32. It stores the features. state: rnn_cell_impl.LSTMStateTuple, state from previous timestep. Returns: A tuple containing: - A Tensor of float32, and shape [batch_size, num_units], representing the output of the cell. - A rnn_cell_impl.LSTMStateTuple, containing 2 Tensors of float32, shape [batch_size, num_units], representing the new state and the output. 
""" (c_prev, h_prev) = state (time, x) = inputs in_mask_gates = [x, h_prev] if self._use_peepholes: in_mask_gates.append(c_prev) with vs.variable_scope("mask_gates"): if self._linear1 is None: self._linear1 = _Linear(in_mask_gates, 2 * self._num_units, True) mask_gates = math_ops.sigmoid(self._linear1(in_mask_gates)) [input_gate, forget_gate] = array_ops.split( axis=1, num_or_size_splits=2, value=mask_gates) with vs.variable_scope("new_input"): if self._linear2 is None: self._linear2 = _Linear([x, h_prev], self._num_units, True) new_input = math_ops.tanh(self._linear2([x, h_prev])) new_c = (c_prev * forget_gate + input_gate * new_input) in_out_gate = [x, h_prev] if self._use_peepholes: in_out_gate.append(new_c) with vs.variable_scope("output_gate"): if self._linear3 is None: self._linear3 = _Linear(in_out_gate, self._num_units, True) output_gate = math_ops.sigmoid(self._linear3(in_out_gate)) new_h = math_ops.tanh(new_c) * output_gate period = vs.get_variable( "period", [self._num_units], initializer=_random_exp_initializer(self._period_init_min, self._period_init_max)) phase = vs.get_variable( "phase", [self._num_units], initializer=init_ops.random_uniform_initializer(0., period.initial_value)) ratio_on = vs.get_variable( "ratio_on", [self._num_units], initializer=init_ops.constant_initializer(self._ratio_on), trainable=self._trainable_ratio_on) cycle_ratio = self._get_cycle_ratio(time, phase, period) k_up = 2 * cycle_ratio / ratio_on k_down = 2 - k_up k_closed = self._leak * cycle_ratio k = array_ops.where(cycle_ratio < ratio_on, k_down, k_closed) k = array_ops.where(cycle_ratio < 0.5 * ratio_on, k_up, k) new_c = k * new_c + (1 - k) * c_prev new_h = k * new_h + (1 - k) * h_prev new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h) return new_h, new_state class ConvLSTMCell(rnn_cell_impl.RNNCell): """Convolutional LSTM recurrent network cell. https://arxiv.org/pdf/1506.04214v1.pdf """ def __init__(self, conv_ndims, input_shape, output_channels, kernel_shape, use_bias=True, skip_connection=False, forget_bias=1.0, initializers=None, name="conv_lstm_cell"): """Construct ConvLSTMCell. Args: conv_ndims: Convolution dimensionality (1, 2 or 3). input_shape: Shape of the input as int tuple, excluding the batch size. output_channels: int, number of output channels of the conv LSTM. kernel_shape: Shape of kernel as an int tuple (of size 1, 2 or 3). use_bias: (bool) Use bias in convolutions. skip_connection: If set to `True`, concatenate the input to the output of the conv LSTM. Default: `False`. forget_bias: Forget bias. initializers: Unused. name: Name of the module. Raises: ValueError: If `skip_connection` is `True` and stride is different from 1 or if `input_shape` is incompatible with `conv_ndims`. 
""" super(ConvLSTMCell, self).__init__(name=name) if conv_ndims != len(input_shape) - 1: raise ValueError("Invalid input_shape {} for conv_ndims={}.".format( input_shape, conv_ndims)) self._conv_ndims = conv_ndims self._input_shape = input_shape self._output_channels = output_channels self._kernel_shape = list(kernel_shape) self._use_bias = use_bias self._forget_bias = forget_bias self._skip_connection = skip_connection self._total_output_channels = output_channels if self._skip_connection: self._total_output_channels += self._input_shape[-1] state_size = tensor_shape.TensorShape( self._input_shape[:-1] + [self._output_channels]) self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size) self._output_size = tensor_shape.TensorShape( self._input_shape[:-1] + [self._total_output_channels]) @property def output_size(self): return self._output_size @property def state_size(self): return self._state_size def call(self, inputs, state, scope=None): cell, hidden = state new_hidden = _conv([inputs, hidden], self._kernel_shape, 4 * self._output_channels, self._use_bias) gates = array_ops.split( value=new_hidden, num_or_size_splits=4, axis=self._conv_ndims + 1) input_gate, new_input, forget_gate, output_gate = gates new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input) output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate) if self._skip_connection: output = array_ops.concat([output, inputs], axis=-1) new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output) return output, new_state class Conv1DLSTMCell(ConvLSTMCell): """1D Convolutional LSTM recurrent network cell. https://arxiv.org/pdf/1506.04214v1.pdf """ def __init__(self, name="conv_1d_lstm_cell", **kwargs): """Construct Conv1DLSTM. See `ConvLSTMCell` for more details.""" super(Conv1DLSTMCell, self).__init__(conv_ndims=1, name=name, **kwargs) class Conv2DLSTMCell(ConvLSTMCell): """2D Convolutional LSTM recurrent network cell. https://arxiv.org/pdf/1506.04214v1.pdf """ def __init__(self, name="conv_2d_lstm_cell", **kwargs): """Construct Conv2DLSTM. See `ConvLSTMCell` for more details.""" super(Conv2DLSTMCell, self).__init__(conv_ndims=2, name=name, **kwargs) class Conv3DLSTMCell(ConvLSTMCell): """3D Convolutional LSTM recurrent network cell. https://arxiv.org/pdf/1506.04214v1.pdf """ def __init__(self, name="conv_3d_lstm_cell", **kwargs): """Construct Conv3DLSTM. See `ConvLSTMCell` for more details.""" super(Conv3DLSTMCell, self).__init__(conv_ndims=3, name=name, **kwargs) def _conv(args, filter_size, num_features, bias, bias_start=0.0): """Convolution. Args: args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D, batch x n, Tensors. filter_size: int tuple of filter shape (of size 1, 2 or 3). num_features: int, number of features. bias: Whether to use biases in the convolution layer. bias_start: starting value to initialize the bias; 0 by default. Returns: A 3D, 4D, or 5D Tensor with shape [batch ... num_features] Raises: ValueError: if some of the arguments has unspecified or wrong shape. """ # Calculate the total size of arguments on dimension 1. 
total_arg_size_depth = 0 shapes = [a.get_shape().as_list() for a in args] shape_length = len(shapes[0]) for shape in shapes: if len(shape) not in [3, 4, 5]: raise ValueError("Conv Linear expects 3D, 4D " "or 5D arguments: %s" % str(shapes)) if len(shape) != len(shapes[0]): raise ValueError("Conv Linear expects all args " "to be of same Dimension: %s" % str(shapes)) else: total_arg_size_depth += shape[-1] dtype = [a.dtype for a in args][0] # determine correct conv operation if shape_length == 3: conv_op = nn_ops.conv1d strides = 1 elif shape_length == 4: conv_op = nn_ops.conv2d strides = shape_length * [1] elif shape_length == 5: conv_op = nn_ops.conv3d strides = shape_length * [1] # Now the computation. kernel = vs.get_variable( "kernel", filter_size + [total_arg_size_depth, num_features], dtype=dtype) if len(args) == 1: res = conv_op(args[0], kernel, strides, padding="SAME") else: res = conv_op( array_ops.concat(axis=shape_length - 1, values=args), kernel, strides, padding="SAME") if not bias: return res bias_term = vs.get_variable( "biases", [num_features], dtype=dtype, initializer=init_ops.constant_initializer(bias_start, dtype=dtype)) return res + bias_term class GLSTMCell(rnn_cell_impl.RNNCell): """Group LSTM cell (G-LSTM). The implementation is based on: https://arxiv.org/abs/1703.10722 O. Kuchaiev and B. Ginsburg "Factorization Tricks for LSTM Networks", ICLR 2017 workshop. In brief, a G-LSTM cell consists of one LSTM sub-cell per group, where each sub-cell operates on an evenly-sized sub-vector of the input and produces an evenly-sized sub-vector of the output. For example, a G-LSTM cell with 128 units and 4 groups consists of 4 LSTMs sub-cells with 32 units each. If that G-LSTM cell is fed a 200-dim input, then each sub-cell receives a 50-dim part of the input and produces a 32-dim part of the output. """ def __init__(self, num_units, initializer=None, num_proj=None, number_of_groups=1, forget_bias=1.0, activation=math_ops.tanh, reuse=None): """Initialize the parameters of G-LSTM cell. Args: num_units: int, The number of units in the G-LSTM cell initializer: (optional) The initializer to use for the weight and projection matrices. num_proj: (optional) int, The output dimensionality for the projection matrices. If None, no projection is performed. number_of_groups: (optional) int, number of groups to use. If `number_of_groups` is 1, then it should be equivalent to LSTM cell forget_bias: Biases of the forget gate are initialized by default to 1 in order to reduce the scale of forgetting at the beginning of the training. activation: Activation function of the inner states. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. Raises: ValueError: If `num_units` or `num_proj` is not divisible by `number_of_groups`. 
""" super(GLSTMCell, self).__init__(_reuse=reuse) self._num_units = num_units self._initializer = initializer self._num_proj = num_proj self._forget_bias = forget_bias self._activation = activation self._number_of_groups = number_of_groups if self._num_units % self._number_of_groups != 0: raise ValueError("num_units must be divisible by number_of_groups") if self._num_proj: if self._num_proj % self._number_of_groups != 0: raise ValueError("num_proj must be divisible by number_of_groups") self._group_shape = [ int(self._num_proj / self._number_of_groups), int(self._num_units / self._number_of_groups) ] else: self._group_shape = [ int(self._num_units / self._number_of_groups), int(self._num_units / self._number_of_groups) ] if num_proj: self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj) self._output_size = num_proj else: self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units) self._output_size = num_units self._linear1 = [None] * number_of_groups self._linear2 = None @property def state_size(self): return self._state_size @property def output_size(self): return self._output_size def _get_input_for_group(self, inputs, group_id, group_size): """Slices inputs into groups to prepare for processing by cell's groups. Args: inputs: cell input or it's previous state, a Tensor, 2D, [batch x num_units] group_id: group id, a Scalar, for which to prepare input group_size: size of the group Returns: subset of inputs corresponding to group "group_id", a Tensor, 2D, [batch x num_units/number_of_groups] """ return array_ops.slice( input_=inputs, begin=[0, group_id * group_size], size=[self._batch_size, group_size], name=("GLSTM_group%d_input_generation" % group_id)) def call(self, inputs, state): """Run one step of G-LSTM. Args: inputs: input Tensor, 2D, [batch x num_inputs]. num_inputs must be statically-known and evenly divisible into groups. The innermost vectors of the inputs are split into evenly-sized sub-vectors and fed into the per-group LSTM sub-cells. state: this must be a tuple of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`. Returns: A tuple containing: - A `2-D, [batch x output_dim]`, Tensor representing the output of the G-LSTM after reading `inputs` when previous state was `state`. Here output_dim is: num_proj if num_proj was set, num_units otherwise. - LSTMStateTuple representing the new state of G-LSTM cell after reading `inputs` when the previous state was `state`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference, or if the input shape is incompatible with the number of groups. """ (c_prev, m_prev) = state self._batch_size = tensor_shape.dimension_value( inputs.shape[0]) or array_ops.shape(inputs)[0] # If the input size is statically-known, calculate and validate its group # size. Otherwise, use the output group size. 
input_size = tensor_shape.dimension_value(inputs.shape[1]) if input_size is None: raise ValueError("input size must be statically known") if input_size % self._number_of_groups != 0: raise ValueError( "input size (%d) must be divisible by number_of_groups (%d)" % (input_size, self._number_of_groups)) input_group_size = int(input_size / self._number_of_groups) dtype = inputs.dtype scope = vs.get_variable_scope() with vs.variable_scope(scope, initializer=self._initializer): i_parts = [] j_parts = [] f_parts = [] o_parts = [] for group_id in range(self._number_of_groups): with vs.variable_scope("group%d" % group_id): x_g_id = array_ops.concat( [ self._get_input_for_group(inputs, group_id, input_group_size), self._get_input_for_group(m_prev, group_id, self._group_shape[0]) ], axis=1) linear = self._linear1[group_id] if linear is None: linear = _Linear(x_g_id, 4 * self._group_shape[1], False) self._linear1[group_id] = linear R_k = linear(x_g_id) # pylint: disable=invalid-name i_k, j_k, f_k, o_k = array_ops.split(R_k, 4, 1) i_parts.append(i_k) j_parts.append(j_k) f_parts.append(f_k) o_parts.append(o_k) bi = vs.get_variable( name="bias_i", shape=[self._num_units], dtype=dtype, initializer=init_ops.constant_initializer(0.0, dtype=dtype)) bj = vs.get_variable( name="bias_j", shape=[self._num_units], dtype=dtype, initializer=init_ops.constant_initializer(0.0, dtype=dtype)) bf = vs.get_variable( name="bias_f", shape=[self._num_units], dtype=dtype, initializer=init_ops.constant_initializer(0.0, dtype=dtype)) bo = vs.get_variable( name="bias_o", shape=[self._num_units], dtype=dtype, initializer=init_ops.constant_initializer(0.0, dtype=dtype)) i = nn_ops.bias_add(array_ops.concat(i_parts, axis=1), bi) j = nn_ops.bias_add(array_ops.concat(j_parts, axis=1), bj) f = nn_ops.bias_add(array_ops.concat(f_parts, axis=1), bf) o = nn_ops.bias_add(array_ops.concat(o_parts, axis=1), bo) c = ( math_ops.sigmoid(f + self._forget_bias) * c_prev + math_ops.sigmoid(i) * math_ops.tanh(j)) m = math_ops.sigmoid(o) * self._activation(c) if self._num_proj is not None: with vs.variable_scope("projection"): if self._linear2 is None: self._linear2 = _Linear(m, self._num_proj, False) m = self._linear2(m) new_state = rnn_cell_impl.LSTMStateTuple(c, m) return m, new_state class LayerNormLSTMCell(rnn_cell_impl.RNNCell): """Long short-term memory unit (LSTM) recurrent network cell. The default non-peephole implementation is based on: https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf Felix Gers, Jurgen Schmidhuber, and Fred Cummins. "Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999. The peephole implementation is based on: https://research.google.com/pubs/archive/43905.pdf Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory recurrent neural network architectures for large scale acoustic modeling." INTERSPEECH, 2014. The class uses optional peep-hole connections, optional cell clipping, and an optional projection layer. Layer normalization implementation is based on: https://arxiv.org/abs/1607.06450. "Layer Normalization" Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton and is applied before the internal nonlinearities. """ def __init__(self, num_units, use_peepholes=False, cell_clip=None, initializer=None, num_proj=None, proj_clip=None, forget_bias=1.0, activation=None, layer_norm=False, norm_gain=1.0, norm_shift=0.0, reuse=None): """Initialize the parameters for an LSTM cell. 
Args: num_units: int, The number of units in the LSTM cell use_peepholes: bool, set True to enable diagonal/peephole connections. cell_clip: (optional) A float value, if provided the cell state is clipped by this value prior to the cell output activation. initializer: (optional) The initializer to use for the weight and projection matrices. num_proj: (optional) int, The output dimensionality for the projection matrices. If None, no projection is performed. proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is provided, then the projected values are clipped elementwise to within `[-proj_clip, proj_clip]`. forget_bias: Biases of the forget gate are initialized by default to 1 in order to reduce the scale of forgetting at the beginning of the training. Must set it manually to `0.0` when restoring from CudnnLSTM trained checkpoints. activation: Activation function of the inner states. Default: `tanh`. layer_norm: If `True`, layer normalization will be applied. norm_gain: float, The layer normalization gain initial value. If `layer_norm` has been set to `False`, this argument will be ignored. norm_shift: float, The layer normalization shift initial value. If `layer_norm` has been set to `False`, this argument will be ignored. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. When restoring from CudnnLSTM-trained checkpoints, must use CudnnCompatibleLSTMCell instead. """ super(LayerNormLSTMCell, self).__init__(_reuse=reuse) self._num_units = num_units self._use_peepholes = use_peepholes self._cell_clip = cell_clip self._initializer = initializer self._num_proj = num_proj self._proj_clip = proj_clip self._forget_bias = forget_bias self._activation = activation or math_ops.tanh self._layer_norm = layer_norm self._norm_gain = norm_gain self._norm_shift = norm_shift if num_proj: self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_proj)) self._output_size = num_proj else: self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_units)) self._output_size = num_units @property def state_size(self): return self._state_size @property def output_size(self): return self._output_size def _linear(self, args, output_size, bias, bias_initializer=None, kernel_initializer=None, layer_norm=False): """Linear map: sum_i(args[i] * W[i]), where W[i] is a Variable. Args: args: a 2D Tensor or a list of 2D, batch x n, Tensors. output_size: int, second dimension of W[i]. bias: boolean, whether to add a bias term or not. bias_initializer: starting value to initialize the bias (default is all zeros). kernel_initializer: starting value to initialize the weight. layer_norm: boolean, whether to apply layer normalization. Returns: A 2D Tensor with shape [batch x output_size] taking value sum_i(args[i] * W[i]), where each W[i] is a newly created Variable. Raises: ValueError: if some of the arguments has unspecified or wrong shape. """ if args is None or (nest.is_sequence(args) and not args): raise ValueError("`args` must be specified") if not nest.is_sequence(args): args = [args] # Calculate the total size of arguments on dimension 1. 
total_arg_size = 0 shapes = [a.get_shape() for a in args] for shape in shapes: if shape.ndims != 2: raise ValueError("linear is expecting 2D arguments: %s" % shapes) if tensor_shape.dimension_value(shape[1]) is None: raise ValueError("linear expects shape[1] to be provided for shape %s, " "but saw %s" % (shape, shape[1])) else: total_arg_size += tensor_shape.dimension_value(shape[1]) dtype = [a.dtype for a in args][0] # Now the computation. scope = vs.get_variable_scope() with vs.variable_scope(scope) as outer_scope: weights = vs.get_variable( "kernel", [total_arg_size, output_size], dtype=dtype, initializer=kernel_initializer) if len(args) == 1: res = math_ops.matmul(args[0], weights) else: res = math_ops.matmul(array_ops.concat(args, 1), weights) if not bias: return res with vs.variable_scope(outer_scope) as inner_scope: inner_scope.set_partitioner(None) if bias_initializer is None: bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype) biases = vs.get_variable( "bias", [output_size], dtype=dtype, initializer=bias_initializer) if not layer_norm: res = nn_ops.bias_add(res, biases) return res def call(self, inputs, state): """Run one step of LSTM. Args: inputs: input Tensor, 2D, batch x num_units. state: this must be a tuple of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`. Returns: A tuple containing: - A `2-D, [batch x output_dim]`, Tensor representing the output of the LSTM after reading `inputs` when previous state was `state`. Here output_dim is: num_proj if num_proj was set, num_units otherwise. - Tensor(s) representing the new state of LSTM after reading `inputs` when the previous state was `state`. Same type and shape(s) as `state`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. 
""" sigmoid = math_ops.sigmoid (c_prev, m_prev) = state dtype = inputs.dtype input_size = inputs.get_shape().with_rank(2).dims[1] if input_size.value is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") scope = vs.get_variable_scope() with vs.variable_scope(scope, initializer=self._initializer) as unit_scope: # i = input_gate, j = new_input, f = forget_gate, o = output_gate lstm_matrix = self._linear( [inputs, m_prev], 4 * self._num_units, bias=True, bias_initializer=None, layer_norm=self._layer_norm) i, j, f, o = array_ops.split( value=lstm_matrix, num_or_size_splits=4, axis=1) if self._layer_norm: i = _norm(self._norm_gain, self._norm_shift, i, "input") j = _norm(self._norm_gain, self._norm_shift, j, "transform") f = _norm(self._norm_gain, self._norm_shift, f, "forget") o = _norm(self._norm_gain, self._norm_shift, o, "output") # Diagonal connections if self._use_peepholes: with vs.variable_scope(unit_scope): w_f_diag = vs.get_variable( "w_f_diag", shape=[self._num_units], dtype=dtype) w_i_diag = vs.get_variable( "w_i_diag", shape=[self._num_units], dtype=dtype) w_o_diag = vs.get_variable( "w_o_diag", shape=[self._num_units], dtype=dtype) if self._use_peepholes: c = ( sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev + sigmoid(i + w_i_diag * c_prev) * self._activation(j)) else: c = ( sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * self._activation(j)) if self._layer_norm: c = _norm(self._norm_gain, self._norm_shift, c, "state") if self._cell_clip is not None: # pylint: disable=invalid-unary-operand-type c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip) # pylint: enable=invalid-unary-operand-type if self._use_peepholes: m = sigmoid(o + w_o_diag * c) * self._activation(c) else: m = sigmoid(o) * self._activation(c) if self._num_proj is not None: with vs.variable_scope("projection"): m = self._linear(m, self._num_proj, bias=False) if self._proj_clip is not None: # pylint: disable=invalid-unary-operand-type m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip) # pylint: enable=invalid-unary-operand-type new_state = (rnn_cell_impl.LSTMStateTuple(c, m)) return m, new_state class SRUCell(rnn_cell_impl.LayerRNNCell): """SRU, Simple Recurrent Unit. Implementation based on Training RNNs as Fast as CNNs (cf. https://arxiv.org/abs/1709.02755). This variation of RNN cell is characterized by the simplified data dependence between hidden states of two consecutive time steps. Traditionally, hidden states from a cell at time step t-1 needs to be multiplied with a matrix W_hh before being fed into the ensuing cell at time step t. This flavor of RNN replaces the matrix multiplication between h_{t-1} and W_hh with a pointwise multiplication, resulting in performance gain. Args: num_units: int, The number of units in the SRU cell. activation: Nonlinearity to use. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: (optional) String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. **kwargs: Additional keyword arguments. 
""" def __init__(self, num_units, activation=None, reuse=None, name=None, **kwargs): super(SRUCell, self).__init__(_reuse=reuse, name=name, **kwargs) self._num_units = num_units self._activation = activation or math_ops.tanh # Restrict inputs to be 2-dimensional matrices self.input_spec = input_spec.InputSpec(ndim=2) @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def build(self, inputs_shape): if tensor_shape.dimension_value(inputs_shape[1]) is None: raise ValueError( "Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape) input_depth = tensor_shape.dimension_value(inputs_shape[1]) # pylint: disable=protected-access self._kernel = self.add_variable( rnn_cell_impl._WEIGHTS_VARIABLE_NAME, shape=[input_depth, 4 * self._num_units]) # pylint: enable=protected-access self._bias = self.add_variable( rnn_cell_impl._BIAS_VARIABLE_NAME, # pylint: disable=protected-access shape=[2 * self._num_units], initializer=init_ops.zeros_initializer) self._built = True def call(self, inputs, state): """Simple recurrent unit (SRU) with num_units cells.""" U = math_ops.matmul(inputs, self._kernel) # pylint: disable=invalid-name x_bar, f_intermediate, r_intermediate, x_tx = array_ops.split( value=U, num_or_size_splits=4, axis=1) f_r = math_ops.sigmoid( nn_ops.bias_add( array_ops.concat([f_intermediate, r_intermediate], 1), self._bias)) f, r = array_ops.split(value=f_r, num_or_size_splits=2, axis=1) c = f * state + (1.0 - f) * x_bar h = r * self._activation(c) + (1.0 - r) * x_tx return h, c class WeightNormLSTMCell(rnn_cell_impl.RNNCell): """Weight normalized LSTM Cell. Adapted from `rnn_cell_impl.LSTMCell`. The weight-norm implementation is based on: https://arxiv.org/abs/1602.07868 Tim Salimans, Diederik P. Kingma. Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks The default LSTM implementation based on: https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf Felix Gers, Jurgen Schmidhuber, and Fred Cummins. "Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999. The class uses optional peephole connections, optional cell clipping and an optional projection layer. The optional peephole implementation is based on: https://research.google.com/pubs/archive/43905.pdf Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory recurrent neural network architectures for large scale acoustic modeling." INTERSPEECH, 2014. """ def __init__(self, num_units, norm=True, use_peepholes=False, cell_clip=None, initializer=None, num_proj=None, proj_clip=None, forget_bias=1, activation=None, reuse=None): """Initialize the parameters of a weight-normalized LSTM cell. Args: num_units: int, The number of units in the LSTM cell norm: If `True`, apply normalization to the weight matrices. If False, the result is identical to that obtained from `rnn_cell_impl.LSTMCell` use_peepholes: bool, set `True` to enable diagonal/peephole connections. cell_clip: (optional) A float value, if provided the cell state is clipped by this value prior to the cell output activation. initializer: (optional) The initializer to use for the weight matrices. num_proj: (optional) int, The output dimensionality for the projection matrices. If None, no projection is performed. proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is provided, then the projected values are clipped elementwise to within `[-proj_clip, proj_clip]`. 
forget_bias: Biases of the forget gate are initialized by default to 1 in order to reduce the scale of forgetting at the beginning of the training. activation: Activation function of the inner states. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. """ super(WeightNormLSTMCell, self).__init__(_reuse=reuse) self._scope = "wn_lstm_cell" self._num_units = num_units self._norm = norm self._initializer = initializer self._use_peepholes = use_peepholes self._cell_clip = cell_clip self._num_proj = num_proj self._proj_clip = proj_clip self._activation = activation or math_ops.tanh self._forget_bias = forget_bias self._weights_variable_name = "kernel" self._bias_variable_name = "bias" if num_proj: self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj) self._output_size = num_proj else: self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units) self._output_size = num_units @property def state_size(self): return self._state_size @property def output_size(self): return self._output_size def _normalize(self, weight, name): """Apply weight normalization. Args: weight: a 2D tensor with known number of columns. name: string, variable name for the normalizer. Returns: A tensor with the same shape as `weight`. """ output_size = weight.get_shape().as_list()[1] g = vs.get_variable(name, [output_size], dtype=weight.dtype) return nn_impl.l2_normalize(weight, axis=0) * g def _linear(self, args, output_size, norm, bias, bias_initializer=None, kernel_initializer=None): """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable. Args: args: a 2D Tensor or a list of 2D, batch x n, Tensors. output_size: int, second dimension of W[i]. norm: bool, whether to normalize the weights. bias: boolean, whether to add a bias term or not. bias_initializer: starting value to initialize the bias (default is all zeros). kernel_initializer: starting value to initialize the weight. Returns: A 2D Tensor with shape [batch x output_size] equal to sum_i(args[i] * W[i]), where W[i]s are newly created matrices. Raises: ValueError: if some of the arguments has unspecified or wrong shape. """ if args is None or (nest.is_sequence(args) and not args): raise ValueError("`args` must be specified") if not nest.is_sequence(args): args = [args] # Calculate the total size of arguments on dimension 1. total_arg_size = 0 shapes = [a.get_shape() for a in args] for shape in shapes: if shape.ndims != 2: raise ValueError("linear is expecting 2D arguments: %s" % shapes) if tensor_shape.dimension_value(shape[1]) is None: raise ValueError("linear expects shape[1] to be provided for shape %s, " "but saw %s" % (shape, shape[1])) else: total_arg_size += tensor_shape.dimension_value(shape[1]) dtype = [a.dtype for a in args][0] # Now the computation. 
scope = vs.get_variable_scope() with vs.variable_scope(scope) as outer_scope: weights = vs.get_variable( self._weights_variable_name, [total_arg_size, output_size], dtype=dtype, initializer=kernel_initializer) if norm: wn = [] st = 0 with ops.control_dependencies(None): for i in range(len(args)): en = st + tensor_shape.dimension_value(shapes[i][1]) wn.append( self._normalize(weights[st:en, :], name="norm_{}".format(i))) st = en weights = array_ops.concat(wn, axis=0) if len(args) == 1: res = math_ops.matmul(args[0], weights) else: res = math_ops.matmul(array_ops.concat(args, 1), weights) if not bias: return res with vs.variable_scope(outer_scope) as inner_scope: inner_scope.set_partitioner(None) if bias_initializer is None: bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype) biases = vs.get_variable( self._bias_variable_name, [output_size], dtype=dtype, initializer=bias_initializer) return nn_ops.bias_add(res, biases) def call(self, inputs, state): """Run one step of LSTM. Args: inputs: input Tensor, 2D, batch x num_units. state: A tuple of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`. Returns: A tuple containing: - A `2-D, [batch x output_dim]`, Tensor representing the output of the LSTM after reading `inputs` when previous state was `state`. Here output_dim is: num_proj if num_proj was set, num_units otherwise. - Tensor(s) representing the new state of LSTM after reading `inputs` when the previous state was `state`. Same type and shape(s) as `state`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. """ dtype = inputs.dtype num_units = self._num_units sigmoid = math_ops.sigmoid c, h = state input_size = inputs.get_shape().with_rank(2).dims[1] if input_size.value is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") with vs.variable_scope(self._scope, initializer=self._initializer): concat = self._linear( [inputs, h], 4 * num_units, norm=self._norm, bias=True) # i = input_gate, j = new_input, f = forget_gate, o = output_gate i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1) if self._use_peepholes: w_f_diag = vs.get_variable("w_f_diag", shape=[num_units], dtype=dtype) w_i_diag = vs.get_variable("w_i_diag", shape=[num_units], dtype=dtype) w_o_diag = vs.get_variable("w_o_diag", shape=[num_units], dtype=dtype) new_c = ( c * sigmoid(f + self._forget_bias + w_f_diag * c) + sigmoid(i + w_i_diag * c) * self._activation(j)) else: new_c = ( c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j)) if self._cell_clip is not None: # pylint: disable=invalid-unary-operand-type new_c = clip_ops.clip_by_value(new_c, -self._cell_clip, self._cell_clip) # pylint: enable=invalid-unary-operand-type if self._use_peepholes: new_h = sigmoid(o + w_o_diag * new_c) * self._activation(new_c) else: new_h = sigmoid(o) * self._activation(new_c) if self._num_proj is not None: with vs.variable_scope("projection"): new_h = self._linear( new_h, self._num_proj, norm=self._norm, bias=False) if self._proj_clip is not None: # pylint: disable=invalid-unary-operand-type new_h = clip_ops.clip_by_value(new_h, -self._proj_clip, self._proj_clip) # pylint: enable=invalid-unary-operand-type new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h) return new_h, new_state class IndRNNCell(rnn_cell_impl.LayerRNNCell): """Independently Recurrent Neural Network (IndRNN) cell (cf. https://arxiv.org/abs/1803.04831). Args: num_units: int, The number of units in the RNN cell. 
activation: Nonlinearity to use. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. """ def __init__(self, num_units, activation=None, reuse=None, name=None, dtype=None): super(IndRNNCell, self).__init__(_reuse=reuse, name=name, dtype=dtype) # Inputs must be 2-dimensional. self.input_spec = input_spec.InputSpec(ndim=2) self._num_units = num_units self._activation = activation or math_ops.tanh @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def build(self, inputs_shape): if tensor_shape.dimension_value(inputs_shape[1]) is None: raise ValueError( "Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape) input_depth = tensor_shape.dimension_value(inputs_shape[1]) # pylint: disable=protected-access self._kernel_w = self.add_variable( "%s_w" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, shape=[input_depth, self._num_units]) self._kernel_u = self.add_variable( "%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, shape=[1, self._num_units], initializer=init_ops.random_uniform_initializer( minval=-1, maxval=1, dtype=self.dtype)) self._bias = self.add_variable( rnn_cell_impl._BIAS_VARIABLE_NAME, shape=[self._num_units], initializer=init_ops.zeros_initializer(dtype=self.dtype)) # pylint: enable=protected-access self.built = True def call(self, inputs, state): """IndRNN: output = new_state = act(W * input + u * state + B).""" gate_inputs = math_ops.matmul(inputs, self._kernel_w) + ( state * self._kernel_u) gate_inputs = nn_ops.bias_add(gate_inputs, self._bias) output = self._activation(gate_inputs) return output, output class IndyGRUCell(rnn_cell_impl.LayerRNNCell): r"""Independently Gated Recurrent Unit cell. Based on IndRNNs (https://arxiv.org/abs/1803.04831) and similar to GRUCell, yet with the \\(U_r\\), \\(U_z\\), and \\(U\\) matrices in equations 5, 6, and 8 of http://arxiv.org/abs/1406.1078 respectively replaced by diagonal matrices, i.e. a Hadamard product with a single vector: $$r_j = \sigma\left([\mathbf W_r\mathbf x]_j + [\mathbf u_r\circ \mathbf h_{(t-1)}]_j\right)$$ $$z_j = \sigma\left([\mathbf W_z\mathbf x]_j + [\mathbf u_z\circ \mathbf h_{(t-1)}]_j\right)$$ $$\tilde{h}^{(t)}_j = \phi\left([\mathbf W \mathbf x]_j + [\mathbf u \circ \mathbf r \circ \mathbf h_{(t-1)}]_j\right)$$ where \\(\circ\\) denotes the Hadamard operator. This means that each IndyGRU node sees only its own state, as opposed to seeing all states in the same layer. Args: num_units: int, The number of units in the GRU cell. activation: Nonlinearity to use. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. kernel_initializer: (optional) The initializer to use for the weight matrices applied to the input. bias_initializer: (optional) The initializer to use for the bias. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. 
dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. """ def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None, name=None, dtype=None): super(IndyGRUCell, self).__init__(_reuse=reuse, name=name, dtype=dtype) # Inputs must be 2-dimensional. self.input_spec = input_spec.InputSpec(ndim=2) self._num_units = num_units self._activation = activation or math_ops.tanh self._kernel_initializer = kernel_initializer self._bias_initializer = bias_initializer @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def build(self, inputs_shape): if tensor_shape.dimension_value(inputs_shape[1]) is None: raise ValueError( "Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape) input_depth = tensor_shape.dimension_value(inputs_shape[1]) # pylint: disable=protected-access self._gate_kernel_w = self.add_variable( "gates/%s_w" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, shape=[input_depth, 2 * self._num_units], initializer=self._kernel_initializer) self._gate_kernel_u = self.add_variable( "gates/%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, shape=[1, 2 * self._num_units], initializer=init_ops.random_uniform_initializer( minval=-1, maxval=1, dtype=self.dtype)) self._gate_bias = self.add_variable( "gates/%s" % rnn_cell_impl._BIAS_VARIABLE_NAME, shape=[2 * self._num_units], initializer=(self._bias_initializer if self._bias_initializer is not None else init_ops.constant_initializer(1.0, dtype=self.dtype))) self._candidate_kernel_w = self.add_variable( "candidate/%s" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, shape=[input_depth, self._num_units], initializer=self._kernel_initializer) self._candidate_kernel_u = self.add_variable( "candidate/%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, shape=[1, self._num_units], initializer=init_ops.random_uniform_initializer( minval=-1, maxval=1, dtype=self.dtype)) self._candidate_bias = self.add_variable( "candidate/%s" % rnn_cell_impl._BIAS_VARIABLE_NAME, shape=[self._num_units], initializer=(self._bias_initializer if self._bias_initializer is not None else init_ops.zeros_initializer(dtype=self.dtype))) # pylint: enable=protected-access self.built = True def call(self, inputs, state): """Recurrently independent Gated Recurrent Unit (GRU) with nunits cells.""" gate_inputs = math_ops.matmul(inputs, self._gate_kernel_w) + ( gen_array_ops.tile(state, [1, 2]) * self._gate_kernel_u) gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias) value = math_ops.sigmoid(gate_inputs) r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1) r_state = r * state candidate = math_ops.matmul(inputs, self._candidate_kernel_w) + ( r_state * self._candidate_kernel_u) candidate = nn_ops.bias_add(candidate, self._candidate_bias) c = self._activation(candidate) new_h = u * state + (1 - u) * c return new_h, new_h class IndyLSTMCell(rnn_cell_impl.LayerRNNCell): r"""Basic IndyLSTM recurrent network cell. Based on IndRNNs (https://arxiv.org/abs/1803.04831) and similar to BasicLSTMCell, yet with the \\(U_f\\), \\(U_i\\), \\(U_o\\) and \\(U_c\\) matrices in the regular LSTM equations replaced by diagonal matrices, i.e. 
a Hadamard product with a single vector: $$f_t = \sigma_g\left(W_f x_t + u_f \circ h_{t-1} + b_f\right)$$ $$i_t = \sigma_g\left(W_i x_t + u_i \circ h_{t-1} + b_i\right)$$ $$o_t = \sigma_g\left(W_o x_t + u_o \circ h_{t-1} + b_o\right)$$ $$c_t = f_t \circ c_{t-1} + i_t \circ \sigma_c\left(W_c x_t + u_c \circ h_{t-1} + b_c\right)$$ where \\(\circ\\) denotes the Hadamard operator. This means that each IndyLSTM node sees only its own state \\(h\\) and \\(c\\), as opposed to seeing all states in the same layer. We add forget_bias (default: 1) to the biases of the forget gate in order to reduce the scale of forgetting in the beginning of the training. It does not allow cell clipping, a projection layer, and does not use peep-hole connections: it is the basic baseline. For a detailed analysis of IndyLSTMs, see https://arxiv.org/abs/1903.08023. """ def __init__(self, num_units, forget_bias=1.0, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None, name=None, dtype=None): """Initialize the IndyLSTM cell. Args: num_units: int, The number of units in the LSTM cell. forget_bias: float, The bias added to forget gates (see above). Must set to `0.0` manually when restoring from CudnnLSTM-trained checkpoints. activation: Activation function of the inner states. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. kernel_initializer: (optional) The initializer to use for the weight matrix applied to the inputs. bias_initializer: (optional) The initializer to use for the bias. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. """ super(IndyLSTMCell, self).__init__(_reuse=reuse, name=name, dtype=dtype) # Inputs must be 2-dimensional. self.input_spec = input_spec.InputSpec(ndim=2) self._num_units = num_units self._forget_bias = forget_bias self._activation = activation or math_ops.tanh self._kernel_initializer = kernel_initializer self._bias_initializer = bias_initializer @property def state_size(self): return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units) @property def output_size(self): return self._num_units def build(self, inputs_shape): if tensor_shape.dimension_value(inputs_shape[1]) is None: raise ValueError( "Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape) input_depth = tensor_shape.dimension_value(inputs_shape[1]) # pylint: disable=protected-access self._kernel_w = self.add_variable( "%s_w" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, shape=[input_depth, 4 * self._num_units], initializer=self._kernel_initializer) self._kernel_u = self.add_variable( "%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, shape=[1, 4 * self._num_units], initializer=init_ops.random_uniform_initializer( minval=-1, maxval=1, dtype=self.dtype)) self._bias = self.add_variable( rnn_cell_impl._BIAS_VARIABLE_NAME, shape=[4 * self._num_units], initializer=(self._bias_initializer if self._bias_initializer is not None else init_ops.zeros_initializer(dtype=self.dtype))) # pylint: enable=protected-access self.built = True def call(self, inputs, state): """Independent Long short-term memory cell (IndyLSTM). Args: inputs: `2-D` tensor with shape `[batch_size, input_size]`. 
state: An `LSTMStateTuple` of state tensors, each shaped `[batch_size, num_units]`. Returns: A pair containing the new hidden state, and the new state (a `LSTMStateTuple`). """ sigmoid = math_ops.sigmoid one = constant_op.constant(1, dtype=dtypes.int32) c, h = state gate_inputs = math_ops.matmul(inputs, self._kernel_w) gate_inputs += gen_array_ops.tile(h, [1, 4]) * self._kernel_u gate_inputs = nn_ops.bias_add(gate_inputs, self._bias) # i = input_gate, j = new_input, f = forget_gate, o = output_gate i, j, f, o = array_ops.split( value=gate_inputs, num_or_size_splits=4, axis=one) forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype) # Note that using `add` and `multiply` instead of `+` and `*` gives a # performance improvement. So using those at the cost of readability. add = math_ops.add multiply = math_ops.multiply new_c = add( multiply(c, sigmoid(add(f, forget_bias_tensor))), multiply(sigmoid(i), self._activation(j))) new_h = multiply(self._activation(new_c), sigmoid(o)) new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h) return new_h, new_state NTMControllerState = collections.namedtuple( "NTMControllerState", ("controller_state", "read_vector_list", "w_list", "M", "time")) class NTMCell(rnn_cell_impl.LayerRNNCell): """Neural Turing Machine Cell with RNN controller. Implementation based on: https://arxiv.org/abs/1807.08518 Mark Collier, Joeran Beel which is in turn based on the source code of: https://github.com/snowkylin/ntm and of course the original NTM paper: Neural Turing Machines https://arxiv.org/abs/1410.5401 A Graves, G Wayne, I Danihelka """ def __init__(self, controller, memory_size, memory_vector_dim, read_head_num, write_head_num, shift_range=1, output_dim=None, clip_value=20, dtype=dtypes.float32, name=None): """Initialize the NTM Cell. Args: controller: an RNNCell, the RNN controller. memory_size: int, The number of memory locations in the NTM memory matrix memory_vector_dim: int, The dimensionality of each location in the NTM memory matrix read_head_num: int, The number of read heads from the controller into memory write_head_num: int, The number of write heads from the controller into memory shift_range: int, The number of places to the left/right it is possible to iterate the previous address to in a single step output_dim: int, The number of dimensions to make a linear projection of the NTM controller outputs to. If None, no linear projection is applied clip_value: float, The maximum absolute value the controller parameters are clipped to dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. 
""" super(NTMCell, self).__init__(dtype=dtype, name=name) rnn_cell_impl.assert_like_rnncell("NTM RNN controller cell", controller) self.controller = controller self.memory_size = memory_size self.memory_vector_dim = memory_vector_dim self.read_head_num = read_head_num self.write_head_num = write_head_num self.clip_value = clip_value self.output_dim = output_dim self.shift_range = shift_range self.num_parameters_per_head = ( self.memory_vector_dim + 2 * self.shift_range + 4) self.num_heads = self.read_head_num + self.write_head_num self.total_parameter_num = ( self.num_parameters_per_head * self.num_heads + self.memory_vector_dim * 2 * self.write_head_num) @property def state_size(self): return NTMControllerState( controller_state=self.controller.state_size, read_vector_list=[ self.memory_vector_dim for _ in range(self.read_head_num) ], w_list=[ self.memory_size for _ in range(self.read_head_num + self.write_head_num) ], M=tensor_shape.TensorShape([self.memory_size * self.memory_vector_dim]), time=tensor_shape.TensorShape([])) @property def output_size(self): return self.output_dim def build(self, inputs_shape): if self.output_dim is None: if inputs_shape[1].value is None: raise ValueError( "Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape) else: self.output_dim = inputs_shape[1].value def _create_linear_initializer(input_size, dtype=dtypes.float32): stddev = 1.0 / math.sqrt(input_size) return init_ops.truncated_normal_initializer(stddev=stddev, dtype=dtype) self._params_kernel = self.add_variable( "parameters_kernel", shape=[self.controller.output_size, self.total_parameter_num], initializer=_create_linear_initializer(self.controller.output_size)) self._params_bias = self.add_variable( "parameters_bias", shape=[self.total_parameter_num], initializer=init_ops.constant_initializer(0.0, dtype=self.dtype)) self._output_kernel = self.add_variable( "output_kernel", shape=[ self.controller.output_size + self.memory_vector_dim * self.read_head_num, self.output_dim ], initializer=_create_linear_initializer(self.controller.output_size + self.memory_vector_dim * self.read_head_num)) self._output_bias = self.add_variable( "output_bias", shape=[self.output_dim], initializer=init_ops.constant_initializer(0.0, dtype=self.dtype)) self._init_read_vectors = [ self.add_variable( "initial_read_vector_%d" % i, shape=[1, self.memory_vector_dim], initializer=initializers.glorot_uniform()) for i in range(self.read_head_num) ] self._init_address_weights = [ self.add_variable( "initial_address_weights_%d" % i, shape=[1, self.memory_size], initializer=initializers.glorot_uniform()) for i in range(self.read_head_num + self.write_head_num) ] self._M = self.add_variable( "memory", shape=[self.memory_size, self.memory_vector_dim], initializer=init_ops.constant_initializer(1e-6, dtype=self.dtype)) self.built = True def call(self, x, prev_state): # Addressing Mechanisms (Sec 3.3) def _prev_read_vector_list_initial_value(): return [ self._expand( math_ops.tanh( array_ops.squeeze( math_ops.matmul( array_ops.ones([1, 1]), self._init_read_vectors[i]))), dim=0, N=x.shape[0].value or array_ops.shape(x)[0]) for i in range(self.read_head_num) ] prev_read_vector_list = control_flow_ops.cond( math_ops.equal(prev_state.time, 0), _prev_read_vector_list_initial_value, lambda: prev_state.read_vector_list) if self.read_head_num == 1: prev_read_vector_list = [prev_read_vector_list] controller_input = array_ops.concat([x] + prev_read_vector_list, axis=1) controller_output, controller_state = self.controller( 
controller_input, prev_state.controller_state) parameters = math_ops.matmul(controller_output, self._params_kernel) parameters = nn_ops.bias_add(parameters, self._params_bias) parameters = clip_ops.clip_by_value(parameters, -self.clip_value, self.clip_value) head_parameter_list = array_ops.split( parameters[:, :self.num_parameters_per_head * self.num_heads], self.num_heads, axis=1) erase_add_list = array_ops.split( parameters[:, self.num_parameters_per_head * self.num_heads:], 2 * self.write_head_num, axis=1) def _prev_w_list_initial_value(): return [ self._expand( nn_ops.softmax( array_ops.squeeze( math_ops.matmul( array_ops.ones([1, 1]), self._init_address_weights[i]))), dim=0, N=x.shape[0].value or array_ops.shape(x)[0]) for i in range(self.read_head_num + self.write_head_num) ] prev_w_list = control_flow_ops.cond( math_ops.equal(prev_state.time, 0), _prev_w_list_initial_value, lambda: prev_state.w_list) if (self.read_head_num + self.write_head_num) == 1: prev_w_list = [prev_w_list] prev_M = control_flow_ops.cond( math_ops.equal(prev_state.time, 0), lambda: self._expand( self._M, dim=0, N=x.shape[0].value or array_ops.shape(x)[0]), lambda: prev_state.M) w_list = [] for i, head_parameter in enumerate(head_parameter_list): k = math_ops.tanh(head_parameter[:, 0:self.memory_vector_dim]) beta = nn_ops.softplus(head_parameter[:, self.memory_vector_dim]) g = math_ops.sigmoid(head_parameter[:, self.memory_vector_dim + 1]) s = nn_ops.softmax(head_parameter[:, self.memory_vector_dim + 2:(self.memory_vector_dim + 2 + (self.shift_range * 2 + 1))]) gamma = nn_ops.softplus(head_parameter[:, -1]) + 1 w = self._addressing(k, beta, g, s, gamma, prev_M, prev_w_list[i]) w_list.append(w) # Reading (Sec 3.1) read_w_list = w_list[:self.read_head_num] read_vector_list = [] for i in range(self.read_head_num): read_vector = math_ops.reduce_sum( array_ops.expand_dims(read_w_list[i], dim=2) * prev_M, axis=1) read_vector_list.append(read_vector) # Writing (Sec 3.2) write_w_list = w_list[self.read_head_num:] M = prev_M for i in range(self.write_head_num): w = array_ops.expand_dims(write_w_list[i], axis=2) erase_vector = array_ops.expand_dims( math_ops.sigmoid(erase_add_list[i * 2]), axis=1) add_vector = array_ops.expand_dims( math_ops.tanh(erase_add_list[i * 2 + 1]), axis=1) erase_M = array_ops.ones_like(M) - math_ops.matmul(w, erase_vector) M = M * erase_M + math_ops.matmul(w, add_vector) output = math_ops.matmul( array_ops.concat([controller_output] + read_vector_list, axis=1), self._output_kernel) output = nn_ops.bias_add(output, self._output_bias) output = clip_ops.clip_by_value(output, -self.clip_value, self.clip_value) return output, NTMControllerState( controller_state=controller_state, read_vector_list=read_vector_list, w_list=w_list, M=M, time=prev_state.time + 1) def _expand(self, x, dim, N): return array_ops.concat([array_ops.expand_dims(x, dim) for _ in range(N)], axis=dim) def _addressing(self, k, beta, g, s, gamma, prev_M, prev_w): # Sec 3.3.1 Focusing by Content k = array_ops.expand_dims(k, axis=2) inner_product = math_ops.matmul(prev_M, k) k_norm = math_ops.sqrt( math_ops.reduce_sum(math_ops.square(k), axis=1, keepdims=True)) M_norm = math_ops.sqrt( math_ops.reduce_sum(math_ops.square(prev_M), axis=2, keepdims=True)) norm_product = M_norm * k_norm # eq (6) K = array_ops.squeeze(inner_product / (norm_product + 1e-8)) K_amplified = math_ops.exp(array_ops.expand_dims(beta, axis=1) * K) # eq (5) w_c = K_amplified / math_ops.reduce_sum(K_amplified, axis=1, keepdims=True) # Sec 3.3.2 Focusing by Location 
g = array_ops.expand_dims(g, axis=1) # eq (7) w_g = g * w_c + (1 - g) * prev_w s = array_ops.concat([ s[:, :self.shift_range + 1], array_ops.zeros([ s.shape[0].value or array_ops.shape(s)[0], self.memory_size - (self.shift_range * 2 + 1) ]), s[:, -self.shift_range:] ], axis=1) t = array_ops.concat( [array_ops.reverse(s, axis=[1]), array_ops.reverse(s, axis=[1])], axis=1) s_matrix = array_ops.stack([ t[:, self.memory_size - i - 1:self.memory_size * 2 - i - 1] for i in range(self.memory_size) ], axis=1) # eq (8) w_ = math_ops.reduce_sum( array_ops.expand_dims(w_g, axis=1) * s_matrix, axis=2) w_sharpen = math_ops.pow(w_, array_ops.expand_dims(gamma, axis=1)) # eq (9) w = w_sharpen / math_ops.reduce_sum(w_sharpen, axis=1, keepdims=True) return w def zero_state(self, batch_size, dtype): read_vector_list = [ array_ops.zeros([batch_size, self.memory_vector_dim]) for _ in range(self.read_head_num) ] w_list = [ array_ops.zeros([batch_size, self.memory_size]) for _ in range(self.read_head_num + self.write_head_num) ] controller_init_state = self.controller.zero_state(batch_size, dtype) M = array_ops.zeros([batch_size, self.memory_size, self.memory_vector_dim]) return NTMControllerState( controller_state=controller_init_state, read_vector_list=read_vector_list, w_list=w_list, M=M, time=0) class MinimalRNNCell(rnn_cell_impl.LayerRNNCell): """MinimalRNN cell. The implementation is based on: https://arxiv.org/pdf/1806.05394v2.pdf Minmin Chen, Jeffrey Pennington, Samuel S. Schoenholz. "Dynamical Isometry and a Mean Field Theory of RNNs: Gating Enables Signal Propagation in Recurrent Neural Networks." ICML, 2018. A MinimalRNN cell first projects the input to the hidden space. The new hidden state is then calculated as a weighted sum of the projected input and the previous hidden state, using a single update gate. """ def __init__(self, units, activation="tanh", kernel_initializer="glorot_uniform", bias_initializer="ones", name=None, dtype=None, **kwargs): """Initialize the parameters for a MinimalRNN cell. Args: units: int, The number of units in the MinimalRNN cell. activation: Nonlinearity to use in the feedforward network. Default: `tanh`. kernel_initializer: The initializer to use for the weight in the update gate and feedforward network. Default: `glorot_uniform`. bias_initializer: The initializer to use for the bias in the update gate. Default: `ones`. name: String, the name of the cell. dtype: Default dtype of the cell. **kwargs: Dict, keyword named properties for common cell attributes. """ super(MinimalRNNCell, self).__init__(name=name, dtype=dtype, **kwargs) # Inputs must be 2-dimensional. 
self.input_spec = input_spec.InputSpec(ndim=2) self.units = units self.activation = activations.get(activation) self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) @property def state_size(self): return self.units @property def output_size(self): return self.units def build(self, inputs_shape): if inputs_shape[-1] is None: raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" % str(inputs_shape)) input_size = inputs_shape[-1] # pylint: disable=protected-access # self._kernel contains W_x, W, V self.kernel = self.add_weight( name=rnn_cell_impl._WEIGHTS_VARIABLE_NAME, shape=[input_size + 2 * self.units, self.units], initializer=self.kernel_initializer) self.bias = self.add_weight( name=rnn_cell_impl._BIAS_VARIABLE_NAME, shape=[self.units], initializer=self.bias_initializer) # pylint: enable=protected-access self.built = True def call(self, inputs, state): """Run one step of MinimalRNN. Args: inputs: input Tensor, must be 2-D, `[batch, input_size]`. state: state Tensor, must be 2-D, `[batch, state_size]`. Returns: A tuple containing: - Output: A `2-D` tensor with shape `[batch_size, state_size]`. - New state: A `2-D` tensor with shape `[batch_size, state_size]`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. """ input_size = inputs.get_shape()[1] if tensor_shape.dimension_value(input_size) is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") feedforward_weight, gate_weight = array_ops.split( value=self.kernel, num_or_size_splits=[tensor_shape.dimension_value(input_size), 2 * self.units], axis=0) feedforward = math_ops.matmul(inputs, feedforward_weight) feedforward = self.activation(feedforward) gate_inputs = math_ops.matmul( array_ops.concat([feedforward, state], 1), gate_weight) gate_inputs = nn_ops.bias_add(gate_inputs, self.bias) u = math_ops.sigmoid(gate_inputs) new_h = u * state + (1 - u) * feedforward return new_h, new_h class CFNCell(rnn_cell_impl.LayerRNNCell): """Chaos Free Network cell. The implementation is based on: https://openreview.net/pdf?id=S1dIzvclg Thomas Laurent, James von Brecht. "A recurrent neural network without chaos." ICLR, 2017. A CFN cell first projects the input to the hidden space. The hidden state goes through a contractive mapping. The new hidden state is then calculated as a linear combination of the projected input and the contracted previous hidden state, using decoupled input and forget gates. """ def __init__(self, units, activation="tanh", kernel_initializer="glorot_uniform", bias_initializer="ones", name=None, dtype=None, **kwargs): """Initialize the parameters for a CFN cell. Args: units: int, The number of units in the CFN cell. activation: Nonlinearity to use. Default: `tanh`. kernel_initializer: Initializer for the `kernel` weights matrix. Default: `glorot_uniform`. bias_initializer: The initializer to use for the bias in the gates. Default: `ones`. name: String, the name of the cell. dtype: Default dtype of the cell. **kwargs: Dict, keyword named properties for common cell attributes. """ super(CFNCell, self).__init__(name=name, dtype=dtype, **kwargs) # Inputs must be 2-dimensional. 
self.input_spec = input_spec.InputSpec(ndim=2) self.units = units self.activation = activations.get(activation) self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) @property def state_size(self): return self.units @property def output_size(self): return self.units def build(self, inputs_shape): if inputs_shape[-1] is None: raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" % str(inputs_shape)) input_size = inputs_shape[-1] # pylint: disable=protected-access # `self.kernel` contains V_{\theta}, V_{\eta}, W. # `self.recurrent_kernel` contains U_{\theta}, U_{\eta}. # `self.bias` contains b_{\theta}, b_{\eta}. self.kernel = self.add_weight( shape=[input_size, 3 * self.units], name=rnn_cell_impl._WEIGHTS_VARIABLE_NAME, initializer=self.kernel_initializer) self.recurrent_kernel = self.add_weight( shape=[self.units, 2 * self.units], name="recurrent_%s" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME, initializer=self.kernel_initializer) self.bias = self.add_weight( shape=[2 * self.units], name=rnn_cell_impl._BIAS_VARIABLE_NAME, initializer=self.bias_initializer) # pylint: enable=protected-access self.built = True def call(self, inputs, state): """Run one step of CFN. Args: inputs: input Tensor, must be 2-D, `[batch, input_size]`. state: state Tensor, must be 2-D, `[batch, state_size]`. Returns: A tuple containing: - Output: A `2-D` tensor with shape `[batch_size, state_size]`. - New state: A `2-D` tensor with shape `[batch_size, state_size]`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. """ input_size = inputs.get_shape()[-1] if tensor_shape.dimension_value(input_size) is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") # The variable names u, v, w, b are consistent with the notations in the # original paper. v, w = array_ops.split( value=self.kernel, num_or_size_splits=[2 * self.units, self.units], axis=1) u = self.recurrent_kernel b = self.bias gates = math_ops.matmul(state, u) + math_ops.matmul(inputs, v) gates = nn_ops.bias_add(gates, b) gates = math_ops.sigmoid(gates) theta, eta = array_ops.split(value=gates, num_or_size_splits=2, axis=1) proj_input = math_ops.matmul(inputs, w) # The input gate is (1 - eta), which is different from the original paper. # This is for the propose of initialization. With the default # bias_initializer `ones`, the input gate is initialized to a small number. new_h = theta * self.activation(state) + (1 - eta) * self.activation( proj_input) return new_h, new_h
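# ----------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# TensorFlow sources). It assumes a TF 1.x graph-mode session and drives two
# of the cells defined above through their public tf.contrib.rnn aliases; all
# shapes, unit counts, and scope names below are arbitrary example values.
def _example_contrib_cells():
  """Runs a Conv2DLSTMCell and a GLSTMCell with tf.nn.dynamic_rnn."""
  import tensorflow as tf

  # Conv2DLSTM over a sequence of ten 16x16x3 feature maps.
  conv_inputs = tf.placeholder(tf.float32, [4, 10, 16, 16, 3])
  conv_cell = tf.contrib.rnn.Conv2DLSTMCell(
      input_shape=[16, 16, 3], output_channels=8, kernel_shape=[3, 3])
  with tf.variable_scope("conv_lstm_example"):
    conv_outputs, _ = tf.nn.dynamic_rnn(
        conv_cell, conv_inputs, dtype=tf.float32)

  # G-LSTM with 128 units in 4 groups: each sub-cell has 32 units and reads a
  # 16-dim slice of the 64-dim input.
  flat_inputs = tf.placeholder(tf.float32, [8, 20, 64])
  glstm_cell = tf.contrib.rnn.GLSTMCell(num_units=128, number_of_groups=4)
  with tf.variable_scope("glstm_example"):
    glstm_outputs, _ = tf.nn.dynamic_rnn(
        glstm_cell, flat_inputs, dtype=tf.float32)
  return conv_outputs, glstm_outputs
# ----------------------------------------------------------------------------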
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/ops/rnn_cell.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """RNN helpers for TensorFlow models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import array_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import variable_scope as vs def stack_bidirectional_rnn(cells_fw, cells_bw, inputs, initial_states_fw=None, initial_states_bw=None, dtype=None, sequence_length=None, scope=None): """Creates a bidirectional recurrent neural network. Stacks several bidirectional rnn layers. The combined forward and backward layer outputs are used as input of the next layer. tf.bidirectional_rnn does not allow to share forward and backward information between layers. The input_size of the first forward and backward cells must match. The initial state for both directions is zero and no intermediate states are returned. As described in https://arxiv.org/abs/1303.5778 Args: cells_fw: List of instances of RNNCell, one per layer, to be used for forward direction. cells_bw: List of instances of RNNCell, one per layer, to be used for backward direction. inputs: A length T list of inputs, each a tensor of shape [batch_size, input_size], or a nested tuple of such elements. initial_states_fw: (optional) A list of the initial states (one per layer) for the forward RNN. Each tensor must has an appropriate type and shape `[batch_size, cell_fw.state_size]`. initial_states_bw: (optional) Same as for `initial_states_fw`, but using the corresponding properties of `cells_bw`. dtype: (optional) The data type for the initial state. Required if either of the initial states are not provided. sequence_length: (optional) An int32/int64 vector, size `[batch_size]`, containing the actual lengths for each of the sequences. scope: VariableScope for the created subgraph; defaults to None. Returns: A tuple (outputs, output_state_fw, output_state_bw) where: outputs is a length `T` list of outputs (one for each input), which are depth-concatenated forward and backward outputs. output_states_fw is the final states, one tensor per layer, of the forward rnn. output_states_bw is the final states, one tensor per layer, of the backward rnn. Raises: TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`. ValueError: If inputs is None, not a list or an empty list. 
""" if not cells_fw: raise ValueError("Must specify at least one fw cell for BidirectionalRNN.") if not cells_bw: raise ValueError("Must specify at least one bw cell for BidirectionalRNN.") if not isinstance(cells_fw, list): raise ValueError("cells_fw must be a list of RNNCells (one per layer).") if not isinstance(cells_bw, list): raise ValueError("cells_bw must be a list of RNNCells (one per layer).") if len(cells_fw) != len(cells_bw): raise ValueError("Forward and Backward cells must have the same depth.") if (initial_states_fw is not None and (not isinstance(initial_states_fw, list) or len(initial_states_fw) != len(cells_fw))): raise ValueError( "initial_states_fw must be a list of state tensors (one per layer).") if (initial_states_bw is not None and (not isinstance(initial_states_bw, list) or len(initial_states_bw) != len(cells_bw))): raise ValueError( "initial_states_bw must be a list of state tensors (one per layer).") states_fw = [] states_bw = [] prev_layer = inputs with vs.variable_scope(scope or "stack_bidirectional_rnn"): for i, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)): initial_state_fw = None initial_state_bw = None if initial_states_fw: initial_state_fw = initial_states_fw[i] if initial_states_bw: initial_state_bw = initial_states_bw[i] with vs.variable_scope("cell_%d" % i) as cell_scope: prev_layer, state_fw, state_bw = rnn.static_bidirectional_rnn( cell_fw, cell_bw, prev_layer, initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw, sequence_length=sequence_length, dtype=dtype, scope=cell_scope) states_fw.append(state_fw) states_bw.append(state_bw) return prev_layer, tuple(states_fw), tuple(states_bw) def stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, inputs, initial_states_fw=None, initial_states_bw=None, dtype=None, sequence_length=None, parallel_iterations=None, time_major=False, scope=None, swap_memory=False): """Creates a dynamic bidirectional recurrent neural network. Stacks several bidirectional rnn layers. The combined forward and backward layer outputs are used as input of the next layer. tf.bidirectional_rnn does not allow to share forward and backward information between layers. The input_size of the first forward and backward cells must match. The initial state for both directions is zero and no intermediate states are returned. Args: cells_fw: List of instances of RNNCell, one per layer, to be used for forward direction. cells_bw: List of instances of RNNCell, one per layer, to be used for backward direction. inputs: The RNN inputs. this must be a tensor of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. initial_states_fw: (optional) A list of the initial states (one per layer) for the forward RNN. Each tensor must has an appropriate type and shape `[batch_size, cell_fw.state_size]`. initial_states_bw: (optional) Same as for `initial_states_fw`, but using the corresponding properties of `cells_bw`. dtype: (optional) The data type for the initial state. Required if either of the initial states are not provided. sequence_length: (optional) An int32/int64 vector, size `[batch_size]`, containing the actual lengths for each of the sequences. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. 
time_major: The shape format of the inputs and outputs Tensors. If true, these Tensors must be shaped [max_time, batch_size, depth]. If false, these Tensors must be shaped [batch_size, max_time, depth]. Using time_major = True is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. scope: VariableScope for the created subgraph; defaults to None. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. Returns: A tuple (outputs, output_state_fw, output_state_bw) where: outputs: Output `Tensor` shaped: `[batch_size, max_time, layers_output]`. Where layers_output are depth-concatenated forward and backward outputs. output_states_fw is the final states, one tensor per layer, of the forward rnn. output_states_bw is the final states, one tensor per layer, of the backward rnn. Raises: TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`. ValueError: If inputs is `None`. """ if not cells_fw: raise ValueError("Must specify at least one fw cell for BidirectionalRNN.") if not cells_bw: raise ValueError("Must specify at least one bw cell for BidirectionalRNN.") if not isinstance(cells_fw, list): raise ValueError("cells_fw must be a list of RNNCells (one per layer).") if not isinstance(cells_bw, list): raise ValueError("cells_bw must be a list of RNNCells (one per layer).") if len(cells_fw) != len(cells_bw): raise ValueError("Forward and Backward cells must have the same depth.") if (initial_states_fw is not None and (not isinstance(initial_states_fw, list) or len(initial_states_fw) != len(cells_fw))): raise ValueError( "initial_states_fw must be a list of state tensors (one per layer).") if (initial_states_bw is not None and (not isinstance(initial_states_bw, list) or len(initial_states_bw) != len(cells_bw))): raise ValueError( "initial_states_bw must be a list of state tensors (one per layer).") states_fw = [] states_bw = [] prev_layer = inputs with vs.variable_scope(scope or "stack_bidirectional_rnn"): for i, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)): initial_state_fw = None initial_state_bw = None if initial_states_fw: initial_state_fw = initial_states_fw[i] if initial_states_bw: initial_state_bw = initial_states_bw[i] with vs.variable_scope("cell_%d" % i): outputs, (state_fw, state_bw) = rnn.bidirectional_dynamic_rnn( cell_fw, cell_bw, prev_layer, initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw, sequence_length=sequence_length, parallel_iterations=parallel_iterations, dtype=dtype, swap_memory=swap_memory, time_major=time_major) # Concat the outputs to create the new input. prev_layer = array_ops.concat(outputs, 2) states_fw.append(state_fw) states_bw.append(state_bw) return prev_layer, tuple(states_fw), tuple(states_bw)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/ops/rnn.py
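A usage sketch may make the stacked-bidirectional helpers above more concrete. The following is a minimal, hypothetical example assuming TF 1.x graph mode; the batch/time/depth sizes, cell sizes, and placeholder names are illustrative and not taken from the source.

```python
# Minimal sketch, assuming TF 1.x graph mode; shapes and cell sizes are illustrative.
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import rnn as contrib_rnn

batch_size, max_time, input_depth = 32, 20, 128
inputs = tf.placeholder(tf.float32, [batch_size, max_time, input_depth])
lengths = tf.placeholder(tf.int32, [batch_size])

# One forward and one backward cell per layer (two layers here).
cells_fw = [tf.nn.rnn_cell.LSTMCell(64) for _ in range(2)]
cells_bw = [tf.nn.rnn_cell.LSTMCell(64) for _ in range(2)]

# outputs: [batch_size, max_time, 128] -- the last layer's forward and backward
# outputs, depth-concatenated. states_fw / states_bw hold one final state per layer.
outputs, states_fw, states_bw = contrib_rnn.stack_bidirectional_dynamic_rnn(
    cells_fw, cells_bw, inputs, sequence_length=lengths, dtype=tf.float32)
```

Because each layer's depth-concatenated output feeds the next layer, every layer after the first sees an input depth of twice the per-direction cell size.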
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Module for constructing fused RNN cells.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six from tensorflow.python.ops import array_ops from tensorflow.python.ops import rnn @six.add_metaclass(abc.ABCMeta) class FusedRNNCell(object): """Abstract object representing a fused RNN cell. A fused RNN cell represents the entire RNN expanded over the time dimension. In effect, this represents an entire recurrent network. Unlike RNN cells which are subclasses of `rnn_cell.RNNCell`, a `FusedRNNCell` operates on the entire time sequence at once, by putting the loop over time inside the cell. This usually leads to much more efficient, but more complex and less flexible implementations. Every `FusedRNNCell` must implement `__call__` with the following signature. """ @abc.abstractmethod def __call__(self, inputs, initial_state=None, dtype=None, sequence_length=None, scope=None): """Run this fused RNN on inputs, starting from the given state. Args: inputs: `3-D` tensor with shape `[time_len x batch_size x input_size]` or a list of `time_len` tensors of shape `[batch_size x input_size]`. initial_state: either a tensor with shape `[batch_size x state_size]` or a tuple with shapes `[batch_size x s] for s in state_size`, if the cell takes tuples. If this is not provided, the cell is expected to create a zero initial state of type `dtype`. dtype: The data type for the initial state and expected output. Required if `initial_state` is not provided or RNN state has a heterogeneous dtype. sequence_length: Specifies the length of each sequence in inputs. An `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0, time_len)`. Defaults to `time_len` for each element. scope: `VariableScope` or `string` for the created subgraph; defaults to class name. Returns: A pair containing: - Output: A `3-D` tensor of shape `[time_len x batch_size x output_size]` or a list of `time_len` tensors of shape `[batch_size x output_size]`, to match the type of the `inputs`. - Final state: Either a single `2-D` tensor, or a tuple of tensors matching the arity and shapes of `initial_state`. """ pass class FusedRNNCellAdaptor(FusedRNNCell): """This is an adaptor for RNNCell classes to be used with `FusedRNNCell`.""" def __init__(self, cell, use_dynamic_rnn=False): """Initialize the adaptor. Args: cell: an instance of a subclass of a `rnn_cell.RNNCell`. use_dynamic_rnn: whether to use dynamic (or static) RNN. 
""" self._cell = cell self._use_dynamic_rnn = use_dynamic_rnn def __call__(self, inputs, initial_state=None, dtype=None, sequence_length=None, scope=None): is_list = isinstance(inputs, list) if self._use_dynamic_rnn: if is_list: inputs = array_ops.stack(inputs) outputs, state = rnn.dynamic_rnn( self._cell, inputs, sequence_length=sequence_length, initial_state=initial_state, dtype=dtype, time_major=True, scope=scope) if is_list: # Convert outputs back to list outputs = array_ops.unstack(outputs) else: # non-dynamic rnn if not is_list: inputs = array_ops.unstack(inputs) outputs, state = rnn.static_rnn( self._cell, inputs, initial_state=initial_state, dtype=dtype, sequence_length=sequence_length, scope=scope) if not is_list: # Convert outputs back to tensor outputs = array_ops.stack(outputs) return outputs, state class TimeReversedFusedRNN(FusedRNNCell): """This is an adaptor to time-reverse a FusedRNNCell. For example, ```python cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(10) fw_lstm = tf.contrib.rnn.FusedRNNCellAdaptor(cell, use_dynamic_rnn=True) bw_lstm = tf.contrib.rnn.TimeReversedFusedRNN(fw_lstm) fw_out, fw_state = fw_lstm(inputs) bw_out, bw_state = bw_lstm(inputs) ``` """ def __init__(self, cell): self._cell = cell def _reverse(self, t, lengths): """Time reverse the provided tensor or list of tensors. Assumes the top dimension is the time dimension. Args: t: 3D tensor or list of 2D tensors to be reversed lengths: 1D tensor of lengths, or `None` Returns: A reversed tensor or list of tensors """ if isinstance(t, list): return list(reversed(t)) else: if lengths is None: return array_ops.reverse_v2(t, [0]) else: return array_ops.reverse_sequence(t, lengths, 0, 1) def __call__(self, inputs, initial_state=None, dtype=None, sequence_length=None, scope=None): inputs = self._reverse(inputs, sequence_length) outputs, state = self._cell( inputs, initial_state=initial_state, dtype=dtype, sequence_length=sequence_length, scope=scope) outputs = self._reverse(outputs, sequence_length) return outputs, state
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/rnn/python/ops/fused_rnn_cell.py
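To illustrate the adaptor pattern above, here is a minimal sketch assuming TF 1.x graph mode; the cell sizes, tensor shapes, and variable-scope names are illustrative.

```python
# Minimal sketch, assuming TF 1.x graph mode; sizes and scope names are illustrative.
import tensorflow as tf

time_len, batch_size, input_size = 20, 32, 128
inputs = tf.placeholder(tf.float32, [time_len, batch_size, input_size])  # time-major

# Wrap ordinary RNNCells in the fused interface; use a separate cell (and variable
# scope) per direction so the forward and backward weights stay distinct.
fw = tf.contrib.rnn.FusedRNNCellAdaptor(
    tf.nn.rnn_cell.BasicLSTMCell(64), use_dynamic_rnn=True)
bw = tf.contrib.rnn.TimeReversedFusedRNN(
    tf.contrib.rnn.FusedRNNCellAdaptor(
        tf.nn.rnn_cell.BasicLSTMCell(64), use_dynamic_rnn=True))

with tf.variable_scope("fw"):
  fw_out, fw_state = fw(inputs, dtype=tf.float32)
with tf.variable_scope("bw"):
  bw_out, bw_state = bw(inputs, dtype=tf.float32)

# Depth-concatenate the two directions, as a bidirectional layer would.
outputs = tf.concat([fw_out, bw_out], axis=-1)
```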
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow Summary API v2. The operations in this package are safe to use with eager execution turned on or off. It has a more flexible API that allows summaries to be written directly from ops to places other than event log files, rather than propagating protos from `tf.summary.merge_all` to `tf.summary.FileWriter`. To use with eager execution enabled, write your code as follows: ```python global_step = tf.train.get_or_create_global_step() summary_writer = tf.contrib.summary.create_file_writer( train_dir, flush_millis=10000) with summary_writer.as_default(), tf.contrib.summary.always_record_summaries(): # model code goes here # and in it call tf.contrib.summary.scalar("loss", my_loss) # In this case every call to tf.contrib.summary.scalar will generate a record # ... ``` To use it with graph execution, write your code as follows: ```python global_step = tf.train.get_or_create_global_step() summary_writer = tf.contrib.summary.create_file_writer( train_dir, flush_millis=10000) with summary_writer.as_default(), tf.contrib.summary.always_record_summaries(): # model definition code goes here # and in it call tf.contrib.summary.scalar("loss", my_loss) # In this case every call to tf.contrib.summary.scalar will generate an op, # note the need to run tf.contrib.summary.all_summary_ops() to make sure these # ops get executed. # ... train_op = .... with tf.Session(...) as sess: tf.global_variables_initializer().run() tf.contrib.summary.initialize(graph=tf.get_default_graph()) # ... while not_done_training: sess.run([train_op, tf.contrib.summary.all_summary_ops()]) # ... 
``` """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.ops.summary_ops_v2 import all_v2_summary_ops as all_summary_ops from tensorflow.python.ops.summary_ops_v2 import always_record_summaries from tensorflow.python.ops.summary_ops_v2 import audio from tensorflow.python.ops.summary_ops_v2 import create_db_writer from tensorflow.python.ops.summary_ops_v2 import create_file_writer from tensorflow.python.ops.summary_ops_v2 import create_summary_file_writer from tensorflow.python.ops.summary_ops_v2 import eval_dir from tensorflow.python.ops.summary_ops_v2 import flush from tensorflow.python.ops.summary_ops_v2 import generic from tensorflow.python.ops.summary_ops_v2 import graph from tensorflow.python.ops.summary_ops_v2 import histogram from tensorflow.python.ops.summary_ops_v2 import image from tensorflow.python.ops.summary_ops_v2 import import_event from tensorflow.python.ops.summary_ops_v2 import initialize from tensorflow.python.ops.summary_ops_v2 import never_record_summaries from tensorflow.python.ops.summary_ops_v2 import record_summaries_every_n_global_steps from tensorflow.python.ops.summary_ops_v2 import scalar from tensorflow.python.ops.summary_ops_v2 import should_record_summaries from tensorflow.python.ops.summary_ops_v2 import summary_writer_initializer_op from tensorflow.python.ops.summary_ops_v2 import SummaryWriter
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/summary/summary.py
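Besides the always-record patterns in the module docstring above, summaries can also be recorded on a schedule. A minimal graph-mode sketch follows, assuming TF 1.x; the log directory, recording cadence, and stand-in loss/train ops are illustrative.

```python
# Minimal sketch, assuming TF 1.x graph mode; logdir, cadence, and ops are illustrative.
import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
summary_writer = tf.contrib.summary.create_file_writer('/tmp/train', flush_millis=10000)

with summary_writer.as_default(), \
    tf.contrib.summary.record_summaries_every_n_global_steps(10):
  loss = tf.constant(0.5)  # stand-in for a real training loss
  tf.contrib.summary.scalar('loss', loss)

train_op = tf.assign_add(global_step, 1)  # stand-in for a real train op
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  tf.contrib.summary.initialize(graph=tf.get_default_graph())
  for _ in range(100):
    sess.run([train_op, tf.contrib.summary.all_summary_ops()])
```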
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile import time import unittest import six from tensorflow.contrib.summary import summary as summary_ops from tensorflow.contrib.summary import summary_test_util from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.training import training_util get_all = summary_test_util.get_all class GraphFileTest(test_util.TensorFlowTestCase): def testSummaryOps(self): logdir = self.get_temp_dir() writer = summary_ops.create_file_writer(logdir, max_queue=0) with writer.as_default(), summary_ops.always_record_summaries(): summary_ops.generic('tensor', 1, step=1) summary_ops.scalar('scalar', 2.0, step=1) summary_ops.histogram('histogram', [1.0], step=1) summary_ops.image('image', [[[[1.0]]]], step=1) summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1) with self.cached_session() as sess: sess.run(summary_ops.summary_writer_initializer_op()) sess.run(summary_ops.all_summary_ops()) # The working condition of the ops is tested in the C++ test so we just # test here that we're calling them correctly. 
self.assertTrue(gfile.Exists(logdir)) def testSummaryName(self): logdir = self.get_temp_dir() writer = summary_ops.create_file_writer(logdir, max_queue=0) with writer.as_default(), summary_ops.always_record_summaries(): summary_ops.scalar('scalar', 2.0, step=1) with self.cached_session() as sess: sess.run(summary_ops.summary_writer_initializer_op()) sess.run(summary_ops.all_summary_ops()) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(2, len(events)) self.assertEqual('scalar', events[1].summary.value[0].tag) def testSummaryNameScope(self): logdir = self.get_temp_dir() writer = summary_ops.create_file_writer(logdir, max_queue=0) with writer.as_default(), summary_ops.always_record_summaries(): with ops.name_scope('scope'): summary_ops.scalar('scalar', 2.0, step=1) with self.cached_session() as sess: sess.run(summary_ops.summary_writer_initializer_op()) sess.run(summary_ops.all_summary_ops()) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(2, len(events)) self.assertEqual('scope/scalar', events[1].summary.value[0].tag) def testSummaryGlobalStep(self): training_util.get_or_create_global_step() logdir = self.get_temp_dir() writer = summary_ops.create_file_writer(logdir, max_queue=0) with writer.as_default(), summary_ops.always_record_summaries(): summary_ops.scalar('scalar', 2.0) with self.cached_session() as sess: sess.run(variables.global_variables_initializer()) sess.run(summary_ops.summary_writer_initializer_op()) step, _ = sess.run( [training_util.get_global_step(), summary_ops.all_summary_ops()]) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(2, len(events)) self.assertEqual(step, events[1].step) def testMaxQueue(self): logdir = self.get_temp_dir() writer = summary_ops.create_file_writer( logdir, max_queue=1, flush_millis=999999) with writer.as_default(), summary_ops.always_record_summaries(): summary_ops.scalar('scalar', 2.0, step=1) with self.cached_session() as sess: sess.run(summary_ops.summary_writer_initializer_op()) get_total = lambda: len(summary_test_util.events_from_logdir(logdir)) # Note: First tf.compat.v1.Event is always file_version. self.assertEqual(1, get_total()) sess.run(summary_ops.all_summary_ops()) self.assertEqual(1, get_total()) # Should flush after second summary since max_queue = 1 sess.run(summary_ops.all_summary_ops()) self.assertEqual(3, get_total()) def testFlushFunction(self): logdir = self.get_temp_dir() writer = summary_ops.create_file_writer( logdir, max_queue=999999, flush_millis=999999) with writer.as_default(), summary_ops.always_record_summaries(): summary_ops.scalar('scalar', 2.0, step=1) flush_op = summary_ops.flush() with self.cached_session() as sess: sess.run(summary_ops.summary_writer_initializer_op()) get_total = lambda: len(summary_test_util.events_from_logdir(logdir)) # Note: First tf.compat.v1.Event is always file_version. 
self.assertEqual(1, get_total()) sess.run(summary_ops.all_summary_ops()) self.assertEqual(1, get_total()) sess.run(flush_op) self.assertEqual(2, get_total()) # Test "writer" parameter sess.run(summary_ops.all_summary_ops()) sess.run(summary_ops.flush(writer=writer)) self.assertEqual(3, get_total()) sess.run(summary_ops.all_summary_ops()) sess.run(summary_ops.flush(writer=writer._resource)) # pylint:disable=protected-access self.assertEqual(4, get_total()) def testSharedName(self): logdir = self.get_temp_dir() with summary_ops.always_record_summaries(): # Create with default shared name (should match logdir) writer1 = summary_ops.create_file_writer(logdir) with writer1.as_default(): summary_ops.scalar('one', 1.0, step=1) # Create with explicit logdir shared name (should be same resource/file) shared_name = 'logdir:' + logdir writer2 = summary_ops.create_file_writer(logdir, name=shared_name) with writer2.as_default(): summary_ops.scalar('two', 2.0, step=2) # Create with different shared name (should be separate resource/file) writer3 = summary_ops.create_file_writer(logdir, name='other') with writer3.as_default(): summary_ops.scalar('three', 3.0, step=3) with self.cached_session() as sess: # Run init ops across writers sequentially to avoid race condition. # TODO(nickfelt): fix race condition in resource manager lookup or create sess.run(writer1.init()) sess.run(writer2.init()) time.sleep(1.1) # Ensure filename has a different timestamp sess.run(writer3.init()) sess.run(summary_ops.all_summary_ops()) sess.run([writer1.flush(), writer2.flush(), writer3.flush()]) event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))) # First file has tags "one" and "two" events = summary_test_util.events_from_file(next(event_files)) self.assertEqual('brain.Event:2', events[0].file_version) tags = [e.summary.value[0].tag for e in events[1:]] self.assertItemsEqual(['one', 'two'], tags) # Second file has tag "three" events = summary_test_util.events_from_file(next(event_files)) self.assertEqual('brain.Event:2', events[0].file_version) tags = [e.summary.value[0].tag for e in events[1:]] self.assertItemsEqual(['three'], tags) # No more files self.assertRaises(StopIteration, lambda: next(event_files)) def testWriterInitAndClose(self): logdir = self.get_temp_dir() with summary_ops.always_record_summaries(): writer = summary_ops.create_file_writer( logdir, max_queue=100, flush_millis=1000000) with writer.as_default(): summary_ops.scalar('one', 1.0, step=1) with self.cached_session() as sess: sess.run(summary_ops.summary_writer_initializer_op()) get_total = lambda: len(summary_test_util.events_from_logdir(logdir)) self.assertEqual(1, get_total()) # file_version Event # Running init() again while writer is open has no effect sess.run(writer.init()) self.assertEqual(1, get_total()) sess.run(summary_ops.all_summary_ops()) self.assertEqual(1, get_total()) # Running close() should do an implicit flush sess.run(writer.close()) self.assertEqual(2, get_total()) # Running init() on a closed writer should start a new file time.sleep(1.1) # Ensure filename has a different timestamp sess.run(writer.init()) sess.run(summary_ops.all_summary_ops()) sess.run(writer.close()) files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))) self.assertEqual(2, len(files)) self.assertEqual(2, len(summary_test_util.events_from_file(files[1]))) def testWriterFlush(self): logdir = self.get_temp_dir() with summary_ops.always_record_summaries(): writer = summary_ops.create_file_writer( logdir, max_queue=100, 
flush_millis=1000000) with writer.as_default(): summary_ops.scalar('one', 1.0, step=1) with self.cached_session() as sess: sess.run(summary_ops.summary_writer_initializer_op()) get_total = lambda: len(summary_test_util.events_from_logdir(logdir)) self.assertEqual(1, get_total()) # file_version Event sess.run(summary_ops.all_summary_ops()) self.assertEqual(1, get_total()) sess.run(writer.flush()) self.assertEqual(2, get_total()) class GraphDbTest(summary_test_util.SummaryDbTest): def testGraphPassedToGraph_isForbiddenForThineOwnSafety(self): with self.assertRaises(TypeError): summary_ops.graph(ops.Graph()) with self.assertRaises(TypeError): summary_ops.graph('') # TODO(b/133791853) Re-enable these tests. @unittest.skip('Skipping because of b/133791853.') def testGraphSummary(self): training_util.get_or_create_global_step() name = 'hi' graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),)) with self.cached_session(): with self.create_db_writer().as_default(): summary_ops.initialize(graph=graph) six.assertCountEqual(self, [name], get_all(self.db, 'SELECT node_name FROM Nodes')) def testScalarSummary(self): """Test record_summaries_every_n_global_steps and all_summaries().""" with ops.Graph().as_default(), self.cached_session() as sess: global_step = training_util.get_or_create_global_step() global_step.initializer.run() with ops.device('/cpu:0'): step_increment = state_ops.assign_add(global_step, 1) sess.run(step_increment) # Increment global step from 0 to 1 logdir = tempfile.mkdtemp() with summary_ops.create_file_writer(logdir, max_queue=0, name='t2').as_default(): with summary_ops.record_summaries_every_n_global_steps(2): summary_ops.initialize() summary_op = summary_ops.scalar('my_scalar', 2.0) # Neither of these should produce a summary because # global_step is 1 and "1 % 2 != 0" sess.run(summary_ops.all_summary_ops()) sess.run(summary_op) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 1) # Increment global step from 1 to 2 and check that the summary # is now written sess.run(step_increment) sess.run(summary_ops.all_summary_ops()) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'my_scalar') def testScalarSummaryNameScope(self): """Test record_summaries_every_n_global_steps and all_summaries().""" with ops.Graph().as_default(), self.cached_session() as sess: global_step = training_util.get_or_create_global_step() global_step.initializer.run() with ops.device('/cpu:0'): step_increment = state_ops.assign_add(global_step, 1) sess.run(step_increment) # Increment global step from 0 to 1 logdir = tempfile.mkdtemp() with summary_ops.create_file_writer(logdir, max_queue=0, name='t2').as_default(): with summary_ops.record_summaries_every_n_global_steps(2): summary_ops.initialize() with ops.name_scope('scope'): summary_op = summary_ops.scalar('my_scalar', 2.0) # Neither of these should produce a summary because # global_step is 1 and "1 % 2 != 0" sess.run(summary_ops.all_summary_ops()) sess.run(summary_op) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 1) # Increment global step from 1 to 2 and check that the summary # is now written sess.run(step_increment) sess.run(summary_ops.all_summary_ops()) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'scope/my_scalar') def testSummaryGraphModeCond(self): with ops.Graph().as_default(), 
self.cached_session(): training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t2').as_default(), summary_ops.always_record_summaries(): summary_ops.initialize() training_util.get_or_create_global_step().initializer.run() def f(): summary_ops.scalar('scalar', 2.0) return constant_op.constant(True) pred = array_ops.placeholder(dtypes.bool) x = control_flow_ops.cond(pred, f, lambda: constant_op.constant(False)) x.eval(feed_dict={pred: True}) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'cond/scalar') def testSummaryGraphModeWhile(self): with ops.Graph().as_default(), self.cached_session(): training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t2').as_default(), summary_ops.always_record_summaries(): summary_ops.initialize() training_util.get_or_create_global_step().initializer.run() def body(unused_pred): summary_ops.scalar('scalar', 2.0) return constant_op.constant(False) def cond(pred): return pred pred = array_ops.placeholder(dtypes.bool) x = control_flow_ops.while_loop(cond, body, [pred]) x.eval(feed_dict={pred: True}) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'while/scalar') if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/summary/summary_ops_graph_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities to test summaries.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os import sqlite3 from tensorflow.core.util import event_pb2 from tensorflow.python.framework import test_util from tensorflow.python.lib.io import tf_record from tensorflow.python.ops import summary_ops_v2 as summary_ops from tensorflow.python.platform import gfile class SummaryDbTest(test_util.TensorFlowTestCase): """Helper for summary database testing.""" def setUp(self): super(SummaryDbTest, self).setUp() self.db_path = os.path.join(self.get_temp_dir(), 'DbTest.sqlite') if os.path.exists(self.db_path): os.unlink(self.db_path) self.db = sqlite3.connect(self.db_path) self.create_db_writer = functools.partial( summary_ops.create_db_writer, db_uri=self.db_path, experiment_name='experiment', run_name='run', user_name='user') def tearDown(self): self.db.close() super(SummaryDbTest, self).tearDown() def events_from_file(filepath): """Returns all events in a single event file. Args: filepath: Path to the event file. Returns: A list of all tf.compat.v1.Event protos in the event file. """ records = list(tf_record.tf_record_iterator(filepath)) result = [] for r in records: event = event_pb2.Event() event.ParseFromString(r) result.append(event) return result def events_from_logdir(logdir): """Returns all events in the single eventfile in logdir. Args: logdir: The directory in which the single event file is sought. Returns: A list of all tf.compat.v1.Event protos from the single event file. Raises: AssertionError: If logdir does not contain exactly one file. """ assert gfile.Exists(logdir) files = gfile.ListDirectory(logdir) assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files return events_from_file(os.path.join(logdir, files[0])) def get_one(db, q, *p): return db.execute(q, p).fetchone()[0] def get_all(db, q, *p): return unroll(db.execute(q, p).fetchall()) def unroll(list_of_tuples): return sum(list_of_tuples, ())
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/summary/summary_test_util.py
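The helpers above are typically used to assert on what was actually written to disk. A minimal sketch, assuming a logdir that contains exactly one event file; the path is illustrative.

```python
# Minimal sketch; the logdir is illustrative and must contain exactly one event file.
from tensorflow.contrib.summary import summary_test_util

events = summary_test_util.events_from_logdir('/tmp/train')
# events[0] is always the file_version record; real summaries follow it.
for event in events[1:]:
  for value in event.summary.value:
    print(event.step, value.tag)
```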
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile import time import unittest import sqlite3 import numpy as np import six from tensorflow.contrib.summary import summary as summary_ops from tensorflow.contrib.summary import summary_test_util from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.core.framework import types_pb2 from tensorflow.python.eager import function from tensorflow.python.eager import test from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import state_ops from tensorflow.python.platform import gfile from tensorflow.python.training import training_util get_all = summary_test_util.get_all get_one = summary_test_util.get_one _NUMPY_NUMERIC_TYPES = { types_pb2.DT_HALF: np.float16, types_pb2.DT_FLOAT: np.float32, types_pb2.DT_DOUBLE: np.float64, types_pb2.DT_INT8: np.int8, types_pb2.DT_INT16: np.int16, types_pb2.DT_INT32: np.int32, types_pb2.DT_INT64: np.int64, types_pb2.DT_UINT8: np.uint8, types_pb2.DT_UINT16: np.uint16, types_pb2.DT_UINT32: np.uint32, types_pb2.DT_UINT64: np.uint64, types_pb2.DT_COMPLEX64: np.complex64, types_pb2.DT_COMPLEX128: np.complex128, types_pb2.DT_BOOL: np.bool_, } class EagerFileTest(test_util.TensorFlowTestCase): def testShouldRecordSummary(self): self.assertFalse(summary_ops.should_record_summaries()) with summary_ops.always_record_summaries(): self.assertTrue(summary_ops.should_record_summaries()) def testSummaryOps(self): training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t0').as_default(), summary_ops.always_record_summaries(): summary_ops.generic('tensor', 1, '') summary_ops.scalar('scalar', 2.0) summary_ops.histogram('histogram', [1.0]) summary_ops.image('image', [[[[1.0]]]]) summary_ops.audio('audio', [[1.0]], 1.0, 1) # The working condition of the ops is tested in the C++ test so we just # test here that we're calling them correctly. 
self.assertTrue(gfile.Exists(logdir)) @test_util.assert_no_new_pyobjects_executing_eagerly def testEagerMemory(self): training_util.get_or_create_global_step() logdir = self.get_temp_dir() with summary_ops.create_file_writer( logdir, max_queue=0, name='t0').as_default(), summary_ops.always_record_summaries(): summary_ops.generic('tensor', 1, '') summary_ops.scalar('scalar', 2.0) summary_ops.histogram('histogram', [1.0]) summary_ops.image('image', [[[[1.0]]]]) summary_ops.audio('audio', [[1.0]], 1.0, 1) def testDefunSummarys(self): training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t1').as_default(), summary_ops.always_record_summaries(): @function.defun def write(): summary_ops.scalar('scalar', 2.0) write() events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].simple_value, 2.0) def testSummaryName(self): training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t2').as_default(), summary_ops.always_record_summaries(): summary_ops.scalar('scalar', 2.0) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'scalar') def testSummaryNameScope(self): training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t2').as_default(), summary_ops.always_record_summaries(): with ops.name_scope('scope'): summary_ops.scalar('scalar', 2.0) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'scope/scalar') def testSummaryGlobalStep(self): step = training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t2').as_default(), summary_ops.always_record_summaries(): summary_ops.scalar('scalar', 2.0, step=step) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'scalar') def testRecordEveryNGlobalSteps(self): step = training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() def run_step(): summary_ops.scalar('scalar', i, step=step) step.assign_add(1) with summary_ops.create_file_writer( logdir).as_default(), summary_ops.record_summaries_every_n_global_steps( 2, step): for i in range(10): run_step() # And another 10 steps as a graph function. run_step_fn = function.defun(run_step) for i in range(10): run_step_fn() events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 11) def testMaxQueue(self): logs = tempfile.mkdtemp() with summary_ops.create_file_writer( logs, max_queue=1, flush_millis=999999, name='lol').as_default(), summary_ops.always_record_summaries(): get_total = lambda: len(summary_test_util.events_from_logdir(logs)) # Note: First tf.compat.v1.Event is always file_version. 
self.assertEqual(1, get_total()) summary_ops.scalar('scalar', 2.0, step=1) self.assertEqual(1, get_total()) # Should flush after second summary since max_queue = 1 summary_ops.scalar('scalar', 2.0, step=2) self.assertEqual(3, get_total()) def testFlushFunction(self): logs = tempfile.mkdtemp() writer = summary_ops.create_file_writer( logs, max_queue=999999, flush_millis=999999, name='lol') with writer.as_default(), summary_ops.always_record_summaries(): get_total = lambda: len(summary_test_util.events_from_logdir(logs)) # Note: First tf.compat.v1.Event is always file_version. self.assertEqual(1, get_total()) summary_ops.scalar('scalar', 2.0, step=1) summary_ops.scalar('scalar', 2.0, step=2) self.assertEqual(1, get_total()) summary_ops.flush() self.assertEqual(3, get_total()) # Test "writer" parameter summary_ops.scalar('scalar', 2.0, step=3) summary_ops.flush(writer=writer) self.assertEqual(4, get_total()) summary_ops.scalar('scalar', 2.0, step=4) summary_ops.flush(writer=writer._resource) # pylint:disable=protected-access self.assertEqual(5, get_total()) def testSharedName(self): logdir = self.get_temp_dir() with summary_ops.always_record_summaries(): # Create with default shared name (should match logdir) writer1 = summary_ops.create_file_writer(logdir) with writer1.as_default(): summary_ops.scalar('one', 1.0, step=1) summary_ops.flush() # Create with explicit logdir shared name (should be same resource/file) shared_name = 'logdir:' + logdir writer2 = summary_ops.create_file_writer(logdir, name=shared_name) with writer2.as_default(): summary_ops.scalar('two', 2.0, step=2) summary_ops.flush() # Create with different shared name (should be separate resource/file) time.sleep(1.1) # Ensure filename has a different timestamp writer3 = summary_ops.create_file_writer(logdir, name='other') with writer3.as_default(): summary_ops.scalar('three', 3.0, step=3) summary_ops.flush() event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))) # First file has tags "one" and "two" events = iter(summary_test_util.events_from_file(next(event_files))) self.assertEqual('brain.Event:2', next(events).file_version) self.assertEqual('one', next(events).summary.value[0].tag) self.assertEqual('two', next(events).summary.value[0].tag) self.assertRaises(StopIteration, lambda: next(events)) # Second file has tag "three" events = iter(summary_test_util.events_from_file(next(event_files))) self.assertEqual('brain.Event:2', next(events).file_version) self.assertEqual('three', next(events).summary.value[0].tag) self.assertRaises(StopIteration, lambda: next(events)) # No more files self.assertRaises(StopIteration, lambda: next(event_files)) def testWriterInitAndClose(self): logdir = self.get_temp_dir() get_total = lambda: len(summary_test_util.events_from_logdir(logdir)) with summary_ops.always_record_summaries(): writer = summary_ops.create_file_writer( logdir, max_queue=100, flush_millis=1000000) self.assertEqual(1, get_total()) # file_version Event # Calling init() again while writer is open has no effect writer.init() self.assertEqual(1, get_total()) try: # Not using .as_default() to avoid implicit flush when exiting writer.set_as_default() summary_ops.scalar('one', 1.0, step=1) self.assertEqual(1, get_total()) # Calling .close() should do an implicit flush writer.close() self.assertEqual(2, get_total()) # Calling init() on a closed writer should start a new file time.sleep(1.1) # Ensure filename has a different timestamp writer.init() files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))) 
self.assertEqual(2, len(files)) get_total = lambda: len(summary_test_util.events_from_file(files[1])) self.assertEqual(1, get_total()) # file_version Event summary_ops.scalar('two', 2.0, step=2) writer.close() self.assertEqual(2, get_total()) finally: # Clean up by resetting default writer summary_ops.create_file_writer(None).set_as_default() def testWriterFlush(self): logdir = self.get_temp_dir() get_total = lambda: len(summary_test_util.events_from_logdir(logdir)) with summary_ops.always_record_summaries(): writer = summary_ops.create_file_writer( logdir, max_queue=100, flush_millis=1000000) self.assertEqual(1, get_total()) # file_version Event with writer.as_default(): summary_ops.scalar('one', 1.0, step=1) self.assertEqual(1, get_total()) writer.flush() self.assertEqual(2, get_total()) summary_ops.scalar('two', 2.0, step=2) # Exiting the "as_default()" should do an implicit flush of the "two" tag self.assertEqual(3, get_total()) class EagerDbTest(summary_test_util.SummaryDbTest): # TODO(b/133791853) Re-enable these tests. @unittest.skip('Skipping because of b/133791853.') def testDbURIOpen(self): tmpdb_path = os.path.join(self.get_temp_dir(), 'tmpDbURITest.sqlite') tmpdb_uri = six.moves.urllib_parse.urljoin('file:', tmpdb_path) tmpdb_writer = summary_ops.create_db_writer(tmpdb_uri, 'experimentA', 'run1', 'user1') with summary_ops.always_record_summaries(): with tmpdb_writer.as_default(): summary_ops.scalar('t1', 2.0) tmpdb = sqlite3.connect(tmpdb_path) num = get_one(tmpdb, 'SELECT count(*) FROM Tags WHERE tag_name = "t1"') self.assertEqual(num, 1) tmpdb.close() # TODO(b/133791853) Re-enable these tests. @unittest.skip('Skipping because of b/133791853.') def testIntegerSummaries(self): step = training_util.create_global_step() writer = self.create_db_writer() def adder(x, y): state_ops.assign_add(step, 1) summary_ops.generic('x', x) summary_ops.generic('y', y) sum_ = x + y summary_ops.generic('sum', sum_) return sum_ with summary_ops.always_record_summaries(): with writer.as_default(): self.assertEqual(5, adder(int64(2), int64(3)).numpy()) six.assertCountEqual( self, [1, 1, 1], get_all(self.db, 'SELECT step FROM Tensors WHERE dtype IS NOT NULL')) six.assertCountEqual(self, ['x', 'y', 'sum'], get_all(self.db, 'SELECT tag_name FROM Tags')) x_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "x"') y_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "y"') sum_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "sum"') with summary_ops.always_record_summaries(): with writer.as_default(): self.assertEqual(9, adder(int64(4), int64(5)).numpy()) six.assertCountEqual( self, [1, 1, 1, 2, 2, 2], get_all(self.db, 'SELECT step FROM Tensors WHERE dtype IS NOT NULL')) six.assertCountEqual(self, [x_id, y_id, sum_id], get_all(self.db, 'SELECT tag_id FROM Tags')) self.assertEqual(2, get_tensor(self.db, x_id, 1)) self.assertEqual(3, get_tensor(self.db, y_id, 1)) self.assertEqual(5, get_tensor(self.db, sum_id, 1)) self.assertEqual(4, get_tensor(self.db, x_id, 2)) self.assertEqual(5, get_tensor(self.db, y_id, 2)) self.assertEqual(9, get_tensor(self.db, sum_id, 2)) six.assertCountEqual( self, ['experiment'], get_all(self.db, 'SELECT experiment_name FROM Experiments')) six.assertCountEqual(self, ['run'], get_all(self.db, 'SELECT run_name FROM Runs')) six.assertCountEqual(self, ['user'], get_all(self.db, 'SELECT user_name FROM Users')) def testBadExperimentName(self): with self.assertRaises(ValueError): self.create_db_writer(experiment_name='\0') def testBadRunName(self): 
with self.assertRaises(ValueError): self.create_db_writer(run_name='\0') def testBadUserName(self): with self.assertRaises(ValueError): self.create_db_writer(user_name='-hi') with self.assertRaises(ValueError): self.create_db_writer(user_name='hi-') with self.assertRaises(ValueError): self.create_db_writer(user_name='@') # TODO(b/133791853) Re-enable these tests. @unittest.skip('Skipping because of b/133791853.') def testGraphSummary(self): training_util.get_or_create_global_step() name = 'hi' graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),)) with summary_ops.always_record_summaries(): with self.create_db_writer().as_default(): summary_ops.graph(graph) six.assertCountEqual(self, [name], get_all(self.db, 'SELECT node_name FROM Nodes')) def get_tensor(db, tag_id, step): cursor = db.execute( 'SELECT dtype, shape, data FROM Tensors WHERE series = ? AND step = ?', (tag_id, step)) dtype, shape, data = cursor.fetchone() assert dtype in _NUMPY_NUMERIC_TYPES buf = np.frombuffer(data, dtype=_NUMPY_NUMERIC_TYPES[dtype]) if not shape: return buf[0] return buf.reshape([int(i) for i in shape.split(',')]) def int64(x): return array_ops.constant(x, dtypes.int64) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/summary/summary_ops_test.py
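The eager-mode pattern these tests exercise can be condensed into a short sketch, assuming TF 1.x with eager execution enabled; the logdir and logged values are illustrative.

```python
# Minimal sketch, assuming TF 1.x with eager execution enabled; values are illustrative.
import tensorflow as tf

tf.enable_eager_execution()
step = tf.train.get_or_create_global_step()

writer = tf.contrib.summary.create_file_writer('/tmp/eager_logs', flush_millis=10000)
with writer.as_default(), tf.contrib.summary.always_record_summaries():
  for i in range(10):
    step.assign_add(1)
    tf.contrib.summary.scalar('loss', 1.0 / (i + 1))  # written eagerly
  tf.contrib.summary.flush()  # force pending events out of the queue
```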
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests to ensure ClusterResolvers are usable via the old contrib path.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.cluster_resolver import SimpleClusterResolver from tensorflow.contrib.cluster_resolver.python.training import cluster_resolver from tensorflow.contrib.cluster_resolver.python.training import UnionClusterResolver from tensorflow.python.platform import test from tensorflow.python.training import server_lib class ClusterResolverInitializationTest(test.TestCase): def testCreateSimpleClusterResolverFromLib(self): base_cluster_spec = server_lib.ClusterSpec({ "ps": ["ps0:2222", "ps1:2222"], "worker": ["worker0:2222", "worker1:2222", "worker2:2222"] }) cluster_resolver.SimpleClusterResolver(base_cluster_spec) def testCreateSimpleClusterResolver(self): base_cluster_spec = server_lib.ClusterSpec({ "ps": ["ps0:2222", "ps1:2222"], "worker": ["worker0:2222", "worker1:2222", "worker2:2222"] }) SimpleClusterResolver(base_cluster_spec) def testCreateUnionClusterResolver(self): base_cluster_spec = server_lib.ClusterSpec({ "ps": ["ps0:2222", "ps1:2222"], "worker": ["worker0:2222", "worker1:2222", "worker2:2222"] }) simple_cr = SimpleClusterResolver(base_cluster_spec) UnionClusterResolver(simple_cr) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/cluster_resolver/cluster_resolver_initialization_test.py
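As a concrete companion to the initialization tests above, here is a minimal sketch of composing a `UnionClusterResolver` from two `SimpleClusterResolver`s; the job names and addresses are illustrative.

```python
# Minimal sketch; job names and addresses are illustrative.
from tensorflow.contrib.cluster_resolver import SimpleClusterResolver, UnionClusterResolver
from tensorflow.python.training import server_lib

ps = SimpleClusterResolver(server_lib.ClusterSpec({"ps": ["ps0:2222", "ps1:2222"]}))
workers = SimpleClusterResolver(
    server_lib.ClusterSpec({"worker": ["worker0:2222", "worker1:2222"]}))

union = UnionClusterResolver(ps, workers)
print(union.cluster_spec().as_dict())  # merged view of the 'ps' and 'worker' jobs
```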
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Standard imports for Cluster Resolvers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=wildcard-import,unused-import from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver from tensorflow.python.distribute.cluster_resolver.cluster_resolver import UnionClusterResolver from tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver import GCEClusterResolver from tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver import KubernetesClusterResolver from tensorflow.python.distribute.cluster_resolver.slurm_cluster_resolver import SlurmClusterResolver from tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver from tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver import TPUClusterResolver # pylint: enable=wildcard-import,unused-import from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = [ 'ClusterResolver', 'SimpleClusterResolver', 'UnionClusterResolver', 'GCEClusterResolver', 'KubernetesClusterResolver', 'TFConfigClusterResolver', 'TPUClusterResolver', 'SlurmClusterResolver', ] remove_undocumented(__name__, _allowed_symbols)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/cluster_resolver/__init__.py
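Of the resolvers re-exported above, `TFConfigClusterResolver` reads the cluster layout from the `TF_CONFIG` environment variable. A minimal sketch; the addresses and task assignment are illustrative.

```python
# Minimal sketch; the TF_CONFIG contents are illustrative.
import json
import os

from tensorflow.contrib.cluster_resolver import TFConfigClusterResolver

os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {'worker': ['worker0:2222', 'worker1:2222']},
    'task': {'type': 'worker', 'index': 1},
})

resolver = TFConfigClusterResolver()
print(resolver.task_type, resolver.task_id)  # worker 1
print(resolver.cluster_spec().as_dict())
```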
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Stub file for KubernetesClusterResolver for backwards compatibility.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # This file (and all files in this directory in general) is a backwards # compatibility shim that exists to re-export ClusterResolvers such that # existing OSS code will not be broken. # pylint: disable=unused-import from tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver import KubernetesClusterResolver # pylint: enable=unused-import from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = [ 'KubernetesClusterResolver', ] remove_undocumented(__name__, _allowed_symbols)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/cluster_resolver/python/training/kubernetes_cluster_resolver.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Stub file for SlurmClusterResolver to maintain backwards compatibility.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # This file (and all files in this directory in general) is a backwards # compatibility shim that exists to re-export ClusterResolvers such that # existing OSS code will not be broken. # pylint: disable=unused-import from tensorflow.python.distribute.cluster_resolver.slurm_cluster_resolver import SlurmClusterResolver # pylint: enable=unused-import from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = [ 'SlurmClusterResolver', ] remove_undocumented(__name__, _allowed_symbols)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/cluster_resolver/python/training/slurm_cluster_resolver.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Stub file for GCEClusterResolver to maintain backwards compatibility.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # This file (and all files in this directory in general) is a backwards # compatibility shim that exists to re-export ClusterResolvers such that # existing OSS code will not be broken. # pylint: disable=unused-import from tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver import GCEClusterResolver # pylint: enable=unused-import from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = [ 'GCEClusterResolver', ] remove_undocumented(__name__, _allowed_symbols)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/cluster_resolver/python/training/gce_cluster_resolver.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Library Imports for Cluster Resolvers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # This file (and all files in this directory in general) is a backwards # compatibility shim that exists to re-export ClusterResolvers such that # existing OSS code will not be broken. from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver from tensorflow.python.distribute.cluster_resolver.cluster_resolver import UnionClusterResolver from tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver import GCEClusterResolver from tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver import KubernetesClusterResolver from tensorflow.python.distribute.cluster_resolver.slurm_cluster_resolver import SlurmClusterResolver from tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver from tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver import TPUClusterResolver from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = [ 'cluster_resolver', 'gce_cluster_resolver', 'kubernetes_cluster_resolver', 'slurm_cluster_resolver', 'tfconfig_cluster_resolver', 'tpu_cluster_resolver', 'ClusterResolver', 'SimpleClusterResolver', 'UnionClusterResolver', 'GCEClusterResolver', 'KubernetesClusterResolver', 'TFConfigClusterResolver', 'TPUClusterResolver', 'SlurmClusterResolver', ] remove_undocumented(__name__, _allowed_symbols)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/cluster_resolver/python/training/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Stub file for TFConfigClusterResolver to maintain backwards compatibility.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # This file (and all files in this directory in general) is a backwards # compatibility shim that exists to re-export ClusterResolvers such that # existing OSS code will not be broken. # pylint: disable=unused-import from tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver # pylint: enable=unused-import from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = [ 'TFConfigClusterResolver', ] remove_undocumented(__name__, _allowed_symbols)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/cluster_resolver/python/training/tfconfig_cluster_resolver.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Stub file for TPUClusterResolver to maintain backwards compatibility.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # This file (and all files in this directory in general) is a backwards # compatibility shim that exists to re-export ClusterResolvers such that # existing OSS code will not be broken. # pylint: disable=unused-import from tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver import TPUClusterResolver # pylint: enable=unused-import from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = [ 'TPUClusterResolver', ] remove_undocumented(__name__, _allowed_symbols)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/cluster_resolver/python/training/tpu_cluster_resolver.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file for ClusterResolver to maintain backwards compatibility."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# This file (and all files in this directory in general) is a backwards
# compatibility shim that exists to re-export ClusterResolvers such that
# existing OSS code will not be broken.

# pylint: disable=unused-import
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import UnionClusterResolver
# pylint: enable=unused-import

from tensorflow.python.util.all_util import remove_undocumented

_allowed_symbols = [
    'ClusterResolver',
    'SimpleClusterResolver',
    'UnionClusterResolver',
]

remove_undocumented(__name__, _allowed_symbols)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/cluster_resolver/python/training/cluster_resolver.py
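The two concrete classes re-exported here are easy to demonstrate: `SimpleClusterResolver` wraps a static `ClusterSpec`, and `UnionClusterResolver` merges several resolvers into one cluster view. A hedged sketch with placeholder addresses (not library documentation, just an illustration of the intended API shape):

```
import tensorflow as tf

from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import UnionClusterResolver

# Two static cluster descriptions wrapped in SimpleClusterResolver.
ps_resolver = SimpleClusterResolver(
    tf.train.ClusterSpec({'ps': ['ps0.example.com:2222']}))
worker_resolver = SimpleClusterResolver(
    tf.train.ClusterSpec({'worker': ['worker0.example.com:2222']}))

# UnionClusterResolver merges the job definitions of its members.
union = UnionClusterResolver(ps_resolver, worker_resolver)
print(union.cluster_spec().as_dict())  # expected: both 'ps' and 'worker' jobs
```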
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and estimators that enable explicit kernel methods in TensorFlow.

@@KernelLinearClassifier
@@RandomFourierFeatureMapper
@@sparse_multiclass_hinge_loss
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.kernel_methods.python.kernel_estimators import KernelLinearClassifier
from tensorflow.contrib.kernel_methods.python.losses import sparse_multiclass_hinge_loss
from tensorflow.contrib.kernel_methods.python.mappers.random_fourier_features import RandomFourierFeatureMapper

from tensorflow.python.util.all_util import remove_undocumented

remove_undocumented(__name__)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/kernel_methods/__init__.py
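The three symbols documented above are the package's public surface and, in a TF 1.x build that ships contrib, should also be reachable through `tf.contrib.kernel_methods`. A short, hedged sketch of that access pattern, reusing the well-separated logits from the unit tests later in this section (where the expected loss is approximately 0):

```
import tensorflow as tf

logits = tf.constant([[1.2, -1.4, -1.0],
                      [1.4, 1.8, 4.0],
                      [0.5, 1.8, -1.0]])
labels = tf.constant([0, 2, 1])

loss = tf.contrib.kernel_methods.sparse_multiclass_hinge_loss(labels, logits)
with tf.Session() as sess:
  print(sess.run(loss))  # ~0.0 for these logits (see losses_test.py below)
```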
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of kernel-methods-related loss operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops.losses import losses def sparse_multiclass_hinge_loss( labels, logits, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS): r"""Adds Ops for computing the multiclass hinge loss. The implementation is based on the following paper: On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines by Crammer and Singer. link: http://jmlr.csail.mit.edu/papers/volume2/crammer01a/crammer01a.pdf This is a generalization of standard (binary) hinge loss. For a given instance with correct label c*, the loss is given by: $$loss = max_{c != c*} logits_c - logits_{c*} + 1.$$ or equivalently $$loss = max_c { logits_c - logits_{c*} + I_{c != c*} }$$ where \\(I_{c != c*} = 1\ \text{if}\ c != c*\\) and 0 otherwise. Args: labels: `Tensor` of shape [batch_size] or [batch_size, 1]. Corresponds to the ground truth. Each entry must be an index in `[0, num_classes)`. logits: `Tensor` of shape [batch_size, num_classes] corresponding to the unscaled logits. Its dtype should be either `float32` or `float64`. weights: Optional (python) scalar or `Tensor`. If a non-scalar `Tensor`, its rank should be either 1 ([batch_size]) or 2 ([batch_size, 1]). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is a scalar. Raises: ValueError: If `logits`, `labels` or `weights` have invalid or inconsistent shapes. ValueError: If `labels` tensor has invalid dtype. """ with ops.name_scope(scope, 'sparse_multiclass_hinge_loss', (logits, labels)) as scope: # Check logits Tensor has valid rank. logits_rank = logits.get_shape().ndims if logits_rank != 2: raise ValueError( 'logits should have rank 2 ([batch_size, num_classes]). Given rank is' ' {}'.format(logits_rank)) logits_shape = array_ops.shape(logits) batch_size, num_classes = logits_shape[0], logits_shape[1] logits = math_ops.cast(logits, dtypes.float32) # Check labels have valid type. if labels.dtype != dtypes.int32 and labels.dtype != dtypes.int64: raise ValueError( 'Invalid dtype for labels: {}. Acceptable dtypes: int32 and int64'. format(labels.dtype)) # Check labels and weights have valid ranks and are consistent. 
    labels_rank = labels.get_shape().ndims
    if labels_rank not in [1, 2]:
      raise ValueError(
          'labels should have rank 1 ([batch_size]) or 2 ([batch_size, 1]). '
          'Given rank is {}'.format(labels_rank))
    with ops.control_dependencies([
        check_ops.assert_less(labels, math_ops.cast(num_classes, labels.dtype))
    ]):
      labels = array_ops.reshape(labels, shape=[-1])

    weights = ops.convert_to_tensor(weights)
    weights_rank = weights.get_shape().ndims
    if weights_rank not in [0, 1, 2]:
      raise ValueError(
          'non-scalar weights should have rank 1 ([batch_size]) or 2 '
          '([batch_size, 1]). Given rank is {}'.format(weights_rank))
    if weights_rank > 0:
      weights = array_ops.reshape(weights, shape=[-1])
      # Check weights and labels have the same number of elements.
      weights.get_shape().assert_is_compatible_with(labels.get_shape())

    # Compute the logits tensor corresponding to the correct class per instance.
    example_indices = array_ops.reshape(
        math_ops.range(batch_size), shape=[batch_size, 1])
    indices = array_ops.concat(
        [
            example_indices,
            array_ops.reshape(
                math_ops.cast(labels, example_indices.dtype),
                shape=[batch_size, 1])
        ],
        axis=1)
    label_logits = array_ops.reshape(
        array_ops.gather_nd(params=logits, indices=indices),
        shape=[batch_size, 1])

    one_cold_labels = array_ops.one_hot(
        indices=labels, depth=num_classes, on_value=0.0, off_value=1.0)
    margin = logits - label_logits + one_cold_labels
    margin = nn_ops.relu(margin)
    loss = math_ops.reduce_max(margin, axis=1)
    return losses.compute_weighted_loss(
        loss, weights, scope, loss_collection, reduction=reduction)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/kernel_methods/python/losses.py
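To make the formula in the docstring concrete, here is a NumPy-only re-derivation of the per-example term $$max_c \{ logits_c - logits_{c*} + I_{c \ne c*} \}$$ clipped at zero, evaluated on the same numbers as one of the unit tests further down. It is an illustrative sketch, not part of the library:

```
import numpy as np

def multiclass_hinge(logits, labels):
  # margin[c] = logits[c] - logits[label] + 1{c != label}, clipped at 0;
  # the per-example loss is the max over classes, mirroring the TF op above.
  per_example = []
  for row, label in zip(logits, labels):
    margin = row - row[label] + (np.arange(len(row)) != label).astype(float)
    per_example.append(np.maximum(margin, 0.0).max())
  return np.mean(per_example)

logits = np.array([[1.2, -1.4, 0.8],
                   [1.4, 1.8, 4.0],
                   [1.5, 1.8, -1.0]])
labels = np.array([0, 2, 1])
print(multiclass_hinge(logits, labels))  # ~0.4333, matching the test expectation
```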
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Estimators that combine explicit kernel mappings with linear models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from tensorflow.contrib import layers from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper as dkm from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import head as head_lib from tensorflow.contrib.learn.python.learn.estimators import linear from tensorflow.contrib.learn.python.learn.estimators import prediction_key from tensorflow.python.ops import array_ops from tensorflow.python.platform import tf_logging as logging _FEATURE_COLUMNS = "feature_columns" _KERNEL_MAPPERS = "kernel_mappers" _OPTIMIZER = "optimizer" def _check_valid_kernel_mappers(kernel_mappers): """Checks that the input kernel_mappers are valid.""" if kernel_mappers is None: return True for kernel_mappers_list in six.itervalues(kernel_mappers): for kernel_mapper in kernel_mappers_list: if not isinstance(kernel_mapper, dkm.DenseKernelMapper): return False return True def _check_valid_head(head): """Returns true if the provided head is supported.""" if head is None: return False # pylint: disable=protected-access return isinstance(head, head_lib._BinaryLogisticHead) or isinstance( head, head_lib._MultiClassHead) # pylint: enable=protected-access def _update_features_and_columns(features, feature_columns, kernel_mappers_dict): """Updates features and feature_columns based on provided kernel mappers. Currently supports the update of `RealValuedColumn`s only. Args: features: Initial features dict. The key is a `string` (feature column name) and the value is a tensor. feature_columns: Initial iterable containing all the feature columns to be consumed (possibly after being updated) by the model. All items should be instances of classes derived from `FeatureColumn`. kernel_mappers_dict: A dict from feature column (type: _FeatureColumn) to objects inheriting from KernelMapper class. Returns: updated features and feature_columns based on provided kernel_mappers_dict. """ if kernel_mappers_dict is None: return features, feature_columns # First construct new columns and features affected by kernel_mappers_dict. mapped_features = {} mapped_columns = set() for feature_column in kernel_mappers_dict: column_name = feature_column.name # Currently only mappings over RealValuedColumns are supported. if not isinstance(feature_column, layers.feature_column._RealValuedColumn): # pylint: disable=protected-access logging.warning( "Updates are currently supported on RealValuedColumns only. Metadata " "for FeatureColumn {} will not be updated.".format(column_name)) continue mapped_column_name = column_name + "_MAPPED" # Construct new feature columns based on provided kernel_mappers. 
column_kernel_mappers = kernel_mappers_dict[feature_column] new_dim = sum(mapper.output_dim for mapper in column_kernel_mappers) mapped_columns.add( layers.feature_column.real_valued_column(mapped_column_name, new_dim)) # Get mapped features by concatenating mapped tensors (one mapped tensor # per kernel mappers from the list of kernel mappers corresponding to each # feature column). output_tensors = [] for kernel_mapper in column_kernel_mappers: output_tensors.append(kernel_mapper.map(features[column_name])) tensor = array_ops.concat(output_tensors, 1) mapped_features[mapped_column_name] = tensor # Finally update features dict and feature_columns. features = features.copy() features.update(mapped_features) feature_columns = set(feature_columns) feature_columns.update(mapped_columns) return features, feature_columns def _kernel_model_fn(features, labels, mode, params, config=None): """model_fn for the Estimator using kernel methods. Args: features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`). labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64` in the range `[0, n_classes)`. mode: Defines whether this is training, evaluation or prediction. See `ModeKeys`. params: A dict of hyperparameters. The following hyperparameters are expected: * head: A `Head` instance. * feature_columns: An iterable containing all the feature columns used by the model. * optimizer: string, `Optimizer` object, or callable that defines the optimizer to use for training. If `None`, will use a FTRL optimizer. * kernel_mappers: Dictionary of kernel mappers to be applied to the input features before training. config: `RunConfig` object to configure the runtime settings. Returns: A `ModelFnOps` instance. Raises: ValueError: If mode is not any of the `ModeKeys`. """ feature_columns = params[_FEATURE_COLUMNS] kernel_mappers = params[_KERNEL_MAPPERS] updated_features, updated_columns = _update_features_and_columns( features, feature_columns, kernel_mappers) params[_FEATURE_COLUMNS] = updated_columns return linear._linear_model_fn( # pylint: disable=protected-access updated_features, labels, mode, params, config) class _KernelEstimator(estimator.Estimator): """Generic kernel-based linear estimator.""" def __init__(self, feature_columns=None, model_dir=None, weight_column_name=None, head=None, optimizer=None, kernel_mappers=None, config=None): """Constructs a `_KernelEstimator` object.""" if not feature_columns and not kernel_mappers: raise ValueError( "You should set at least one of feature_columns, kernel_mappers.") if not _check_valid_kernel_mappers(kernel_mappers): raise ValueError("Invalid kernel mappers.") if not _check_valid_head(head): raise ValueError( "head type: {} is not supported. Supported head types: " "_BinaryLogisticHead, _MultiClassHead.".format(type(head))) params = { "head": head, _FEATURE_COLUMNS: feature_columns or [], _OPTIMIZER: optimizer, _KERNEL_MAPPERS: kernel_mappers, } super(_KernelEstimator, self).__init__( model_fn=_kernel_model_fn, model_dir=model_dir, config=config, params=params) class KernelLinearClassifier(_KernelEstimator): """Linear classifier using kernel methods as feature preprocessing. It trains a linear model after possibly mapping initial input features into a mapped space using explicit kernel mappings. Due to the kernel mappings, training a linear classifier in the mapped (output) space can detect non-linearities in the input space. 
The user can provide a list of kernel mappers to be applied to all or a subset of existing feature_columns. This way, the user can effectively provide 2 types of feature columns: * those passed as elements of feature_columns in the classifier's constructor * those appearing as a key of the kernel_mappers dict. If a column appears in feature_columns only, no mapping is applied to it. If it appears as a key in kernel_mappers, the corresponding kernel mappers are applied to it. Note that it is possible that a column appears in both places. Currently kernel_mappers are supported for _RealValuedColumns only. Example usage: ``` real_column_a = real_valued_column(name='real_column_a',...) sparse_column_b = sparse_column_with_hash_bucket(...) kernel_mappers = {real_column_a : [RandomFourierFeatureMapper(...)]} optimizer = ... # real_column_a is used as a feature in both its initial and its transformed # (mapped) form. sparse_column_b is not affected by kernel mappers. kernel_classifier = KernelLinearClassifier( feature_columns=[real_column_a, sparse_column_b], model_dir=..., optimizer=optimizer, kernel_mappers=kernel_mappers) # real_column_a is used as a feature in its transformed (mapped) form only. # sparse_column_b is not affected by kernel mappers. kernel_classifier = KernelLinearClassifier( feature_columns=[sparse_column_b], model_dir=..., optimizer=optimizer, kernel_mappers=kernel_mappers) # Input builders def train_input_fn: # returns x, y ... def eval_input_fn: # returns x, y ... kernel_classifier.fit(input_fn=train_input_fn) kernel_classifier.evaluate(input_fn=eval_input_fn) kernel_classifier.predict(...) ``` Input of `fit` and `evaluate` should have following features, otherwise there will be a `KeyError`: * if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. * for each `column` in `feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn`, a feature with `key=column.name` whose `value` is a `Tensor`. """ def __init__(self, feature_columns=None, model_dir=None, n_classes=2, weight_column_name=None, optimizer=None, kernel_mappers=None, config=None): """Construct a `KernelLinearClassifier` estimator object. Args: feature_columns: An iterable containing all the feature columns used by the model. All items in the set should be instances of classes derived from `FeatureColumn`. model_dir: Directory to save model parameters, graph etc. This can also be used to load checkpoints from the directory into an estimator to continue training a previously saved model. n_classes: number of label classes. Default is binary classification. Note that class labels are integers representing the class index (i.e. values from 0 to n_classes-1). For arbitrary label values (e.g. string labels), convert to class indices first. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. optimizer: The optimizer used to train the model. If specified, it should be an instance of `tf.Optimizer`. If `None`, the Ftrl optimizer is used by default. 
kernel_mappers: Dictionary of kernel mappers to be applied to the input features before training a (linear) model. Keys are feature columns and values are lists of mappers to be applied to the corresponding feature column. Currently only _RealValuedColumns are supported and therefore all mappers should conform to the `DenseKernelMapper` interface (see ./mappers/dense_kernel_mapper.py). config: `RunConfig` object to configure the runtime settings. Returns: A `KernelLinearClassifier` estimator. Raises: ValueError: if n_classes < 2. ValueError: if neither feature_columns nor kernel_mappers are provided. ValueError: if mappers provided as kernel_mappers values are invalid. """ super(KernelLinearClassifier, self).__init__( feature_columns=feature_columns, model_dir=model_dir, weight_column_name=weight_column_name, head=head_lib.multi_class_head( n_classes=n_classes, weight_column_name=weight_column_name), optimizer=optimizer, kernel_mappers=kernel_mappers, config=config) def predict_classes(self, input_fn=None): """Runs inference to determine the predicted class per instance. Args: input_fn: The input function providing features. Returns: A generator of predicted classes for the features provided by input_fn. Each predicted class is represented by its class index (i.e. integer from 0 to n_classes-1) """ key = prediction_key.PredictionKey.CLASSES predictions = super(KernelLinearClassifier, self).predict( input_fn=input_fn, outputs=[key]) return (pred[key] for pred in predictions) def predict_proba(self, input_fn=None): """Runs inference to determine the class probability predictions. Args: input_fn: The input function providing features. Returns: A generator of predicted class probabilities for the features provided by input_fn. """ key = prediction_key.PredictionKey.PROBABILITIES predictions = super(KernelLinearClassifier, self).predict( input_fn=input_fn, outputs=[key]) return (pred[key] for pred in predictions)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/kernel_methods/python/kernel_estimators.py
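The class docstring above sketches the intended usage with ellipses; a more concrete, still illustrative version, with the column name and mapper parameters borrowed from the unit tests that follow, might look like this:

```
from tensorflow.contrib import layers
from tensorflow.contrib.kernel_methods.python import kernel_estimators
from tensorflow.contrib.kernel_methods.python.mappers.random_fourier_features import RandomFourierFeatureMapper
from tensorflow.python.framework import constant_op

def input_fn():
  # Toy non-linearly-separable data (same points as the test file below).
  return {
      'multi_dim_feature':
          constant_op.constant([[1.0, 1.0], [1.0, -1.0],
                                [-1.0, -1.0], [-1.0, 1.0]]),
  }, constant_op.constant([[1], [0], [1], [0]])

feature = layers.real_valued_column('multi_dim_feature', dimension=2)
# Map the 2-d input into a 30-d random Fourier feature space (stddev=0.6).
kernel_mappers = {feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')]}

classifier = kernel_estimators.KernelLinearClassifier(
    feature_columns=[], kernel_mappers=kernel_mappers)
classifier.fit(input_fn=input_fn, steps=50)
print(classifier.evaluate(input_fn=input_fn, steps=1))
```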
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for kernel_estimators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib import layers from tensorflow.contrib.kernel_methods.python import kernel_estimators from tensorflow.contrib.kernel_methods.python.mappers.random_fourier_features import RandomFourierFeatureMapper from tensorflow.contrib.learn.python.learn.estimators import test_data from tensorflow.python.framework import constant_op from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework.test_util import TensorFlowTestCase from tensorflow.python.platform import googletest def _linearly_separable_binary_input_fn(): """Returns linearly-separable data points (binary classification).""" return { 'feature1': constant_op.constant([[0.0], [1.0], [3.0]]), 'feature2': constant_op.constant([[1.0], [-1.2], [1.0]]), }, constant_op.constant([[1], [0], [1]]) def _linearly_inseparable_binary_input_fn(): """Returns non-linearly-separable data points (binary classification).""" return { 'multi_dim_feature': constant_op.constant([[1.0, 1.0], [1.0, -1.0], [-1.0, -1.0], [-1.0, 1.0]]), }, constant_op.constant([[1], [0], [1], [0]]) class KernelLinearClassifierTest(TensorFlowTestCase): def testNoFeatureColumnsOrKernelMappers(self): """Tests that at least one of feature columns or kernels is provided.""" with self.assertRaises(ValueError): _ = kernel_estimators.KernelLinearClassifier() def testInvalidKernelMapper(self): """ValueError raised when the kernel mappers provided have invalid type.""" class DummyKernelMapper(object): def __init__(self): pass feature = layers.real_valued_column('feature') kernel_mappers = {feature: [DummyKernelMapper()]} with self.assertRaises(ValueError): _ = kernel_estimators.KernelLinearClassifier( feature_columns=[feature], kernel_mappers=kernel_mappers) def testInvalidNumberOfClasses(self): """ValueError raised when the kernel mappers provided have invalid type.""" feature = layers.real_valued_column('feature') with self.assertRaises(ValueError): _ = kernel_estimators.KernelLinearClassifier( feature_columns=[feature], n_classes=1) def testLinearlySeparableBinaryDataNoKernels(self): """Tests classifier w/o kernels (log. regression) for lin-separable data.""" feature1 = layers.real_valued_column('feature1') feature2 = layers.real_valued_column('feature2') logreg_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[feature1, feature2]) logreg_classifier.fit( input_fn=_linearly_separable_binary_input_fn, steps=100) metrics = logreg_classifier.evaluate( input_fn=_linearly_separable_binary_input_fn, steps=1) # Since the data is linearly separable, the classifier should have small # loss and perfect accuracy. 
self.assertLess(metrics['loss'], 0.1) self.assertEqual(metrics['accuracy'], 1.0) # As a result, it should assign higher probability to class 1 for the 1st # and 3rd example and higher probability to class 0 for the second example. logreg_prob_predictions = list( logreg_classifier.predict_proba(input_fn= _linearly_separable_binary_input_fn)) self.assertGreater(logreg_prob_predictions[0][1], 0.5) self.assertGreater(logreg_prob_predictions[1][0], 0.5) self.assertGreater(logreg_prob_predictions[2][1], 0.5) def testLinearlyInseparableBinaryDataWithAndWithoutKernels(self): """Tests classifier w/ and w/o kernels on non-linearly-separable data.""" multi_dim_feature = layers.real_valued_column( 'multi_dim_feature', dimension=2) # Data points are non-linearly separable so there will be at least one # mis-classified sample (accuracy < 0.8). In fact, the loss is minimized for # w1=w2=0.0, in which case each example incurs a loss of ln(2). The overall # (average) loss should then be ln(2) and the logits should be approximately # 0.0 for each sample. logreg_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[multi_dim_feature]) logreg_classifier.fit( input_fn=_linearly_inseparable_binary_input_fn, steps=50) logreg_metrics = logreg_classifier.evaluate( input_fn=_linearly_inseparable_binary_input_fn, steps=1) logreg_loss = logreg_metrics['loss'] logreg_accuracy = logreg_metrics['accuracy'] logreg_predictions = logreg_classifier.predict( input_fn=_linearly_inseparable_binary_input_fn, as_iterable=False) self.assertAlmostEqual(logreg_loss, np.log(2), places=3) self.assertLess(logreg_accuracy, 0.8) self.assertAllClose(logreg_predictions['logits'], [[0.0], [0.0], [0.0], [0.0]]) # Using kernel mappers allows to discover non-linearities in data. Mapping # the data to a higher dimensional feature space using approx RBF kernels, # substantially reduces the loss and leads to perfect classification # accuracy. 
kernel_mappers = { multi_dim_feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')] } kernelized_logreg_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[], kernel_mappers=kernel_mappers) kernelized_logreg_classifier.fit( input_fn=_linearly_inseparable_binary_input_fn, steps=50) kernelized_logreg_metrics = kernelized_logreg_classifier.evaluate( input_fn=_linearly_inseparable_binary_input_fn, steps=1) kernelized_logreg_loss = kernelized_logreg_metrics['loss'] kernelized_logreg_accuracy = kernelized_logreg_metrics['accuracy'] self.assertLess(kernelized_logreg_loss, 0.2) self.assertEqual(kernelized_logreg_accuracy, 1.0) def testVariablesWithAndWithoutKernels(self): """Tests variables w/ and w/o kernel.""" multi_dim_feature = layers.real_valued_column( 'multi_dim_feature', dimension=2) linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[multi_dim_feature]) linear_classifier.fit( input_fn=_linearly_inseparable_binary_input_fn, steps=50) linear_variables = linear_classifier.get_variable_names() self.assertIn('linear/multi_dim_feature/weight', linear_variables) self.assertIn('linear/bias_weight', linear_variables) linear_weights = linear_classifier.get_variable_value( 'linear/multi_dim_feature/weight') linear_bias = linear_classifier.get_variable_value('linear/bias_weight') kernel_mappers = { multi_dim_feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')] } kernel_linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[], kernel_mappers=kernel_mappers) kernel_linear_classifier.fit( input_fn=_linearly_inseparable_binary_input_fn, steps=50) kernel_linear_variables = kernel_linear_classifier.get_variable_names() self.assertIn('linear/multi_dim_feature_MAPPED/weight', kernel_linear_variables) self.assertIn('linear/bias_weight', kernel_linear_variables) kernel_linear_weights = kernel_linear_classifier.get_variable_value( 'linear/multi_dim_feature_MAPPED/weight') kernel_linear_bias = kernel_linear_classifier.get_variable_value( 'linear/bias_weight') # The feature column used for linear classification (no kernels) has # dimension 2 so the model will learn a 2-dimension weights vector (and a # scalar for the bias). In the kernelized model, the features are mapped to # a 30-dimensional feature space and so the weights variable will also have # dimension 30. 
self.assertEqual(2, len(linear_weights)) self.assertEqual(1, len(linear_bias)) self.assertEqual(30, len(kernel_linear_weights)) self.assertEqual(1, len(kernel_linear_bias)) def testClassifierWithAndWithoutKernelsNoRealValuedColumns(self): """Tests kernels have no effect for non-real valued columns .""" def input_fn(): return { 'price': constant_op.constant([[0.4], [0.6], [0.3]]), 'country': sparse_tensor.SparseTensor( values=['IT', 'US', 'GB'], indices=[[0, 0], [1, 3], [2, 1]], dense_shape=[3, 5]), }, constant_op.constant([[1], [0], [1]]) price = layers.real_valued_column('price') country = layers.sparse_column_with_hash_bucket( 'country', hash_bucket_size=5) linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[price, country]) linear_classifier.fit(input_fn=input_fn, steps=100) linear_metrics = linear_classifier.evaluate(input_fn=input_fn, steps=1) linear_loss = linear_metrics['loss'] linear_accuracy = linear_metrics['accuracy'] kernel_mappers = { country: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')] } kernel_linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[price, country], kernel_mappers=kernel_mappers) kernel_linear_classifier.fit(input_fn=input_fn, steps=100) kernel_linear_metrics = kernel_linear_classifier.evaluate( input_fn=input_fn, steps=1) kernel_linear_loss = kernel_linear_metrics['loss'] kernel_linear_accuracy = kernel_linear_metrics['accuracy'] # The kernel mapping is applied to a non-real-valued feature column and so # it should have no effect on the model. The loss and accuracy of the # "kernelized" model should match the loss and accuracy of the initial model # (without kernels). self.assertAlmostEqual(linear_loss, kernel_linear_loss, delta=0.01) self.assertAlmostEqual(linear_accuracy, kernel_linear_accuracy, delta=0.01) def testMulticlassDataWithAndWithoutKernels(self): """Tests classifier w/ and w/o kernels on multiclass data.""" feature_column = layers.real_valued_column('feature', dimension=4) # Metrics for linear classifier (no kernels). linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[feature_column], n_classes=3) linear_classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=50) linear_metrics = linear_classifier.evaluate( input_fn=test_data.iris_input_multiclass_fn, steps=1) linear_loss = linear_metrics['loss'] linear_accuracy = linear_metrics['accuracy'] # Using kernel mappers allows to discover non-linearities in data (via RBF # kernel approximation), reduces loss and increases accuracy. kernel_mappers = { feature_column: [ RandomFourierFeatureMapper( input_dim=4, output_dim=50, stddev=1.0, name='rffm') ] } kernel_linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[], n_classes=3, kernel_mappers=kernel_mappers) kernel_linear_classifier.fit( input_fn=test_data.iris_input_multiclass_fn, steps=50) kernel_linear_metrics = kernel_linear_classifier.evaluate( input_fn=test_data.iris_input_multiclass_fn, steps=1) kernel_linear_loss = kernel_linear_metrics['loss'] kernel_linear_accuracy = kernel_linear_metrics['accuracy'] self.assertLess(kernel_linear_loss, linear_loss) self.assertGreater(kernel_linear_accuracy, linear_accuracy) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/kernel_methods/python/kernel_estimators_test.py
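The comment in `testLinearlyInseparableBinaryDataWithAndWithoutKernels` asserts that when the loss is minimized at w1 = w2 = 0 every example incurs a logistic loss of ln(2). A one-line check of that arithmetic:

```
import math

# With zero weights the logit is 0, so sigmoid(0) = 0.5 for every example and
# the per-example log loss is -log(0.5) = log(2) ~= 0.6931, as the test expects.
print(-math.log(0.5), math.log(2))
```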
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for third_party.tensorflow.contrib.kernel_methods.python.losses.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.kernel_methods.python import losses from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class SparseMulticlassHingeLossTest(test.TestCase): def testInvalidLogitsShape(self): """An error is raised when logits have invalid shape.""" with self.cached_session(): logits = constant_op.constant([-1.0, 2.1], shape=(2,)) labels = constant_op.constant([0, 1]) with self.assertRaises(ValueError): _ = losses.sparse_multiclass_hinge_loss(labels, logits) def testInvalidLabelsShape(self): """An error is raised when labels have invalid shape.""" with self.cached_session(): logits = constant_op.constant([-1.0, 2.1], shape=(2, 1)) labels = constant_op.constant([1, 0], shape=(1, 1, 2)) with self.assertRaises(ValueError): _ = losses.sparse_multiclass_hinge_loss(labels, logits) def testInvalidWeightsShape(self): """An error is raised when weights have invalid shape.""" with self.cached_session(): logits = constant_op.constant([-1.0, 2.1], shape=(2, 1)) labels = constant_op.constant([1, 0], shape=(2,)) weights = constant_op.constant([1.5, 0.2], shape=(2, 1, 1)) with self.assertRaises(ValueError): _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights) def testInvalidLabelsDtype(self): """An error is raised when labels have invalid shape.""" with self.cached_session(): logits = constant_op.constant([-1.0, 2.1], shape=(2, 1)) labels = constant_op.constant([1, 0], dtype=dtypes.float32) with self.assertRaises(ValueError): _ = losses.sparse_multiclass_hinge_loss(labels, logits) def testNoneWeightRaisesValueError(self): """An error is raised when weights are None.""" with self.cached_session(): logits = constant_op.constant([-1.0, 2.1], shape=(2, 1)) labels = constant_op.constant([1, 0]) with self.assertRaises(ValueError): _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights=None) def testInconsistentLabelsAndWeightsShapesSameRank(self): """Error raised when weights and labels have same ranks, different sizes.""" with self.cached_session(): logits = constant_op.constant([-1.0, 2.1, 4.1], shape=(3, 1)) labels = constant_op.constant([1, 0, 2], shape=(3, 1)) weights = constant_op.constant([1.1, 2.0], shape=(2, 1)) with self.assertRaises(ValueError): _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights) def testInconsistentLabelsAndWeightsShapesDifferentRank(self): """Error raised when weights and labels have different ranks and sizes.""" with self.cached_session(): logits = constant_op.constant([-1.0, 2.1], 
shape=(2, 1)) labels = constant_op.constant([1, 0], shape=(2, 1)) weights = constant_op.constant([1.1, 2.0, 2.8], shape=(3,)) with self.assertRaises(ValueError): _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights) def testOutOfRangeLabels(self): """An error is raised when labels are not in [0, num_classes).""" with self.cached_session(): logits = constant_op.constant([[1.2, -1.4, -1.0], [1.4, 1.8, 4.0], [0.5, 1.8, -1.0]]) labels = constant_op.constant([1, 0, 4]) loss = losses.sparse_multiclass_hinge_loss(labels, logits) with self.assertRaises(errors.InvalidArgumentError): loss.eval() def testZeroLossInt32Labels(self): """Loss is 0 if true class logits sufficiently higher than other classes.""" with self.cached_session(): logits = constant_op.constant([[1.2, -1.4, -1.0], [1.4, 1.8, 4.0], [0.5, 1.8, -1.0]]) labels = constant_op.constant([0, 2, 1], dtype=dtypes.int32) loss = losses.sparse_multiclass_hinge_loss(labels, logits) self.assertAlmostEqual(loss.eval(), 0.0, 3) def testZeroLossInt64Labels(self): """Loss is 0 if true class logits sufficiently higher than other classes.""" with self.cached_session(): logits = constant_op.constant([[2.1, -0.4, -1.0], [1.4, 2.8, 4.0], [-0.5, 0.8, -1.0]]) labels = constant_op.constant([0, 2, 1], dtype=dtypes.int64) loss = losses.sparse_multiclass_hinge_loss(labels, logits) self.assertAlmostEqual(loss.eval(), 0.0, 3) def testUnknownShape(self): """Result keeps same with `testZeroLossInt32Labels`""" logits_np = np.array([[1.2, -1.4, -1.0], [1.4, 1.8, 4.0], [0.5, 1.8, -1.0]]) labels_np = np.array([0, 2, 1], dtype=np.int32) logits_shapes = [ [3, 3], # batch_size, num_classes [None, 3], [3, None], [None, None] ] for batch_size, num_classes in logits_shapes: with self.cached_session(): logits = array_ops.placeholder( dtypes.float32, shape=(batch_size, num_classes)) labels = array_ops.placeholder(dtypes.int32, shape=(batch_size,)) loss = losses.sparse_multiclass_hinge_loss(labels, logits) result = loss.eval(feed_dict={logits: logits_np, labels: labels_np}) self.assertAlmostEqual(result, 0.0, 3) def testCorrectPredictionsSomeClassesInsideMargin(self): """Loss is > 0 even if true class logits are higher than other classes.""" with self.cached_session(): logits = constant_op.constant([[1.2, -1.4, 0.8], [1.4, 1.8, 4.0], [1.5, 1.8, -1.0]]) labels = constant_op.constant([0, 2, 1]) loss = losses.sparse_multiclass_hinge_loss(labels, logits) # The first and third samples incur some loss (0.6 and 0.7 respectively). self.assertAlmostEqual(loss.eval(), 0.4333, 3) def testIncorrectPredictions(self): """Loss is >0 when an incorrect class has higher logits than true class.""" with self.cached_session(): logits = constant_op.constant([[2.6, 0.4, 0.8], [1.4, 0.8, -1.0], [0.5, -1.8, 2.0]]) labels = constant_op.constant([1, 0, 2]) loss = losses.sparse_multiclass_hinge_loss(labels, logits) # The first examples incurs a high loss (3.2) since the logits of an # incorrect class (0) are higher than the logits of the ground truth. The # second example also incures a (smaller) loss (0.4). 
self.assertAlmostEqual(loss.eval(), 1.2, 3) def testIncorrectPredictionsColumnLabels(self): """Same as above but labels is a rank-2 tensor.""" with self.cached_session(): logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0], [0.2, -1.8, 4.0]]) labels = constant_op.constant([1, 0, 2], shape=(3, 1)) loss = losses.sparse_multiclass_hinge_loss(labels, logits) # The first examples incurs a high loss (3.0) since the logits of an # incorrect class (0) are higher than the logits of the ground truth. The # second example also incures a (smaller) loss (0.3). self.assertAlmostEqual(loss.eval(), 1.1, 3) def testIncorrectPredictionsZeroWeights(self): """Loss is 0 when all weights are missing even if predictions are wrong.""" with self.cached_session(): logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0], [0.2, -1.8, 4.0]]) labels = constant_op.constant([1, 0, 2], shape=(3, 1)) weights = constant_op.constant([0.0, 0.0, 0.0], shape=(3, 1)) loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights) # No overall loss since all weights are 0. self.assertAlmostEqual(loss.eval(), 0.0, 3) def testNonZeroLossWithPythonScalarWeights(self): """Weighted loss is correctly computed when weights is a python scalar.""" with self.cached_session(): logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0], [0.2, -1.8, 4.0]]) labels = constant_op.constant([1, 0, 2], shape=(3, 1)) weights = 10.0 loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights) self.assertAlmostEqual(loss.eval(), 11.0, 3) def testNonZeroLossWithScalarTensorWeights(self): """Weighted loss is correctly computed when weights is a rank-0 tensor.""" with self.cached_session(): logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0], [0.2, -1.8, 4.0]]) labels = constant_op.constant([1, 0, 2], shape=(3, 1)) weights = constant_op.constant(5.0) loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights) self.assertAlmostEqual(loss.eval(), 5.5, 3) def testNonZeroLossWith1DTensorWeightsColumnLabels(self): """Weighted loss is correctly computed when weights is a rank-0 tensor.""" with self.cached_session(): logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0], [0.2, -1.8, 4.0]]) labels = constant_op.constant([1, 0, 2], shape=(3, 1)) weights = constant_op.constant([1.0, 0.5, 2.0], shape=(3,)) loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights) # The overall loss is 1/3 *(3.0*1.0 + 0.5*0.3+ 2.0*0.0) = 1.05 self.assertAlmostEqual(loss.eval(), 1.05, 3) def testNonZeroLossWith2DTensorWeights1DLabelsSomeWeightsMissing(self): """Weighted loss is correctly computed when weights is a rank-0 tensor.""" with self.cached_session(): logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0], [0.2, -1.8, 4.0], [1.6, 1.8, -4.0]]) labels = constant_op.constant([1, 0, 2, 1]) weights = constant_op.constant([[1.0], [0.0], [2.0], [4.0]]) loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights) # The overall loss is 1/3 *(3.0*1.0 + 0.0*0.3+ 2.0*0.0 + 4.0*0.8) = 6.2/3. self.assertAlmostEqual(loss.eval(), 2.06666, 3) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/kernel_methods/python/losses_test.py
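The expected values in the weighted tests follow from the `SUM_BY_NONZERO_WEIGHTS` reduction: sum the per-example weighted hinge losses and divide by the number of non-zero weights. Verifying the 1.05 expectation from `testNonZeroLossWith1DTensorWeightsColumnLabels` by hand (the per-example losses come from the comment in that test):

```
# Per-example hinge losses for that test's logits/labels are [3.0, 0.3, 0.0];
# the weights are [1.0, 0.5, 2.0], all non-zero.
per_example = [3.0, 0.3, 0.0]
weights = [1.0, 0.5, 2.0]
weighted_sum = sum(l * w for l, w in zip(per_example, weights))
num_nonzero = sum(1 for w in weights if w != 0.0)
print(weighted_sum / num_nonzero)  # 1.05
```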
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for RandomFourierFeatureMapper.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper from tensorflow.contrib.kernel_methods.python.mappers.random_fourier_features import RandomFourierFeatureMapper from tensorflow.python.framework import constant_op from tensorflow.python.framework.test_util import TensorFlowTestCase from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import random_ops from tensorflow.python.platform import googletest def _inner_product(x, y): r"""Inner product between tensors x and y. The input tensors are assumed to be in ROW representation, that is, the method returns \\(x * y^T\\). Args: x: input tensor in row format y: input tensor in row format Returns: the inner product of x, y """ return math_ops.matmul(x, y, transpose_b=True) def _compute_exact_rbf_kernel(x, y, stddev): """Computes exact RBF kernel given input tensors x and y and stddev.""" diff = math_ops.subtract(x, y) diff_squared_norm = _inner_product(diff, diff) return math_ops.exp(-diff_squared_norm / (2 * stddev * stddev)) class RandomFourierFeatureMapperTest(TensorFlowTestCase): def testInvalidInputShape(self): x = constant_op.constant([[2.0, 1.0]]) with self.cached_session(): rffm = RandomFourierFeatureMapper(3, 10) with self.assertRaisesWithPredicateMatch( dense_kernel_mapper.InvalidShapeError, r'Invalid dimension: expected 3 input features, got 2 instead.'): rffm.map(x) def testMappedShape(self): x1 = constant_op.constant([[2.0, 1.0, 0.0]]) x2 = constant_op.constant([[1.0, -1.0, 2.0], [-1.0, 10.0, 1.0], [4.0, -2.0, -1.0]]) with self.cached_session(): rffm = RandomFourierFeatureMapper(3, 10, 1.0) mapped_x1 = rffm.map(x1) mapped_x2 = rffm.map(x2) self.assertEqual([1, 10], mapped_x1.get_shape()) self.assertEqual([3, 10], mapped_x2.get_shape()) def testSameOmegaReused(self): x = constant_op.constant([[2.0, 1.0, 0.0]]) with self.cached_session(): rffm = RandomFourierFeatureMapper(3, 100) mapped_x = rffm.map(x) mapped_x_copy = rffm.map(x) # Two different evaluations of tensors output by map on the same input # are identical because the same parameters are used for the mappings. self.assertAllClose(mapped_x.eval(), mapped_x_copy.eval(), atol=0.001) def testTwoMapperObjects(self): x = constant_op.constant([[2.0, 1.0, 0.0]]) y = constant_op.constant([[1.0, -1.0, 2.0]]) stddev = 3.0 with self.cached_session(): # The mapped dimension is fairly small, so the kernel approximation is # very rough. 
rffm1 = RandomFourierFeatureMapper(3, 100, stddev) rffm2 = RandomFourierFeatureMapper(3, 100, stddev) mapped_x1 = rffm1.map(x) mapped_y1 = rffm1.map(y) mapped_x2 = rffm2.map(x) mapped_y2 = rffm2.map(y) approx_kernel_value1 = _inner_product(mapped_x1, mapped_y1) approx_kernel_value2 = _inner_product(mapped_x2, mapped_y2) self.assertAllClose( approx_kernel_value1.eval(), approx_kernel_value2.eval(), atol=0.01) def testBadKernelApproximation(self): x = constant_op.constant([[2.0, 1.0, 0.0]]) y = constant_op.constant([[1.0, -1.0, 2.0]]) stddev = 3.0 with self.cached_session(): # The mapped dimension is fairly small, so the kernel approximation is # very rough. rffm = RandomFourierFeatureMapper(3, 100, stddev, seed=0) mapped_x = rffm.map(x) mapped_y = rffm.map(y) exact_kernel_value = _compute_exact_rbf_kernel(x, y, stddev) approx_kernel_value = _inner_product(mapped_x, mapped_y) self.assertAllClose( exact_kernel_value.eval(), approx_kernel_value.eval(), atol=0.2) def testGoodKernelApproximationAmortized(self): # Parameters. num_points = 20 input_dim = 5 mapped_dim = 5000 stddev = 5.0 points_shape = [1, input_dim] points = [ random_ops.random_uniform(shape=points_shape, maxval=1.0) for _ in xrange(num_points) ] normalized_points = [nn.l2_normalize(point, dim=1) for point in points] total_absolute_error = 0.0 with self.cached_session(): rffm = RandomFourierFeatureMapper(input_dim, mapped_dim, stddev, seed=0) # Cache mappings so that they are not computed multiple times. cached_mappings = dict((point, rffm.map(point)) for point in normalized_points) for x in normalized_points: mapped_x = cached_mappings[x] for y in normalized_points: mapped_y = cached_mappings[y] exact_kernel_value = _compute_exact_rbf_kernel(x, y, stddev) approx_kernel_value = _inner_product(mapped_x, mapped_y) abs_error = math_ops.abs(exact_kernel_value - approx_kernel_value) total_absolute_error += abs_error self.assertAllClose( [[0.0]], total_absolute_error.eval() / (num_points * num_points), atol=0.02) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Approximate kernel mapper for RBF kernel based on Random Fourier Features.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper as dkm from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import math_ops # TODO(sibyl-vie3Poto,felixyu): add an option to control whether the parameters in the # kernel map are trainable. class RandomFourierFeatureMapper(dkm.DenseKernelMapper): r"""Class that implements Random Fourier Feature Mapping (RFFM) in TensorFlow. The RFFM mapping is used to approximate the Gaussian (RBF) kernel: $$(exp(-||x-y||_2^2 / (2 * \sigma^2))$$ The implementation of RFFM is based on the following paper: "Random Features for Large-Scale Kernel Machines" by Ali Rahimi and Ben Recht. (link: https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf) The mapping uses a matrix \\(\Omega \in R^{d x D}\\) and a bias vector \\(b \in R^D\\) where \\(d\\) is the input dimension (number of dense input features) and \\(D\\) is the output dimension (i.e., dimension of the feature space the input is mapped to). Each entry of \\(\Omega\\) is sampled i.i.d. from a (scaled) Gaussian distribution and each entry of \\(b\\) is sampled independently and uniformly from [0, \\(2 * \pi\\)]. For a single input feature vector \\(x \in R^d\\), its RFFM is defined as: $$\sqrt(2/D) * cos(x * \Omega + b)$$ where \\(cos\\) is the element-wise cosine function and \\(x, b\\) are represented as row vectors. The aforementioned paper shows that the linear kernel of RFFM-mapped vectors approximates the Gaussian kernel of the initial vectors. """ def __init__(self, input_dim, output_dim, stddev=1.0, seed=1, name=None): r"""Constructs a RandomFourierFeatureMapper instance. Args: input_dim: The dimension (number of features) of the tensors to be mapped. output_dim: The output dimension of the mapping. stddev: The standard deviation of the Gaussian kernel to be approximated. The error of the classifier trained using this approximation is very sensitive to this parameter. seed: An integer used to initialize the parameters (\\(\Omega\\) and \\(b\\)) of the mapper. For repeatable sequences across different invocations of the mapper object (for instance, to ensure consistent mapping both at training and eval/inference if these happen in different invocations), set this to the same integer. name: name for the mapper object. """ # TODO(sibyl-vie3Poto): Maybe infer input_dim and/or output_dim (if not explicitly # provided). input_dim can be inferred lazily, the first time map is called. 
# output_dim can be inferred from input_dim using heuristics on the error of # the approximation (and, by extension, the error of the classification # based on the approximation). self._input_dim = input_dim self._output_dim = output_dim self._stddev = stddev self._seed = seed self._name = name @property def name(self): """Returns a name for the `RandomFourierFeatureMapper` instance. If the name provided in the constructor is `None`, then the object's unique id is returned. Returns: A name for the `RandomFourierFeatureMapper` instance. """ return self._name or str(id(self)) @property def input_dim(self): return self._input_dim @property def output_dim(self): return self._output_dim def map(self, input_tensor): """Maps each row of input_tensor using random Fourier features. Args: input_tensor: a `Tensor` containing input features. It's shape is [batch_size, self._input_dim]. Returns: A `Tensor` of shape [batch_size, self._output_dim] containing RFFM-mapped features. Raises: InvalidShapeError: if the shape of the `input_tensor` is inconsistent with expected input dimension. """ input_tensor_shape = input_tensor.get_shape() if len(input_tensor_shape) != 2: raise dkm.InvalidShapeError( 'The shape of the tensor should be 2. Got %d instead.' % len(input_tensor_shape)) features_dim = input_tensor_shape[1] if features_dim != self._input_dim: raise dkm.InvalidShapeError( 'Invalid dimension: expected %d input features, got %d instead.' % (self._input_dim, features_dim)) # Add ops that compute (deterministically) omega_matrix and bias based on # the provided seed. # TODO(sibyl-vie3Poto): Storing the mapper's parameters (omega_matrix and bias) as # constants incurs no RPC calls to the parameter server during distributed # training. However, if the parameters grow too large (for instance if they # don't fit into memory or if they blow up the size of the GraphDef proto), # stroring them as constants is no longer an option. In this case, we should # have a heuristic to choose out of one of the following alternatives: # a) store them as variables (in the parameter server) # b) store them as worker local variables # c) generating on the fly the omega matrix at each step np.random.seed(self._seed) omega_matrix_shape = [self._input_dim, self._output_dim] bias_shape = [self._output_dim] omega_matrix = constant_op.constant( np.random.normal( scale=1.0 / self._stddev, size=omega_matrix_shape), dtype=dtypes.float32) bias = constant_op.constant( np.random.uniform( low=0.0, high=2 * np.pi, size=bias_shape), dtype=dtypes.float32) x_omega_plus_bias = math_ops.add( math_ops.matmul(input_tensor, omega_matrix), bias) return math.sqrt(2.0 / self._output_dim) * math_ops.cos(x_omega_plus_bias)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features.py
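A NumPy-only sketch of the mapping defined above, checking that the inner product of two mapped vectors approximates the RBF kernel exp(-||x-y||^2 / (2 * stddev^2)). The constants mirror the class (Omega ~ N(0, 1/stddev), b ~ U[0, 2*pi), scaling sqrt(2/D)); this is an illustration of the math, not the library code:

```
import numpy as np

def rffm_map(x, omega, bias, output_dim):
  # sqrt(2/D) * cos(x * Omega + b), with x treated as a row vector.
  return np.sqrt(2.0 / output_dim) * np.cos(x.dot(omega) + bias)

input_dim, output_dim, stddev = 3, 20000, 3.0
rng = np.random.RandomState(0)
omega = rng.normal(scale=1.0 / stddev, size=(input_dim, output_dim))
bias = rng.uniform(low=0.0, high=2 * np.pi, size=(output_dim,))

x = np.array([2.0, 1.0, 0.0])
y = np.array([1.0, -1.0, 2.0])

exact = np.exp(-np.sum((x - y) ** 2) / (2 * stddev ** 2))
approx = rffm_map(x, omega, bias, output_dim).dot(
    rffm_map(y, omega, bias, output_dim))
print(exact, approx)  # the two values should agree to within a few hundredths
```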
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""API class for dense (approximate) kernel mappers.

See ./random_fourier_features.py for a concrete instantiation of this class.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc

import six


class InvalidShapeError(Exception):
  """Exception thrown when a tensor's shape deviates from an expected shape."""


@six.add_metaclass(abc.ABCMeta)
class DenseKernelMapper(object):
  """Abstract class for a kernel mapper that maps dense inputs to dense outputs.

  This class is abstract. Users should not create instances of this class.
  """

  @abc.abstractmethod
  def map(self, input_tensor):
    """Main Dense-Tensor-In-Dense-Tensor-Out (DTIDTO) map method.

    Should be implemented by subclasses.

    Args:
      input_tensor: The dense input tensor to be mapped using the (approximate)
        kernel mapper.
    """
    raise NotImplementedError('map is not implemented for {}.'.format(self))

  @abc.abstractproperty
  def name(self):
    """Returns the name of the kernel mapper."""
    pass

  @abc.abstractproperty
  def output_dim(self):
    """Returns the output dimension of the mapping."""
    pass
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/kernel_methods/python/mappers/dense_kernel_mapper.py
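To show what the abstract interface expects of a concrete mapper, here is a minimal, hypothetical subclass that maps inputs with the identity; `RandomFourierFeatureMapper` above is the real instantiation, and the class name here is purely illustrative:

```
from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper as dkm

class IdentityKernelMapper(dkm.DenseKernelMapper):
  """Toy mapper that returns its input unchanged (illustration only)."""

  def __init__(self, dim):
    self._dim = dim

  def map(self, input_tensor):
    # A real mapper would validate input_tensor's shape and transform it here.
    return input_tensor

  @property
  def name(self):
    return 'identity_mapper'

  @property
  def output_dim(self):
    return self._dim
```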
# =============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Custom op used by periodic_resample."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.periodic_resample.python.ops.periodic_resample_op import periodic_resample
from tensorflow.python.util.all_util import remove_undocumented

_allowed_symbols = ["periodic_resample"]

remove_undocumented(__name__, _allowed_symbols)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/periodic_resample/__init__.py
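As the unit test at the end of this section shows, in the basic 2-D case `periodic_resample(x, [6, None])` on a (3, 4) input matches a plain reshape to (6, 2); the higher-rank cases interleave elements and are explicitly noted in the test as not being plain reshapes. A NumPy-only illustration of that 2-D expectation:

```
import numpy

input_tensor = numpy.arange(12).reshape((3, 4))
expected = input_tensor.reshape((6, 2))
print(expected)
# PeriodicResampleTest.testPeriodicResampleBasic2D below asserts that
# periodic_resample(input_tensor, [6, None]).eval() equals this array.
```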
# =============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Public API of periodic_resample."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/periodic_resample/python/__init__.py
# ============================================================================= # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy from tensorflow.contrib.periodic_resample import periodic_resample from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import variables from tensorflow.python.platform import googletest class PeriodicResampleTest(test_util.TensorFlowTestCase): def testPeriodicResampleBasic2D(self): input_tensor = numpy.arange(12).reshape((3, 4)) desired_shape = numpy.array([6, None]) output_tensor = input_tensor.reshape((6, 2)) with self.cached_session(): variables.global_variables_initializer().run() result = periodic_resample(input_tensor, desired_shape).eval() self.assertAllEqual(result, output_tensor) def testPeriodicResampleTruncatedBasic2D(self): input_tensor = numpy.arange(12).reshape((3, 4)) desired_shape = numpy.array([5, None]) output_tensor = input_tensor.reshape((6, 2))[:-1] with self.cached_session(): variables.global_variables_initializer().run() result = periodic_resample(input_tensor, desired_shape).eval() self.assertAllEqual(result, output_tensor) def testPeriodicResampleBasic3D(self): input_tensor = numpy.arange(2 * 2 * 4).reshape((2, 2, 4)) desired_shape = numpy.array([4, 4, None]) output_tensor = numpy.array([[[0], [2], [4], [6]], [[1], [3], [5], [7]], [[8], [10], [12], [14]], [[9], [11], [13], [15]]]) # NOTE: output_tensor != input_tensor.reshape((4, 4, -1)) with self.cached_session(): variables.global_variables_initializer().run() result = periodic_resample(input_tensor, desired_shape).eval() # input_tensor[0, 0, 0] == result[0, 0, 0] # input_tensor[0, 0, 1] == result[1, 0, 0] # input_tensor[0, 0, 2] == result[0, 1, 0] # input_tensor[0, 0, 3] == result[1, 1, 0] self.assertAllEqual(result, output_tensor) def testPeriodicResampleBasic4D(self): input_tensor = numpy.arange(2 * 2 * 2 * 8).reshape((2, 2, 2, 8)) desired_shape = numpy.array([4, 4, 4, None]) output_tensor = numpy.array( [[[[0], [4], [8], [12]], [[2], [6], [10], [14]], [[16], [20], [24], [28]], [[18], [22], [26], [30]]], [[[1], [5], [9], [13]], [[3], [7], [11], [15]], [[17], [21], [25], [29]], [[19], [23], [27], [31]]], [[[32], [36], [40], [44]], [[34], [38], [42], [46]], [[48], [52], [56], [60]], [[50], [54], [58], [62]]], [[[33], [37], [41], [45]], [[35], [39], [43], [47]], [[49], [53], [57], [61]], [[51], [55], [59], [63]]]]) # NOTE: output_tensor != input_tensor.reshape((4, 4, 4, -1)) with self.cached_session(): variables.global_variables_initializer().run() result = periodic_resample(input_tensor, desired_shape).eval() 
self.assertAllEqual(result, output_tensor) def testPeriodicResampleErrors(self): input_tensor = numpy.zeros(shape=[1, 2, 2, 4]) with self.cached_session(): with self.assertRaisesWithPredicateMatch( errors_impl.InvalidArgumentError, 'Dimension 3 input tensor has size 4, desired shape has size 1'): periodic_resample(input_tensor, [None, 4, 4, 1]).eval() with self.assertRaisesWithPredicateMatch( errors_impl.InvalidArgumentError, '4, to be the same as the length of the desired shape, 3'): periodic_resample(input_tensor, [None, 4, 4]).eval() def testPeriodicResampleGradient(self): desired_shape = numpy.array([4, 4, None]) result_shape = (4, 4, 1) input_shape = (2, 2, 4) with self.cached_session() as sess: x = array_ops.placeholder(dtypes.float32, shape=input_shape) output = periodic_resample(x, desired_shape) error = gradient_checker.compute_gradient_error( x, input_shape, output, result_shape) self.assertLess(error, 1e-4) def testPeriodicResampleShapeInference(self): with self.cached_session() as sess: # Case 1: output shape can be fully inferred. x = array_ops.placeholder(dtypes.float32, shape=(2, 2, 4)) output = periodic_resample(x, [4, 4, None]) self.assertEqual(output.shape, [4, 4, 1]) # Case 2: output shape cannot be inferred - report desired shape. x = array_ops.placeholder(dtypes.float32, shape=(2, 2, None)) output = periodic_resample(x, [4, 4, None]) self.assertTrue(output.shape.is_compatible_with([4, 4, None])) self.assertEqual(output.shape[2].value, None) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/periodic_resample/python/kernel_tests/periodic_resample_op_test.py
# ============================================================================= # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.contrib.periodic_resample.python.ops import gen_periodic_resample_op from tensorflow.contrib.periodic_resample.python.ops.gen_periodic_resample_op import periodic_resample, periodic_resample_op_grad from tensorflow.contrib.util import loader from tensorflow.python.framework import ops from tensorflow.python.platform import resource_loader # pylint: enable=unused-import _periodic_resample_op = loader.load_op_library( resource_loader.get_path_to_datafile('_periodic_resample_op.so')) @ops.RegisterGradient("PeriodicResample") def _periodic_resample_grad_cc(op, grad): return periodic_resample_op_grad( grad, op.inputs[0].shape, op.get_attr('shape'))
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/periodic_resample/python/ops/periodic_resample_op.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the deprecated summary ops in tf.contrib.deprecated.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.ops import array_ops from tensorflow.python.ops import logging_ops from tensorflow.python.platform import test class DeprecatedSummariesTest(test.TestCase): def testScalarSummary(self): with self.cached_session(): c = constant_op.constant(3) s = logging_ops.scalar_summary('tag', c) self.assertEqual(s.op.type, u'ScalarSummary') def testHistogramSummary(self): with self.cached_session(): c = constant_op.constant(3) s = logging_ops.histogram_summary('tag', c) self.assertEqual(s.op.type, u'HistogramSummary') def testImageSummary(self): with self.cached_session(): i = array_ops.ones((5, 4, 4, 3)) s = logging_ops.image_summary('tag', i) self.assertEqual(s.op.type, u'ImageSummary') def testAudioSummary(self): with self.cached_session(): c = constant_op.constant(3.0) s = logging_ops.audio_summary('tag', c, sample_rate=8000) self.assertEqual(s.op.type, u'AudioSummaryV2') def testMergeSummary(self): with self.cached_session(): c = constant_op.constant(3) a = logging_ops.scalar_summary('a', c) b = logging_ops.scalar_summary('b', c) s = logging_ops.merge_summary([a, b]) self.assertEqual(s.op.type, u'MergeSummary') if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/deprecated/summaries_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Non-core alias for the deprecated tf.X_summary ops. For TensorFlow 1.0, we have reorganized the TensorFlow summary ops into a submodule, and made some semantic tweaks. The first thing to note is that we moved the APIs around as follows: ```python tf.scalar_summary -> tf.compat.v1.summary.scalar tf.histogram_summary -> tf.compat.v1.summary.histogram tf.audio_summary -> tf.compat.v1.summary.audio tf.image_summary -> tf.compat.v1.summary.image tf.merge_summary -> tf.compat.v1.summary.merge tf.merge_all_summaries -> tf.compat.v1.summary.merge_all ``` We think this API is cleaner and will improve long-term discoverability and clarity of the TensorFlow API. But we also took the opportunity to make an important change to how summary "tags" work. The "tag" of a summary is the string that is associated with the output data, i.e. the key for organizing the generated protobufs. Previously, the tag was allowed to be any unique string; it had no relation to the summary op generating it, and no relation to the TensorFlow name system. This behavior made it very difficult to write reusable code that would add summary ops to the graph. If you had a function to add summary ops, you would need to manually pass in a `tf.name_scope` to that function to create deduplicated tags. Otherwise your program would fail with a runtime error due to tag collision. The new summary APIs under `tf.summary` throw away the "tag" as an independent concept; instead, the first argument is the node name. So summary tags now automatically inherit the surrounding `tf.name_scope`, and are automatically deduplicated if there is a conflict. Now, however, the only allowed characters are alphanumerics, underscores, and forward slashes. To make migration easier, the new APIs automatically convert illegal characters to underscores. Just as an example, consider the following "before" and "after" code snippets: ```python # Before def add_activation_summaries(v, scope): tf.scalar_summary("%s/fraction_of_zero" % scope, tf.nn.fraction_of_zero(v)) tf.histogram_summary("%s/activations" % scope, v) # After def add_activation_summaries(v): tf.compat.v1.summary.scalar("fraction_of_zero", tf.nn.fraction_of_zero(v)) tf.compat.v1.summary.histogram("activations", v) ``` Now, so long as the add_activation_summaries function is called from within the right `tf.name_scope`, the behavior is the same. Because this change does modify the behavior and could break tests, we can't automatically migrate usage to the new APIs. That is why we are making the old APIs temporarily available here at `tf.contrib.deprecated`.
In addition to the name change described above, there are two further changes to the new summary ops: - the "max_images" argument for `tf.image_summary` was renamed to "max_outputs" for `tf.compat.v1.summary.image` - `tf.scalar_summary` accepted arbitrary tensors of tags and values. But `tf.compat.v1.summary.scalar` requires a single scalar name and scalar value. In most cases, you can create `tf.compat.v1.summary.scalar` in a loop to get the same behavior. As before, TensorBoard groups charts by the top-level `tf.name_scope`, which may be inconvenient, since in the new summary ops the summary will inherit that `tf.name_scope` without user control. We plan to add more grouping mechanisms to TensorBoard, so it will be possible to specify the TensorBoard group for each summary via the summary API. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.ops.logging_ops import audio_summary from tensorflow.python.ops.logging_ops import histogram_summary from tensorflow.python.ops.logging_ops import image_summary from tensorflow.python.ops.logging_ops import merge_all_summaries from tensorflow.python.ops.logging_ops import merge_summary from tensorflow.python.ops.logging_ops import scalar_summary from tensorflow.python.util.all_util import remove_undocumented # pylint: enable=unused-import,line-too-long _allowed_symbols = [ 'audio_summary', 'histogram_summary', 'image_summary', 'merge_all_summaries', 'merge_summary', 'scalar_summary' ] remove_undocumented(__name__, _allowed_symbols)
tensorflow-r1.15.5-nv23.03
tensorflow/contrib/deprecated/__init__.py
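The docstring above notes that the tensor-of-tags form of `tf.scalar_summary` maps onto a loop of `tf.compat.v1.summary.scalar` calls. A minimal sketch of that migration, with hypothetical tag names and constant values, might look as follows.

```python
import tensorflow as tf

# Hypothetical scalar values to log.
train_loss = tf.constant(0.25)
eval_loss = tf.constant(0.31)

# Deprecated form (still importable from tf.contrib.deprecated):
#   scalar_summary(['train_loss', 'eval_loss'], tf.stack([train_loss, eval_loss]))
# Migrated form: one summary op per tag, inheriting the surrounding name scope.
for tag, value in [('train_loss', train_loss), ('eval_loss', eval_loss)]:
  tf.compat.v1.summary.scalar(tag, value)
```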
tensorflow-r1.15.5-nv23.03
tensorflow/examples/__init__.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of Estimator for Iris plant dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from sklearn import datasets from sklearn import metrics from sklearn import model_selection import tensorflow as tf X_FEATURE = 'x' # Name of the input feature. def my_model(features, labels, mode): """DNN with three hidden layers, and dropout of 0.1 probability.""" # Create three fully connected layers respectively of size 10, 20, and 10 with # each layer having a dropout probability of 0.1. net = features[X_FEATURE] for units in [10, 20, 10]: net = tf.layers.dense(net, units=units, activation=tf.nn.relu) net = tf.layers.dropout(net, rate=0.1) # Compute logits (1 per class). logits = tf.layers.dense(net, 3, activation=None) # Compute predictions. predicted_classes = tf.argmax(logits, 1) if mode == tf.estimator.ModeKeys.PREDICT: predictions = { 'class': predicted_classes, 'prob': tf.nn.softmax(logits) } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # Compute loss. loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits) # Create training op. if mode == tf.estimator.ModeKeys.TRAIN: optimizer = tf.train.AdagradOptimizer(learning_rate=0.1) train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step()) return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op) # Compute evaluation metrics. eval_metric_ops = { 'accuracy': tf.metrics.accuracy( labels=labels, predictions=predicted_classes) } return tf.estimator.EstimatorSpec( mode, loss=loss, eval_metric_ops=eval_metric_ops) def main(unused_argv): iris = datasets.load_iris() x_train, x_test, y_train, y_test = model_selection.train_test_split( iris.data, iris.target, test_size=0.2, random_state=42) classifier = tf.estimator.Estimator(model_fn=my_model) # Train. train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True) classifier.train(input_fn=train_input_fn, steps=1000) # Predict. test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False) predictions = classifier.predict(input_fn=test_input_fn) y_predicted = np.array(list(p['class'] for p in predictions)) y_predicted = y_predicted.reshape(np.array(y_test).shape) # Score with sklearn. score = metrics.accuracy_score(y_test, y_predicted) print('Accuracy (sklearn): {0:f}'.format(score)) # Score with tensorflow. scores = classifier.evaluate(input_fn=test_input_fn) print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy'])) if __name__ == '__main__': tf.app.run()
tensorflow-r1.15.5-nv23.03
tensorflow/examples/learn/iris_custom_model.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of DNNClassifier for Iris plant dataset, with exponential decay.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from sklearn import datasets from sklearn import metrics from sklearn import model_selection import tensorflow as tf X_FEATURE = 'x' # Name of the input feature. def my_model(features, labels, mode): """DNN with three hidden layers.""" # Create three fully connected layers respectively of size 10, 20, and 10. net = features[X_FEATURE] for units in [10, 20, 10]: net = tf.layers.dense(net, units=units, activation=tf.nn.relu) # Compute logits (1 per class). logits = tf.layers.dense(net, 3, activation=None) # Compute predictions. predicted_classes = tf.argmax(logits, 1) if mode == tf.estimator.ModeKeys.PREDICT: predictions = { 'class': predicted_classes, 'prob': tf.nn.softmax(logits) } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # Compute loss. loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits) # Create training op with exponentially decaying learning rate. if mode == tf.estimator.ModeKeys.TRAIN: global_step = tf.train.get_global_step() learning_rate = tf.train.exponential_decay( learning_rate=0.1, global_step=global_step, decay_steps=100, decay_rate=0.001) optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss, global_step=global_step) return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op) # Compute evaluation metrics. eval_metric_ops = { 'accuracy': tf.metrics.accuracy( labels=labels, predictions=predicted_classes) } return tf.estimator.EstimatorSpec( mode, loss=loss, eval_metric_ops=eval_metric_ops) def main(unused_argv): iris = datasets.load_iris() x_train, x_test, y_train, y_test = model_selection.train_test_split( iris.data, iris.target, test_size=0.2, random_state=42) classifier = tf.estimator.Estimator(model_fn=my_model) # Train. train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True) classifier.train(input_fn=train_input_fn, steps=1000) # Predict. test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False) predictions = classifier.predict(input_fn=test_input_fn) y_predicted = np.array(list(p['class'] for p in predictions)) y_predicted = y_predicted.reshape(np.array(y_test).shape) # Score with sklearn. score = metrics.accuracy_score(y_test, y_predicted) print('Accuracy (sklearn): {0:f}'.format(score)) # Score with tensorflow. scores = classifier.evaluate(input_fn=test_input_fn) print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy'])) if __name__ == '__main__': tf.app.run()
tensorflow-r1.15.5-nv23.03
tensorflow/examples/learn/iris_custom_decay_dnn.py
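The exponential-decay schedule configured above follows the standard formula `decayed_lr = learning_rate * decay_rate ** (global_step / decay_steps)` (no staircasing). A small sketch of the resulting values:

```python
def decayed_lr(global_step, base_lr=0.1, decay_steps=100, decay_rate=0.001):
  # Same formula tf.train.exponential_decay uses when staircase is False.
  return base_lr * decay_rate ** (global_step / float(decay_steps))

print(decayed_lr(0))    # 0.1
print(decayed_lr(100))  # ~1e-4
print(decayed_lr(200))  # ~1e-7
```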
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Export an RNN cell in SavedModel format.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import app from absl import flags import numpy as np import tensorflow.compat.v2 as tf FLAGS = flags.FLAGS flags.DEFINE_string("export_dir", None, "Directory to export SavedModel.") def main(argv): del argv root = tf.train.Checkpoint() # Create a cell and attach to our trackable. root.rnn_cell = tf.keras.layers.LSTMCell(units=10, recurrent_initializer=None) # Wrap the rnn_cell.__call__ function and assign to next_state. root.next_state = tf.function(root.rnn_cell.__call__) # Wrap the rnn_cell.get_initial_state function using a decorator and assign to an # attribute with the same name. @tf.function(input_signature=[tf.TensorSpec([None, None], tf.float32)]) def get_initial_state(tensor): return root.rnn_cell.get_initial_state(tensor, None, None) root.get_initial_state = get_initial_state # Construct an initial_state, then call next_state explicitly to trigger a # trace for serialization (we need an explicit call, because next_state has # not been annotated with an input_signature). initial_state = root.get_initial_state( tf.constant(np.random.uniform(size=[3, 10]).astype(np.float32))) root.next_state( tf.constant(np.random.uniform(size=[3, 19]).astype(np.float32)), initial_state) tf.saved_model.save(root, FLAGS.export_dir) if __name__ == "__main__": app.run(main)
tensorflow-r1.15.5-nv23.03
tensorflow/examples/saved_model/integration_tests/export_rnn_cell.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility to write SavedModel integration tests. SavedModel testing requires isolation between the process that creates a SavedModel and the process that consumes it. This file helps with that by relaunching the same binary that called `assertCommandSucceeded`, with an environment flag indicating which source file to execute. That binary must start by calling `MaybeRunScriptInstead`. This makes it possible to wire the tests into existing build systems without depending on data dependencies, which keeps the binary size fixed and allows interop with GPU tests. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import importlib import os import subprocess import sys from absl import app import tensorflow.compat.v2 as tf from tensorflow.python.platform import tf_logging as logging class TestCase(tf.test.TestCase): """Base class to write SavedModel integration tests.""" def assertCommandSucceeded(self, script_name, **flags): """Runs an integration test script with given flags.""" run_script = sys.argv[0] if run_script.endswith(".py"): command_parts = [sys.executable, run_script] else: command_parts = [run_script] for flag_key, flag_value in flags.items(): command_parts.append("--%s=%s" % (flag_key, flag_value)) env = dict(TF2_BEHAVIOR="enabled", SCRIPT_NAME=script_name) logging.info("Running: %s with environment flags %s" % (command_parts, env)) subprocess.check_call(command_parts, env=dict(os.environ, **env)) def MaybeRunScriptInstead(): if "SCRIPT_NAME" in os.environ: # Append current path to import path and execute `SCRIPT_NAME` main. sys.path.extend([os.path.dirname(__file__)]) module_name = os.environ["SCRIPT_NAME"] retval = app.run(importlib.import_module(module_name).main) # pylint: disable=assignment-from-no-return sys.exit(retval)
tensorflow-r1.15.5-nv23.03
tensorflow/examples/saved_model/integration_tests/integration_scripts.py
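To make the relaunch mechanism concrete, here is a minimal, hypothetical test file built on the helpers above (the real tests follow the same pattern in saved_model_test.py further below): the binary first gives `MaybeRunScriptInstead()` a chance to run an exported script, and otherwise runs as a normal test.

```python
import tensorflow.compat.v2 as tf

from tensorflow.examples.saved_model.integration_tests import integration_scripts as scripts


class ExampleIntegrationTest(scripts.TestCase):

  def test_export_then_use(self):
    export_dir = self.get_temp_dir()
    # Re-runs this same binary with SCRIPT_NAME=export_rnn_cell in the
    # environment, so export_rnn_cell.main() executes in a fresh process.
    self.assertCommandSucceeded("export_rnn_cell", export_dir=export_dir)
    self.assertCommandSucceeded("use_rnn_cell", model_dir=export_dir)


if __name__ == "__main__":
  scripts.MaybeRunScriptInstead()
  tf.test.main()
```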
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Deploys a SavedModel with an MNIST classifier to TFLite.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import app from absl import flags import numpy as np import tensorflow.compat.v2 as tf from tensorflow.examples.saved_model.integration_tests import mnist_util FLAGS = flags.FLAGS flags.DEFINE_string( 'saved_model_dir', None, 'Directory of the SavedModel to deploy.') flags.DEFINE_bool( 'use_fashion_mnist', False, 'Use Fashion MNIST (products) instead of the real MNIST (digits).') flags.DEFINE_bool( 'fast_test_mode', False, 'Limit amount of test data for running in unit tests.') flags.DEFINE_string( 'tflite_output_file', None, 'The filename of the .tflite model file to write (optional).') def main(argv): del argv # First convert the SavedModel in a pristine environment. converter = tf.lite.TFLiteConverter.from_saved_model(FLAGS.saved_model_dir) lite_model_content = converter.convert() # Here is how you can save it for actual deployment. if FLAGS.tflite_output_file: with open(FLAGS.tflite_output_file, 'wb') as outfile: outfile.write(lite_model_content) # For testing, the TFLite model can be executed like this. interpreter = tf.lite.Interpreter(model_content=lite_model_content) def lite_model(images): interpreter.allocate_tensors() interpreter.set_tensor(interpreter.get_input_details()[0]['index'], images) interpreter.invoke() return interpreter.get_tensor(interpreter.get_output_details()[0]['index']) # Load the SavedModel again for use as a test baseline. imported = tf.saved_model.load(FLAGS.saved_model_dir) def tf_model(images): output_dict = imported.signatures['serving_default'](tf.constant(images)) logits, = output_dict.values() # Unpack single value. return logits # Compare model outputs on the test inputs. (_, _), (x_test, _) = mnist_util.load_reshaped_data( use_fashion_mnist=FLAGS.use_fashion_mnist, fake_tiny_data=FLAGS.fast_test_mode) for i, x in enumerate(x_test): x = x[None, ...] # Make batch of size 1. y_lite = lite_model(x) y_tf = tf_model(x) # This numpy primitive uses plain `raise` and works outside tf.TestCase. # Model outputs are probabilities that sum to 1, so atol makes sense here. np.testing.assert_allclose(y_lite, y_tf, rtol=0, atol=1e-5, err_msg='Mismatch at test example %d' % i) # Test that it loads correctly with v1 load APIs as well. with tf.compat.v1.Graph().as_default(), tf.compat.v1.Session() as session: tf.compat.v1.saved_model.load( session, [tf.compat.v1.saved_model.SERVING], FLAGS.saved_model_dir) if __name__ == '__main__': app.run(main)
tensorflow-r1.15.5-nv23.03
tensorflow/examples/saved_model/integration_tests/deploy_mnist_cnn.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Exports a convolutional feature extractor for MNIST in SavedModel format. The feature extractor is a convolutional neural network plus a hidden layer that gets trained as part of an MNIST classifier and then written to a SavedModel (without the classification layer). From there, use_mnist_cnn.py picks it up for transfer learning. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import app from absl import flags import tensorflow.compat.v2 as tf from tensorflow.examples.saved_model.integration_tests import mnist_util from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect FLAGS = flags.FLAGS flags.DEFINE_string( 'export_dir', None, 'Directory of exported SavedModel.') flags.DEFINE_integer( 'epochs', 10, 'Number of epochs to train.') flags.DEFINE_bool( 'use_keras_save_api', False, 'Uses tf.keras.models.save_model() on the feature extractor ' 'instead of tf.saved_model.save() on a manually wrapped version. ' 'With this, the exported model has no hparams.') flags.DEFINE_bool( 'fast_test_mode', False, 'Shortcut training for running in unit tests.') flags.DEFINE_bool( 'export_print_hparams', False, 'If true, the exported function will print its effective hparams.') def make_feature_extractor(l2_strength, dropout_rate): """Returns a Keras Model to compute a feature vector from MNIST images.""" regularizer = lambda: tf.keras.regularizers.l2(l2_strength) net = inp = tf.keras.Input(mnist_util.INPUT_SHAPE) net = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', name='conv1', kernel_regularizer=regularizer())(net) net = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', name='conv2', kernel_regularizer=regularizer())(net) net = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), name='pool1')(net) net = tf.keras.layers.Dropout(dropout_rate, name='dropout1')(net) net = tf.keras.layers.Flatten(name='flatten')(net) net = tf.keras.layers.Dense(10, activation='relu', name='dense1', kernel_regularizer=regularizer())(net) return tf.keras.Model(inputs=inp, outputs=net) def set_feature_extractor_hparams(model, dropout_rate): model.get_layer('dropout1').rate = dropout_rate def make_classifier(feature_extractor, l2_strength, dropout_rate=0.5): """Returns a Keras Model to classify MNIST using feature_extractor.""" regularizer = lambda: tf.keras.regularizers.l2(l2_strength) net = inp = tf.keras.Input(mnist_util.INPUT_SHAPE) net = feature_extractor(net) net = tf.keras.layers.Dropout(dropout_rate)(net) net = tf.keras.layers.Dense(mnist_util.NUM_CLASSES, activation='softmax', kernel_regularizer=regularizer())(net) return tf.keras.Model(inputs=inp, outputs=net) def wrap_keras_model_for_export(model, batch_input_shape, set_hparams, default_hparams): """Wraps `model` for saving and loading as SavedModel.""" # The primary input to the
module is a Tensor with a batch of images. # Here we determine its spec. inputs_spec = tf.TensorSpec(shape=batch_input_shape, dtype=tf.float32) # The module also accepts certain hparams as optional Tensor inputs. # Here, we cut all the relevant slices from `default_hparams` # (and don't worry if anyone accidentally modifies it later). if default_hparams is None: default_hparams = {} hparam_keys = list(default_hparams.keys()) hparam_defaults = tuple(default_hparams.values()) hparams_spec = {name: tf.TensorSpec.from_tensor(tf.constant(value)) for name, value in default_hparams.items()} # The goal is to save a function with this argspec... argspec = tf_inspect.FullArgSpec( args=(['inputs', 'training'] + hparam_keys), defaults=((False,) + hparam_defaults), varargs=None, varkw=None, kwonlyargs=[], kwonlydefaults=None, annotations={}) # ...and this behavior: def call_fn(inputs, training, *args): if FLAGS.export_print_hparams: args = [tf.keras.backend.print_tensor(args[i], 'training=%s and %s=' % (training, hparam_keys[i])) for i in range(len(args))] kwargs = dict(zip(hparam_keys, args)) if kwargs: set_hparams(model, **kwargs) return model(inputs, training=training) # We cannot spell out `args` in the def statement for call_fn, but since # tf.function uses tf_inspect, we can use tf_decorator to wrap it with # the desired argspec. def wrapped(*args, **kwargs): # TODO(arnoegw): Can we use call_fn itself? return call_fn(*args, **kwargs) traced_call_fn = tf.function( tf_decorator.make_decorator(call_fn, wrapped, decorator_argspec=argspec)) # Now we need to trigger traces for all supported combinations of the # non-Tensor-value inputs. for training in (True, False): traced_call_fn.get_concrete_function(inputs_spec, training, **hparams_spec) # Finally, we assemble the object for tf.saved_model.save(). obj = tf.train.Checkpoint() obj.__call__ = traced_call_fn obj.trainable_variables = model.trainable_variables obj.variables = model.trainable_variables + model.non_trainable_variables # Make tf.functions for the regularization terms of the loss. obj.regularization_losses = [_get_traced_loss(model, i) for i in range(len(model.losses))] return obj def _get_traced_loss(model, i): """Returns tf.function for model.losses[i] with a trace for zero args. The intended usage is [_get_traced_loss(model, i) for i in range(len(model.losses))]. This is better than [tf.function(lambda: model.losses[i], input_signature=[]) for i ...] because it avoids capturing a loop index in a lambda, and removes any chance of deferring the trace. Args: model: a Keras Model. i: an integer from 0 up to, but not including, len(model.losses). """ f = tf.function(lambda: model.losses[i]) _ = f.get_concrete_function() return f def main(argv): del argv # Build a complete classifier model using a feature extractor. default_hparams = dict(dropout_rate=0.25) l2_strength = 0.01 # Not a hparam for inputs -> outputs. feature_extractor = make_feature_extractor(l2_strength=l2_strength, **default_hparams) classifier = make_classifier(feature_extractor, l2_strength=l2_strength) # Train the complete model. (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data( fake_tiny_data=FLAGS.fast_test_mode) classifier.compile(loss=tf.keras.losses.categorical_crossentropy, optimizer=tf.keras.optimizers.SGD(), metrics=['accuracy']) classifier.fit(x_train, y_train, batch_size=128, epochs=FLAGS.epochs, verbose=1, validation_data=(x_test, y_test)) # Save the feature extractor to a framework-agnostic SavedModel for reuse.
# Note that the feature_extractor object has not been compiled or fitted, # so it does not contain an optimizer and related state. if FLAGS.use_keras_save_api: # Use Keras' built-in way of creating reusable SavedModels. # This has no support for adjustable hparams at this time (July 2019). # (We could also call tf.saved_model.save(feature_extractor, ...), # point is we're passing a Keras model, not a plain Checkpoint.) tf.keras.models.save_model(feature_extractor, FLAGS.export_dir) else: # Assemble a reusable SavedModel manually, with adjustable hparams. exportable = wrap_keras_model_for_export(feature_extractor, (None,) + mnist_util.INPUT_SHAPE, set_feature_extractor_hparams, default_hparams) tf.saved_model.save(exportable, FLAGS.export_dir) if __name__ == '__main__': app.run(main)
tensorflow-r1.15.5-nv23.03
tensorflow/examples/saved_model/integration_tests/export_mnist_cnn.py
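On the consumption side (use_mnist_cnn.py, which is not shown in this section), the manually wrapped SavedModel can be restored and called with its hparams. The following is only a sketch under assumptions: the model was exported by the manual-wrapping branch above, `export_dir` is a hypothetical path, and MNIST inputs are 28x28x1.

```python
import numpy as np
import tensorflow.compat.v2 as tf

export_dir = '/tmp/mnist_feature_extractor'  # Hypothetical path.
obj = tf.saved_model.load(export_dir)

# Assumed MNIST input shape (28, 28, 1); a batch of 8 blank images.
images = tf.constant(np.zeros([8, 28, 28, 1], np.float32))
# The restored __call__ was traced with `training` and the exported hparams.
features = obj(images, training=False, dropout_rate=0.25)
# One zero-argument tf.function per regularization term was exported as well.
reg_loss = tf.add_n([f() for f in obj.regularization_losses])
```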
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utils related to tf.distribute.strategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from tensorflow.python.distribute import strategy_combinations _strategies = [ strategy_combinations.one_device_strategy, strategy_combinations.mirrored_strategy_with_one_cpu, strategy_combinations.mirrored_strategy_with_one_gpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus, ] named_strategies = collections.OrderedDict( [(None, None)] + [(str(s), s) for s in _strategies]) class MaybeDistributionScope(object): """Provides a context allowing no distribution strategy.""" @staticmethod def from_name(name): return MaybeDistributionScope(named_strategies[name].strategy if name else None) def __init__(self, distribution): self._distribution = distribution self._scope = None def __enter__(self): if self._distribution: self._scope = self._distribution.scope() self._scope.__enter__() def __exit__(self, exc_type, value, traceback): if self._distribution: self._scope.__exit__(exc_type, value, traceback) self._scope = None
tensorflow-r1.15.5-nv23.03
tensorflow/examples/saved_model/integration_tests/distribution_strategy_utils.py
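A short usage sketch for the helper above: `MaybeDistributionScope.from_name` accepts either None (no strategy) or one of the keys of `named_strategies`, and the `with` block then runs inside the corresponding `strategy.scope()` or in the default scope.

```python
from tensorflow.examples.saved_model.integration_tests import distribution_strategy_utils as ds_utils

strategy_name = None  # Or any key from ds_utils.named_strategies, e.g. passed in as a flag.
with ds_utils.MaybeDistributionScope.from_name(strategy_name):
  # Build the model / variables here. With a named strategy this runs inside
  # strategy.scope(); with None it is a no-op context.
  pass
```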
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Load and use RNN model stored as a SavedModel.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tempfile from absl import app from absl import flags import tensorflow.compat.v2 as tf FLAGS = flags.FLAGS flags.DEFINE_string("model_dir", None, "Directory to load SavedModel from.") def main(argv): del argv sentences = [ "<S> sentence <E>", "<S> second sentence <E>", "<S> third sentence<E>" ] model = tf.saved_model.load(FLAGS.model_dir) model.train(tf.constant(sentences)) decoded = model.decode_greedy( sequence_length=10, first_word=tf.constant("<S>")) _ = [d.numpy() for d in decoded] # This is testing that a model using a SavedModel can be re-exported again, # e.g. to catch issues such as b/142231881. tf.saved_model.save(model, tempfile.mkdtemp()) if __name__ == "__main__": app.run(main)
tensorflow-r1.15.5-nv23.03
tensorflow/examples/saved_model/integration_tests/use_text_rnn_model.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Load and use text embedding module in a Dataset map function.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tempfile from absl import app from absl import flags import numpy as np import tensorflow.compat.v2 as tf FLAGS = flags.FLAGS flags.DEFINE_string("model_dir", None, "Directory to load SavedModel from.") def train(): """Build a Keras model and train with mock data.""" module = tf.saved_model.load(FLAGS.model_dir) def _map_fn(features, labels): features = tf.expand_dims(features, 0) features = module(features) features = tf.squeeze(features, 0) return features, labels features = np.array(["my first sentence", "my second sentence"]) labels = np.array([1, 0]) dataset = tf.data.Dataset.from_tensor_slices((features, labels)).map(_map_fn) # Create the sequential keras model. l = tf.keras.layers model = tf.keras.Sequential() model.add(l.Dense(10, activation="relu")) model.add(l.Dense(1, activation="sigmoid")) model.compile( optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]) model.fit_generator(generator=dataset.batch(10), epochs=5) # This is testing that a model using a SavedModel can be re-exported again, # e.g. to catch issues such as b/142231881. tf.saved_model.save(model, tempfile.mkdtemp()) def main(argv): del argv train() if __name__ == "__main__": tf.enable_v2_behavior() app.run(main)
tensorflow-r1.15.5-nv23.03
tensorflow/examples/saved_model/integration_tests/use_text_embedding_in_dataset.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Load and use an RNN cell stored as a SavedModel.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tempfile from absl import app from absl import flags import numpy as np import tensorflow.compat.v2 as tf FLAGS = flags.FLAGS flags.DEFINE_string("model_dir", None, "Directory to load SavedModel from.") def main(argv): del argv cell = tf.saved_model.load(FLAGS.model_dir) initial_state = cell.get_initial_state( tf.constant(np.random.uniform(size=[3, 10]).astype(np.float32))) cell.next_state( tf.constant(np.random.uniform(size=[3, 19]).astype(np.float32)), initial_state) # This is testing that a model using a SavedModel can be re-exported again, # e.g. to catch issues such as b/142231881. tf.saved_model.save(cell, tempfile.mkdtemp()) if __name__ == "__main__": app.run(main)
tensorflow-r1.15.5-nv23.03
tensorflow/examples/saved_model/integration_tests/use_rnn_cell.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SavedModel integration tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized import tensorflow.compat.v2 as tf from tensorflow.examples.saved_model.integration_tests import distribution_strategy_utils as ds_utils from tensorflow.examples.saved_model.integration_tests import integration_scripts as scripts from tensorflow.python.distribute import combinations class SavedModelTest(scripts.TestCase, parameterized.TestCase): def __init__(self, method_name="runTest", has_extra_deps=False): super(SavedModelTest, self).__init__(method_name) self.has_extra_deps = has_extra_deps def skipIfMissingExtraDeps(self): """Skip test if it requires extra dependencies. b/132234211: The extra dependencies are not available in all environments that run the tests, e.g. "tensorflow_hub" is not available from tests within "tensorflow" alone. Those tests are instead run by another internal test target. """ if not self.has_extra_deps: self.skipTest("Missing extra dependencies") def test_text_rnn(self): export_dir = self.get_temp_dir() self.assertCommandSucceeded("export_text_rnn_model", export_dir=export_dir) self.assertCommandSucceeded("use_text_rnn_model", model_dir=export_dir) def test_rnn_cell(self): export_dir = self.get_temp_dir() self.assertCommandSucceeded("export_rnn_cell", export_dir=export_dir) self.assertCommandSucceeded("use_rnn_cell", model_dir=export_dir) def test_text_embedding_in_sequential_keras(self): self.skipIfMissingExtraDeps() export_dir = self.get_temp_dir() self.assertCommandSucceeded( "export_simple_text_embedding", export_dir=export_dir) self.assertCommandSucceeded( "use_model_in_sequential_keras", model_dir=export_dir) def test_text_embedding_in_dataset(self): export_dir = self.get_temp_dir() self.assertCommandSucceeded( "export_simple_text_embedding", export_dir=export_dir) self.assertCommandSucceeded( "use_text_embedding_in_dataset", model_dir=export_dir) TEST_MNIST_CNN_GENERATE_KWARGS = dict( combinations=( combinations.combine( # Test all combinations with tf.saved_model.save(). # Test all combinations using tf.keras.models.save_model() # for both the reusable and the final full model. use_keras_save_api=True, named_strategy=list(ds_utils.named_strategies.values()), retrain_flag_value=["true", "false"], regularization_loss_multiplier=[None, 2], # Test for b/134528831. ) + combinations.combine( # Test a few critical combinations with raw tf.saved_model.save(), # including export of a reusable SavedModel that gets assembled # manually, including support for adjustable hparams. use_keras_save_api=False, named_strategy=None, retrain_flag_value=["true", "false"], regularization_loss_multiplier=[None, 2], # Test for b/134528831.
)), test_combinations=[combinations.NamedGPUCombination()]) @combinations.generate(**TEST_MNIST_CNN_GENERATE_KWARGS) def test_mnist_cnn(self, use_keras_save_api, named_strategy, retrain_flag_value, regularization_loss_multiplier): self.skipIfMissingExtraDeps() fast_test_mode = True temp_dir = self.get_temp_dir() feature_extractor_dir = os.path.join(temp_dir, "mnist_feature_extractor") full_model_dir = os.path.join(temp_dir, "full_model") self.assertCommandSucceeded( "export_mnist_cnn", fast_test_mode=fast_test_mode, export_dir=feature_extractor_dir, use_keras_save_api=use_keras_save_api) use_kwargs = dict(fast_test_mode=fast_test_mode, input_saved_model_dir=feature_extractor_dir, retrain=retrain_flag_value, output_saved_model_dir=full_model_dir, use_keras_save_api=use_keras_save_api) if named_strategy: use_kwargs["strategy"] = str(named_strategy) if regularization_loss_multiplier is not None: use_kwargs[ "regularization_loss_multiplier"] = regularization_loss_multiplier self.assertCommandSucceeded("use_mnist_cnn", **use_kwargs) self.assertCommandSucceeded( "deploy_mnist_cnn", fast_test_mode=fast_test_mode, saved_model_dir=full_model_dir) if __name__ == "__main__": scripts.MaybeRunScriptInstead() tf.test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/examples/saved_model/integration_tests/saved_model_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Text RNN model stored as a SavedModel.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import app from absl import flags import tensorflow.compat.v2 as tf FLAGS = flags.FLAGS flags.DEFINE_string("export_dir", None, "Directory to export SavedModel.") class TextRnnModel(tf.train.Checkpoint): """Text RNN model. A full generative text RNN model that can train and decode sentences from a starting word. """ def __init__(self, vocab, emb_dim, buckets, state_size): super(TextRnnModel, self).__init__() self._buckets = buckets self._lstm_cell = tf.keras.layers.LSTMCell(units=state_size) self._rnn_layer = tf.keras.layers.RNN( self._lstm_cell, return_sequences=True) self._embeddings = tf.Variable(tf.random.uniform(shape=[buckets, emb_dim])) self._logit_layer = tf.keras.layers.Dense(buckets) self._set_up_vocab(vocab) def _tokenize(self, sentences): # Perform a minimalistic text preprocessing by removing punctuation and # splitting on spaces. normalized_sentences = tf.strings.regex_replace( input=sentences, pattern=r"\pP", rewrite="") sparse_tokens = tf.strings.split(normalized_sentences, " ").to_sparse() # Deal with a corner case: there is one empty sentence. sparse_tokens, _ = tf.sparse.fill_empty_rows(sparse_tokens, tf.constant("")) # Deal with a corner case: all sentences are empty. sparse_tokens = tf.sparse.reset_shape(sparse_tokens) return (sparse_tokens.indices, sparse_tokens.values, sparse_tokens.dense_shape) def _set_up_vocab(self, vocab_tokens): # TODO(vbardiovsky): Currently there is no real vocabulary, because # saved_model serialization does not support trackable resources. Add a real # vocabulary when it does. vocab_list = ["UNK"] * self._buckets for vocab_token in vocab_tokens: index = self._words_to_indices(vocab_token).numpy() vocab_list[index] = vocab_token # This is a variable representing an inverse index. self._vocab_tensor = tf.Variable(vocab_list) def _indices_to_words(self, indices): return tf.gather(self._vocab_tensor, indices) def _words_to_indices(self, words): return tf.strings.to_hash_bucket(words, self._buckets) @tf.function(input_signature=[tf.TensorSpec([None], tf.dtypes.string)]) def train(self, sentences): token_ids, token_values, token_dense_shape = self._tokenize(sentences) tokens_sparse = tf.sparse.SparseTensor( indices=token_ids, values=token_values, dense_shape=token_dense_shape) tokens = tf.sparse.to_dense(tokens_sparse, default_value="") sparse_lookup_ids = tf.sparse.SparseTensor( indices=tokens_sparse.indices, values=self._words_to_indices(tokens_sparse.values), dense_shape=tokens_sparse.dense_shape) lookup_ids = tf.sparse.to_dense(sparse_lookup_ids, default_value=0) # Targets are the next word for each word of the sentence. 
tokens_ids_seq = lookup_ids[:, 0:-1] tokens_ids_target = lookup_ids[:, 1:] tokens_prefix = tokens[:, 0:-1] # Mask determining which positions we care about for a loss: all positions # that have a valid non-terminal token. mask = tf.logical_and( tf.logical_not(tf.equal(tokens_prefix, "")), tf.logical_not(tf.equal(tokens_prefix, "<E>"))) input_mask = tf.cast(mask, tf.int32) with tf.GradientTape() as t: sentence_embeddings = tf.nn.embedding_lookup(self._embeddings, tokens_ids_seq) lstm_initial_state = self._lstm_cell.get_initial_state( sentence_embeddings) lstm_output = self._rnn_layer( inputs=sentence_embeddings, initial_state=lstm_initial_state) # Stack LSTM outputs into a batch instead of a 2D array. lstm_output = tf.reshape(lstm_output, [-1, self._lstm_cell.output_size]) logits = self._logit_layer(lstm_output) targets = tf.reshape(tokens_ids_target, [-1]) weights = tf.cast(tf.reshape(input_mask, [-1]), tf.float32) losses = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=targets, logits=logits) # Final loss is the mean loss for all token losses. final_loss = tf.math.divide( tf.reduce_sum(tf.multiply(losses, weights)), tf.reduce_sum(weights), name="final_loss") watched = t.watched_variables() gradients = t.gradient(final_loss, watched) for w, g in zip(watched, gradients): w.assign_sub(g) return final_loss @tf.function def decode_greedy(self, sequence_length, first_word): initial_state = self._lstm_cell.get_initial_state( dtype=tf.float32, batch_size=1) sequence = [first_word] current_word = first_word current_id = tf.expand_dims(self._words_to_indices(current_word), 0) current_state = initial_state for _ in range(sequence_length): token_embeddings = tf.nn.embedding_lookup(self._embeddings, current_id) lstm_outputs, current_state = self._lstm_cell(token_embeddings, current_state) lstm_outputs = tf.reshape(lstm_outputs, [-1, self._lstm_cell.output_size]) logits = self._logit_layer(lstm_outputs) softmax = tf.nn.softmax(logits) next_ids = tf.math.argmax(softmax, axis=1) next_words = self._indices_to_words(next_ids)[0] current_id = next_ids current_word = next_words sequence.append(current_word) return sequence def main(argv): del argv sentences = ["<S> hello there <E>", "<S> how are you doing today <E>"] vocab = [ "<S>", "<E>", "hello", "there", "how", "are", "you", "doing", "today" ] module = TextRnnModel(vocab=vocab, emb_dim=10, buckets=100, state_size=128) for _ in range(100): _ = module.train(tf.constant(sentences)) # We have to call this function explicitly if we want it exported, because it # has no input_signature in the @tf.function decorator. decoded = module.decode_greedy( sequence_length=10, first_word=tf.constant("<S>")) _ = [d.numpy() for d in decoded] tf.saved_model.save(module, FLAGS.export_dir) if __name__ == "__main__": app.run(main)
tensorflow-r1.15.5-nv23.03
tensorflow/examples/saved_model/integration_tests/export_text_rnn_model.py