python_code | repo_name | file_path
---|---|---
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing statistics of samples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import util
from tensorflow.python.ops.signal import fft_ops
__all__ = [
"auto_correlation",
"percentile",
]
# TODO(langmore) Write separate versions of this for real/complex dtype, taking
# advantage of optimized real-fft ops.
def auto_correlation(
x,
axis=-1,
max_lags=None,
center=True,
normalize=True,
name="auto_correlation"):
"""Auto correlation along one axis.
Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation
`RXX` may be defined as (with `E` expectation and `Conj` complex conjugate)
```
RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },
W[n] := (X[n] - MU) / S,
MU := E{ X[0] },
S**2 := E{ (X[0] - MU) Conj(X[0] - MU) }.
```
This function takes the viewpoint that `x` is (along one axis) a finite
sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an
estimate of `RXX[m]` as follows:
After extending `x` from length `L` to `inf` by zero padding, the auto
correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as
```
rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),
w[n] := (x[n] - mu) / s,
mu := L**-1 sum_n x[n],
s**2 := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)
```
The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users
often set `max_lags` small enough so that the entire output is meaningful.
Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by
`len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation
contains a slight bias, which goes to zero as `len(x) - m --> infinity`.
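  For example (an illustrative sketch; assumes `import tensorflow as tf` and
  that this function is in scope, e.g. via `tf.contrib.distributions`):
  ```python
  x = tf.random_normal(shape=[1000])
  rxx = auto_correlation(x, max_lags=3)
  # rxx.shape ==> [4]; rxx[0] ==> 1. since `normalize=True` divides by the
  # zero-lag term.
  ```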
Args:
x: `float32` or `complex64` `Tensor`.
axis: Python `int`. The axis number along which to compute correlation.
Other dimensions index different batch members.
max_lags: Positive `int` tensor. The maximum value of `m` to consider
(in equation above). If `max_lags >= x.shape[axis]`, we effectively
re-set `max_lags` to `x.shape[axis] - 1`.
center: Python `bool`. If `False`, do not subtract the mean estimate `mu`
from `x[n]` when forming `w[n]`.
normalize: Python `bool`. If `False`, do not divide by the variance
estimate `s**2` when forming `w[n]`.
name: `String` name to prepend to created ops.
Returns:
`rxx`: `Tensor` of same `dtype` as `x`. `rxx.shape[i] = x.shape[i]` for
`i != axis`, and `rxx.shape[axis] = max_lags + 1`.
Raises:
TypeError: If `x` is not a supported type.
"""
# Implementation details:
# Extend length N / 2 1-D array x to length N by zero padding onto the end.
# Then, set
# F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.
# It is not hard to see that
# F[x]_k Conj(F[x]_k) = F[R]_k, where
# R_m := sum_n x_n Conj(x_{(n - m) mod N}).
# One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].
# Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT
# based version of estimating RXX.
# Note that this is a special case of the Wiener-Khinchin Theorem.
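  # A quick (illustrative, not executed) numpy check of the identity above,
  # with `x` a real 1-D array of length N:
  #   R = [np.sum(x * np.roll(x, m)) for m in range(N)]           # circular
  #   np.allclose(np.fft.ifft(np.abs(np.fft.fft(x))**2).real, R)  # ==> True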
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# Rotate dimensions of x in order to put axis at the rightmost dim.
# FFT op requires this.
rank = util.prefer_static_rank(x)
if axis < 0:
axis = rank + axis
shift = rank - 1 - axis
# Suppose x.shape[axis] = T, so there are T "time" steps.
# ==> x_rotated.shape = B + [T],
# where B is x_rotated's batch shape.
x_rotated = util.rotate_transpose(x, shift)
if center:
x_rotated -= math_ops.reduce_mean(x_rotated, axis=-1, keepdims=True)
# x_len = N / 2 from above explanation. The length of x along axis.
# Get a value for x_len that works in all cases.
x_len = util.prefer_static_shape(x_rotated)[-1]
# TODO(langmore) Investigate whether this zero padding helps or hurts. At
    # the moment it is necessary so that all FFT implementations work.
# Zero pad to the next power of 2 greater than 2 * x_len, which equals
# 2**(ceil(Log_2(2 * x_len))). Note: Log_2(X) = Log_e(X) / Log_e(2).
x_len_float64 = math_ops.cast(x_len, np.float64)
target_length = math_ops.pow(
np.float64(2.),
math_ops.ceil(math_ops.log(x_len_float64 * 2) / np.log(2.)))
pad_length = math_ops.cast(target_length - x_len_float64, np.int32)
# We should have:
# x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]
# = B + [T + pad_length]
x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)
dtype = x.dtype
if not dtype.is_complex:
if not dtype.is_floating:
raise TypeError("Argument x must have either float or complex dtype"
" found: {}".format(dtype))
x_rotated_pad = math_ops.complex(x_rotated_pad,
dtype.real_dtype.as_numpy_dtype(0.))
# Autocorrelation is IFFT of power-spectral density (up to some scaling).
fft_x_rotated_pad = fft_ops.fft(x_rotated_pad)
spectral_density = fft_x_rotated_pad * math_ops.conj(fft_x_rotated_pad)
# shifted_product is R[m] from above detailed explanation.
# It is the inner product sum_n X[n] * Conj(X[n - m]).
shifted_product = fft_ops.ifft(spectral_density)
# Cast back to real-valued if x was real to begin with.
shifted_product = math_ops.cast(shifted_product, dtype)
# Figure out if we can deduce the final static shape, and set max_lags.
# Use x_rotated as a reference, because it has the time dimension in the far
# right, and was created before we performed all sorts of crazy shape
# manipulations.
know_static_shape = True
if not x_rotated.shape.is_fully_defined():
know_static_shape = False
if max_lags is None:
max_lags = x_len - 1
else:
max_lags = ops.convert_to_tensor(max_lags, name="max_lags")
max_lags_ = tensor_util.constant_value(max_lags)
if max_lags_ is None or not know_static_shape:
know_static_shape = False
max_lags = math_ops.minimum(x_len - 1, max_lags)
else:
max_lags = min(x_len - 1, max_lags_)
# Chop off the padding.
# We allow users to provide a huge max_lags, but cut it off here.
# shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags]
shifted_product_chopped = shifted_product[..., :max_lags + 1]
# If possible, set shape.
if know_static_shape:
chopped_shape = x_rotated.shape.as_list()
chopped_shape[-1] = min(x_len, max_lags + 1)
shifted_product_chopped.set_shape(chopped_shape)
# Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]). The
# other terms were zeros arising only due to zero padding.
# `denominator = (N / 2 - m)` (defined below) is the proper term to
# divide by to make this an unbiased estimate of the expectation
# E[X[n] Conj(X[n - m])].
x_len = math_ops.cast(x_len, dtype.real_dtype)
max_lags = math_ops.cast(max_lags, dtype.real_dtype)
denominator = x_len - math_ops.range(0., max_lags + 1.)
denominator = math_ops.cast(denominator, dtype)
shifted_product_rotated = shifted_product_chopped / denominator
if normalize:
shifted_product_rotated /= shifted_product_rotated[..., :1]
# Transpose dimensions back to those of x.
return util.rotate_transpose(shifted_product_rotated, -shift)
# TODO(langmore) To make equivalent to numpy.percentile:
# Make work with a sequence of floats or single float for 'q'.
# Make work with "linear", "midpoint" interpolation. (linear should be default)
def percentile(x,
q,
axis=None,
interpolation=None,
keep_dims=False,
validate_args=False,
name=None):
"""Compute the `q`-th percentile of `x`.
Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
way from the minimum to the maximum in a sorted copy of `x`.
The values and distances of the two nearest neighbors as well as the
`interpolation` parameter will determine the percentile if the normalized
ranking does not match the location of `q` exactly.
This function is the same as the median if `q = 50`, the same as the minimum
if `q = 0` and the same as the maximum if `q = 100`.
```python
# Get 30th percentile with default ('nearest') interpolation.
x = [1., 2., 3., 4.]
percentile(x, q=30.)
==> 2.0
# Get 30th percentile with 'lower' interpolation
x = [1., 2., 3., 4.]
percentile(x, q=30., interpolation='lower')
==> 1.0
# Get 100th percentile (maximum). By default, this is computed over every dim
x = [[1., 2.]
[3., 4.]]
percentile(x, q=100.)
==> 4.0
# Treat the leading dim as indexing samples, and find the 100th quantile (max)
# over all such samples.
x = [[1., 2.]
[3., 4.]]
percentile(x, q=100., axis=[0])
==> [3., 4.]
```
Compare to `numpy.percentile`.
Args:
x: Floating point `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
q: Scalar `Tensor` in `[0, 100]`. The percentile.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values.
      The axes that hold independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {"lower", "higher", "nearest"}. Default: "nearest"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points `i < j`:
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
    keep_dims: Python `bool`. If `True`, the last dimension is kept with size
      1. If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity.
If False, and arguments are incorrect, correct behavior is not guaranteed.
name: A Python string name to give this `Op`. Default is "percentile"
Returns:
A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if
`axis` is `None`, a scalar.
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
"""
name = name or "percentile"
allowed_interpolations = {"lower", "higher", "nearest"}
if interpolation is None:
interpolation = "nearest"
else:
if interpolation not in allowed_interpolations:
raise ValueError("Argument 'interpolation' must be in %s. Found %s" %
(allowed_interpolations, interpolation))
with ops.name_scope(name, values=[x, q]):
x = ops.convert_to_tensor(x, name="x")
# Double is needed here and below, else we get the wrong index if the array
# is huge along axis.
q = math_ops.cast(q, dtypes.float64, name="q")
_get_static_ndims(q, expect_ndims=0)
if validate_args:
q = control_flow_ops.with_dependencies([
check_ops.assert_rank(q, 0),
check_ops.assert_greater_equal(q, math_ops.cast(0., dtypes.float64)),
check_ops.assert_less_equal(q, math_ops.cast(100., dtypes.float64))
], q)
if axis is None:
y = array_ops.reshape(x, [-1])
else:
axis = ops.convert_to_tensor(axis, name="axis")
check_ops.assert_integer(axis)
axis_ndims = _get_static_ndims(
axis, expect_static=True, expect_ndims_no_more_than=1)
axis_const = tensor_util.constant_value(axis)
if axis_const is None:
raise ValueError(
"Expected argument 'axis' to be statically available. Found: %s" %
axis)
axis = axis_const
if axis_ndims == 0:
axis = [axis]
axis = [int(a) for a in axis]
x_ndims = _get_static_ndims(
x, expect_static=True, expect_ndims_at_least=1)
axis = _make_static_axis_non_negative(axis, x_ndims)
y = _move_dims_to_flat_end(x, axis, x_ndims)
frac_at_q_or_above = 1. - q / 100.
d = math_ops.cast(array_ops.shape(y)[-1], dtypes.float64)
if interpolation == "lower":
index = math_ops.ceil((d - 1) * frac_at_q_or_above)
elif interpolation == "higher":
index = math_ops.floor((d - 1) * frac_at_q_or_above)
elif interpolation == "nearest":
index = math_ops.round((d - 1) * frac_at_q_or_above)
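    # E.g. with the docstring example x = [1., 2., 3., 4.] and q = 30:
    # frac_at_q_or_above = 0.7 and d = 4, so (d - 1) * 0.7 = 2.1; "nearest"
    # rounds this to index 2, which in the descending sort below is 2.0.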
# If d is gigantic, then we would have d == d - 1, even in double... So
# let's use max/min to avoid out of bounds errors.
d = array_ops.shape(y)[-1]
# d - 1 will be distinct from d in int32.
index = clip_ops.clip_by_value(math_ops.cast(index, dtypes.int32), 0, d - 1)
    # Sort everything, not just the top `k` entries; this lets multiple calls
    # share a single sort (under the hood) via common subexpression
    # elimination.
sorted_y = _sort_tensor(y)
# result.shape = B
result = sorted_y[..., index]
result.set_shape(y.get_shape()[:-1])
if keep_dims:
if axis is None:
# ones_vec = [1, 1,..., 1], total length = len(S) + len(B).
ones_vec = array_ops.ones(
shape=[_get_best_effort_ndims(x)], dtype=dtypes.int32)
result *= array_ops.ones(ones_vec, dtype=x.dtype)
else:
result = _insert_back_keep_dims(result, axis)
return result
def _get_static_ndims(x,
expect_static=False,
expect_ndims=None,
expect_ndims_no_more_than=None,
expect_ndims_at_least=None):
"""Get static number of dimensions and assert that some expectations are met.
This function returns the number of dimensions "ndims" of x, as a Python int.
The optional expect arguments are used to check the ndims of x, but this is
only done if the static ndims of x is not None.
Args:
x: A Tensor.
expect_static: Expect `x` to have statically defined `ndims`.
expect_ndims: Optional Python integer. If provided, assert that x has
number of dimensions equal to this.
expect_ndims_no_more_than: Optional Python integer. If provided, assert
that x has no more than this many dimensions.
expect_ndims_at_least: Optional Python integer. If provided, assert that
x has at least this many dimensions.
Returns:
ndims: A Python integer.
Raises:
ValueError: If any of the expectations above are violated.
"""
ndims = x.get_shape().ndims
if ndims is None:
shape_const = tensor_util.constant_value(array_ops.shape(x))
if shape_const is not None:
ndims = shape_const.ndim
if ndims is None:
if expect_static:
raise ValueError(
"Expected argument 'x' to have statically defined 'ndims'. Found: " %
x)
return
if expect_ndims is not None:
ndims_message = ("Expected argument 'x' to have ndims %s. Found tensor %s"
% (expect_ndims, x))
if ndims != expect_ndims:
raise ValueError(ndims_message)
if expect_ndims_at_least is not None:
ndims_at_least_message = (
"Expected argument 'x' to have ndims >= %d. Found tensor %s" % (
expect_ndims_at_least, x))
if ndims < expect_ndims_at_least:
raise ValueError(ndims_at_least_message)
if expect_ndims_no_more_than is not None:
ndims_no_more_than_message = (
"Expected argument 'x' to have ndims <= %d. Found tensor %s" % (
expect_ndims_no_more_than, x))
if ndims > expect_ndims_no_more_than:
raise ValueError(ndims_no_more_than_message)
return ndims
def _get_best_effort_ndims(x,
expect_ndims=None,
expect_ndims_at_least=None,
expect_ndims_no_more_than=None):
"""Get static ndims if possible. Fallback on `tf.rank(x)`."""
ndims_static = _get_static_ndims(
x,
expect_ndims=expect_ndims,
expect_ndims_at_least=expect_ndims_at_least,
expect_ndims_no_more_than=expect_ndims_no_more_than)
if ndims_static is not None:
return ndims_static
return array_ops.rank(x)
def _insert_back_keep_dims(x, axis):
"""Insert the dims in `axis` back as singletons after being removed.
Args:
x: `Tensor`.
axis: Python list of integers.
Returns:
`Tensor` with same values as `x`, but additional singleton dimensions.
"""
for i in sorted(axis):
x = array_ops.expand_dims(x, axis=i)
return x
def _make_static_axis_non_negative(axis, ndims):
"""Convert possibly negatively indexed axis to non-negative.
Args:
axis: Iterable over Python integers.
ndims: Number of dimensions into which axis indexes.
Returns:
A list of non-negative Python integers.
Raises:
ValueError: If values in `axis` are too big/small to index into `ndims`.
"""
non_negative_axis = []
for d in axis:
if d >= 0:
if d >= ndims:
raise ValueError("dim %d not in the interval [0, %d]." % (d, ndims - 1))
non_negative_axis.append(d)
else:
if d < -1 * ndims:
raise ValueError(
"Negatively indexed dim %d not in the interval [-%d, -1]" % (d,
ndims))
non_negative_axis.append(ndims + d)
return non_negative_axis
def _move_dims_to_flat_end(x, axis, x_ndims):
"""Move dims corresponding to `axis` in `x` to the end, then flatten.
Args:
x: `Tensor` with shape `[B0,B1,...,Bb]`.
axis: Python list of indices into dimensions of `x`.
x_ndims: Python integer holding number of dimensions in `x`.
Returns:
`Tensor` with value from `x` and dims in `axis` moved to end into one single
dimension.
"""
# Suppose x.shape = [a, b, c, d]
# Suppose axis = [1, 3]
# front_dims = [0, 2] in example above.
front_dims = sorted(set(range(x_ndims)).difference(axis))
# x_permed.shape = [a, c, b, d]
x_permed = array_ops.transpose(x, perm=front_dims + list(axis))
if x.get_shape().is_fully_defined():
x_shape = x.get_shape().as_list()
# front_shape = [a, c], end_shape = [b * d]
front_shape = [x_shape[i] for i in front_dims]
end_shape = [np.prod([x_shape[i] for i in axis])]
full_shape = front_shape + end_shape
else:
front_shape = array_ops.shape(x_permed)[:x_ndims - len(axis)]
end_shape = [-1]
full_shape = array_ops.concat([front_shape, end_shape], axis=0)
return array_ops.reshape(x_permed, shape=full_shape)
def _sort_tensor(tensor):
"""Use `top_k` to sort a `Tensor` along the last dimension."""
sorted_, _ = nn_ops.top_k(tensor, k=array_ops.shape(tensor)[-1])
return sorted_
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/sample_stats.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-dimensional (Vector) SinhArcsinh transformation of a distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.util import deprecation
__all__ = [
"VectorSinhArcsinhDiag",
]
class VectorSinhArcsinhDiag(transformed_distribution.TransformedDistribution):
"""The (diagonal) SinhArcsinh transformation of a distribution on `R^k`.
This distribution models a random vector `Y = (Y1,...,Yk)`, making use of
a `SinhArcsinh` transformation (which has adjustable tailweight and skew),
a rescaling, and a shift.
The `SinhArcsinh` transformation of the Normal is described in great depth in
[Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865).
Here we use a slightly different parameterization, in terms of `tailweight`
and `skewness`. Additionally we allow for distributions other than Normal,
and control over `scale` as well as a "shift" parameter `loc`.
#### Mathematical Details
Given iid random vector `Z = (Z1,...,Zk)`, we define the VectorSinhArcsinhDiag
transformation of `Z`, `Y`, parameterized by
`(loc, scale, skewness, tailweight)`, via the relation (with `@` denoting
matrix multiplication):
```
Y := loc + scale @ F(Z) * (2 / F_0(2))
F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
```
This distribution is similar to the location-scale transformation
`L(Z) := loc + scale @ Z` in the following ways:
* If `skewness = 0` and `tailweight = 1` (the defaults), `F(Z) = Z`, and then
`Y = L(Z)` exactly.
* `loc` is used in both to shift the result by a constant factor.
* The multiplication of `scale` by `2 / F_0(2)` ensures that if `skewness = 0`
`P[Y - loc <= 2 * scale] = P[L(Z) - loc <= 2 * scale]`.
Thus it can be said that the weights in the tails of `Y` and `L(Z)` beyond
`loc + 2 * scale` are the same.
This distribution is different than `loc + scale @ Z` due to the
reshaping done by `F`:
* Positive (negative) `skewness` leads to positive (negative) skew.
    * positive skew means the mode of `F(Z)` is "tilted" to the right.
* positive skew means positive values of `F(Z)` become more likely, and
negative values become less likely.
* Larger (smaller) `tailweight` leads to fatter (thinner) tails.
* Fatter tails mean larger values of `|F(Z)|` become more likely.
* `tailweight < 1` leads to a distribution that is "flat" around `Y = loc`,
and a very steep drop-off in the tails.
* `tailweight > 1` leads to a distribution more peaked at the mode with
heavier tails.
To see the argument about the tails, note that for `|Z| >> 1` and
`|Z| >> (|skewness| * tailweight)**tailweight`, we have
`Y approx 0.5 Z**tailweight e**(sign(Z) skewness * tailweight)`.
To see the argument regarding multiplying `scale` by `2 / F_0(2)`,
```
P[(Y - loc) / scale <= 2] = P[F(Z) * (2 / F_0(2)) <= 2]
= P[F(Z) <= F_0(2)]
= P[Z <= 2] (if F = F_0).
```
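  #### Examples
  A minimal usage sketch (parameter values are illustrative; assumes the class
  is in scope, e.g. as `tfd.VectorSinhArcsinhDiag` with
  `tfd = tf.contrib.distributions`):
  ```python
  dist = tfd.VectorSinhArcsinhDiag(
      loc=[0., 0.],
      scale_diag=[1., 2.],
      skewness=0.5,
      tailweight=2.)
  dist.sample(3)         # shape: [3, 2]
  dist.prob([0., 1.])    # shape: []
  ```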
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
skewness=None,
tailweight=None,
distribution=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalLinearOperator"):
"""Construct VectorSinhArcsinhDiag distribution on `R^k`.
The arguments `scale_diag` and `scale_identity_multiplier` combine to
define the diagonal `scale` referred to in this class docstring:
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
    `scale`. The last dimension of `loc` (if provided) must broadcast with this.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scale-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scale
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale`
is the `Identity`.
      skewness: Skewness parameter. Floating-point `Tensor` with shape
        broadcastable with `event_shape`.
      tailweight: Tailweight parameter. Floating-point `Tensor` with shape
        broadcastable with `event_shape`.
distribution: `tf.Distribution`-like instance. Distribution from which `k`
iid samples are used as input to transformation `F`. Default is
`tfp.distributions.Normal(loc=0., scale=1.)`.
Must be a scalar-batch, scalar-event distribution. Typically
`distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
a function of non-trainable parameters. WARNING: If you backprop through
a VectorSinhArcsinhDiag sample and `distribution` is not
`FULLY_REPARAMETERIZED` yet is a function of trainable variables, then
the gradient will be incorrect!
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if at most `scale_identity_multiplier` is specified.
"""
parameters = dict(locals())
with ops.name_scope(
name,
values=[
loc, scale_diag, scale_identity_multiplier, skewness, tailweight
]) as name:
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
tailweight = 1. if tailweight is None else tailweight
has_default_skewness = skewness is None
skewness = 0. if skewness is None else skewness
# Recall, with Z a random variable,
# Y := loc + C * F(Z),
# F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
# F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
# C := 2 * scale / F_0(2)
# Construct shapes and 'scale' out of the scale_* and loc kwargs.
# scale_linop is only an intermediary to:
# 1. get shapes from looking at loc and the two scale args.
# 2. combine scale_diag with scale_identity_multiplier, which gives us
# 'scale', which in turn gives us 'C'.
scale_linop = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=False,
assert_positive=False)
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale_linop)
# scale_linop.diag_part() is efficient since it is a diag type linop.
scale_diag_part = scale_linop.diag_part()
dtype = scale_diag_part.dtype
if distribution is None:
distribution = normal.Normal(
loc=array_ops.zeros([], dtype=dtype),
scale=array_ops.ones([], dtype=dtype),
allow_nan_stats=allow_nan_stats)
else:
asserts = distribution_util.maybe_check_scalar_distribution(
distribution, dtype, validate_args)
if asserts:
scale_diag_part = control_flow_ops.with_dependencies(
asserts, scale_diag_part)
# Make the SAS bijector, 'F'.
skewness = ops.convert_to_tensor(skewness, dtype=dtype, name="skewness")
tailweight = ops.convert_to_tensor(
tailweight, dtype=dtype, name="tailweight")
f = bijectors.SinhArcsinh(
skewness=skewness, tailweight=tailweight)
if has_default_skewness:
f_noskew = f
else:
f_noskew = bijectors.SinhArcsinh(
skewness=skewness.dtype.as_numpy_dtype(0.),
tailweight=tailweight)
# Make the Affine bijector, Z --> loc + C * Z.
c = 2 * scale_diag_part / f_noskew.forward(
ops.convert_to_tensor(2, dtype=dtype))
affine = bijectors.Affine(
shift=loc, scale_diag=c, validate_args=validate_args)
bijector = bijectors.Chain([affine, f])
super(VectorSinhArcsinhDiag, self).__init__(
distribution=distribution,
bijector=bijector,
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
self._loc = loc
self._scale = scale_linop
self._tailweight = tailweight
self._skewness = skewness
@property
def loc(self):
"""The `loc` in `Y := loc + scale @ F(Z) * (2 / F(2))."""
return self._loc
@property
def scale(self):
"""The `LinearOperator` `scale` in `Y := loc + scale @ F(Z) * (2 / F(2))."""
return self._scale
@property
def tailweight(self):
"""Controls the tail decay. `tailweight > 1` means faster than Normal."""
return self._tailweight
@property
def skewness(self):
"""Controls the skewness. `Skewness > 0` means right skew."""
return self._skewness
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal distribution: conjugate posterior closed form calculations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import normal
def normal_conjugates_known_scale_posterior(prior, scale, s, n):
"""Posterior Normal distribution with conjugate prior on the mean.
This model assumes that `n` observations (with sum `s`) come from a
Normal with unknown mean `loc` (described by the Normal `prior`)
and known variance `scale**2`. The "known scale posterior" is
the distribution of the unknown `loc`.
Accepts a prior Normal distribution object, having parameters
`loc0` and `scale0`, as well as known `scale` values of the predictive
distribution(s) (also assumed Normal),
and statistical estimates `s` (the sum(s) of the observations) and
`n` (the number(s) of observations).
Returns a posterior (also Normal) distribution object, with parameters
`(loc', scale'**2)`, where:
```
mu ~ N(mu', sigma'**2)
sigma'**2 = 1/(1/sigma0**2 + n/sigma**2),
mu' = (mu0/sigma0**2 + s/sigma**2) * sigma'**2.
```
  Distribution parameters from `prior`, as well as `scale`, `s`, and `n`,
  will broadcast in the case of multidimensional sets of parameters.
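  For example (an illustrative sketch; assumes `import tensorflow as tf`):
  ```python
  prior = tf.distributions.Normal(loc=0., scale=1.)
  # 10 observations with sum 5. and known observation stddev 2.
  posterior = normal_conjugates_known_scale_posterior(
      prior, scale=2., s=tf.constant(5.), n=10)
  # posterior is a Normal with
  #   scale**2 = 1 / (1/1**2 + 10/2**2) = 2/7
  #   loc      = (0/1**2 + 5/2**2) * 2/7 = 5/14
  ```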
Args:
prior: `Normal` object of type `dtype`:
the prior distribution having parameters `(loc0, scale0)`.
scale: tensor of type `dtype`, taking values `scale > 0`.
The known stddev parameter(s).
s: Tensor of type `dtype`. The sum(s) of observations.
n: Tensor of type `int`. The number(s) of observations.
Returns:
A new Normal posterior distribution object for the unknown observation
mean `loc`.
Raises:
TypeError: if dtype of `s` does not match `dtype`, or `prior` is not a
Normal object.
"""
if not isinstance(prior, normal.Normal):
raise TypeError("Expected prior to be an instance of type Normal")
if s.dtype != prior.dtype:
raise TypeError(
"Observation sum s.dtype does not match prior dtype: %s vs. %s"
% (s.dtype, prior.dtype))
n = math_ops.cast(n, prior.dtype)
scale0_2 = math_ops.square(prior.scale)
scale_2 = math_ops.square(scale)
scalep_2 = 1.0/(1/scale0_2 + n/scale_2)
return normal.Normal(
loc=(prior.loc/scale0_2 + s/scale_2) * scalep_2,
scale=math_ops.sqrt(scalep_2))
def normal_conjugates_known_scale_predictive(prior, scale, s, n):
"""Posterior predictive Normal distribution w. conjugate prior on the mean.
This model assumes that `n` observations (with sum `s`) come from a
Normal with unknown mean `loc` (described by the Normal `prior`)
and known variance `scale**2`. The "known scale predictive"
is the distribution of new observations, conditioned on the existing
observations and our prior.
Accepts a prior Normal distribution object, having parameters
`loc0` and `scale0`, as well as known `scale` values of the predictive
distribution(s) (also assumed Normal),
and statistical estimates `s` (the sum(s) of the observations) and
`n` (the number(s) of observations).
Calculates the Normal distribution(s) `p(x | sigma**2)`:
```
p(x | sigma**2) = int N(x | mu, sigma**2)N(mu | prior.loc, prior.scale**2) dmu
                  = N(x | prior.loc, sigma**2 + prior.scale**2)
```
Returns the predictive posterior distribution object, with parameters
`(loc', scale'**2)`, where:
```
sigma_n**2 = 1/(1/sigma0**2 + n/sigma**2),
mu' = (mu0/sigma0**2 + s/sigma**2) * sigma_n**2.
sigma'**2 = sigma_n**2 + sigma**2,
```
  Distribution parameters from `prior`, as well as `scale`, `s`, and `n`,
  will broadcast in the case of multidimensional sets of parameters.
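  For example (an illustrative sketch; assumes `import tensorflow as tf`):
  ```python
  prior = tf.distributions.Normal(loc=0., scale=1.)
  predictive = normal_conjugates_known_scale_predictive(
      prior, scale=2., s=tf.constant(5.), n=10)
  # predictive is a Normal with
  #   loc      = (0/1**2 + 5/2**2) / (1/1**2 + 10/2**2) = 5/14
  #   scale**2 = 1 / (1/1**2 + 10/2**2) + 2**2 = 2/7 + 4
  ```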
Args:
prior: `Normal` object of type `dtype`:
the prior distribution having parameters `(loc0, scale0)`.
scale: tensor of type `dtype`, taking values `scale > 0`.
The known stddev parameter(s).
s: Tensor of type `dtype`. The sum(s) of observations.
n: Tensor of type `int`. The number(s) of observations.
Returns:
A new Normal predictive distribution object.
Raises:
TypeError: if dtype of `s` does not match `dtype`, or `prior` is not a
Normal object.
"""
if not isinstance(prior, normal.Normal):
raise TypeError("Expected prior to be an instance of type Normal")
if s.dtype != prior.dtype:
raise TypeError(
"Observation sum s.dtype does not match prior dtype: %s vs. %s"
% (s.dtype, prior.dtype))
n = math_ops.cast(n, prior.dtype)
scale0_2 = math_ops.square(prior.scale)
scale_2 = math_ops.square(scale)
scalep_2 = 1.0/(1/scale0_2 + n/scale_2)
return normal.Normal(
loc=(prior.loc/scale0_2 + s/scale_2) * scalep_2,
scale=math_ops.sqrt(scalep_2 + scale_2))
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/normal_conjugate_posteriors.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalDiagWithSoftplusScale",
]
class MultivariateNormalDiag(
mvn_linop.MultivariateNormalLinearOperator):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T` where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 2-variate Gaussian.
mvn = tfd.MultivariateNormalDiag(
loc=[1., -1],
scale_diag=[1, 2.])
mvn.mean().eval()
# ==> [1., -1]
mvn.stddev().eval()
# ==> [1., 2]
# Evaluate this on an observation in `R^2`, returning a scalar.
mvn.prob([-1., 0]).eval() # shape: []
# Initialize a 3-batch, 2-variate scaled-identity Gaussian.
mvn = tfd.MultivariateNormalDiag(
loc=[1., -1],
scale_identity_multiplier=[1, 2., 3])
mvn.mean().eval() # shape: [3, 2]
# ==> [[1., -1]
# [1, -1],
# [1, -1]]
mvn.stddev().eval() # shape: [3, 2]
# ==> [[1., 1],
# [2, 2],
# [3, 3]]
# Evaluate this on an observation in `R^2`, returning a length-3 vector.
mvn.prob([-1., 0]).eval() # shape: [3]
# Initialize a 2-batch of 3-variate Gaussians.
mvn = tfd.MultivariateNormalDiag(
loc=[[1., 2, 3],
         [11, 22, 33]],  # shape: [2, 3]
scale_diag=[[1., 2, 3],
[0.5, 1, 1.5]]) # shape: [2, 3]
  # Evaluate this on two observations, each in `R^3`, returning a length-2
# vector.
x = [[-1., 0, 1],
[-11, 0, 11.]] # shape: [2, 3].
mvn.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiag"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scaled-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale` is
the `Identity`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if at most `scale_identity_multiplier` is specified.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier]):
# No need to validate_args while making diag_scale. The returned
# LinearOperatorDiag has an assert_non_singular method that is called by
# the Bijector.
scale = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=False,
assert_positive=False)
super(MultivariateNormalDiag, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
class MultivariateNormalDiagWithSoftplusScale(MultivariateNormalDiag):
"""MultivariateNormalDiag with `diag_stddev = softplus(diag_stddev)`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale_diag,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagWithSoftplusScale"):
parameters = dict(locals())
with ops.name_scope(name, values=[scale_diag]) as name:
super(MultivariateNormalDiagWithSoftplusScale, self).__init__(
loc=loc,
scale_diag=nn.softplus(scale_diag),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/mvn_diag.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution class initialized with a full covariance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import mvn_tril
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalFullCovariance",
]
class MultivariateNormalFullCovariance(mvn_tril.MultivariateNormalTriL):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`covariance_matrix` matrices that are the covariance.
  This is different from the other multivariate normals, which are parameterized
by a matrix more akin to the standard deviation.
#### Mathematical Details
The probability density function (pdf) is, with `@` as matrix multiplication,
```none
pdf(x; loc, covariance_matrix) = exp(-0.5 y) / Z,
y = (x - loc)^T @ inv(covariance_matrix) @ (x - loc)
Z = (2 pi)**(0.5 k) |det(covariance_matrix)|**(0.5).
```
where:
* `loc` is a vector in `R^k`,
* `covariance_matrix` is an `R^{k x k}` symmetric positive definite matrix,
* `Z` denotes the normalization constant.
Additional leading dimensions (if any) in `loc` and `covariance_matrix` allow
for batch dimensions.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed e.g. as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
scale = Cholesky(covariance_matrix)
Y = scale @ X + loc
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
mvn = tfd.MultivariateNormalFullCovariance(
loc=mu,
covariance_matrix=cov)
mvn.mean().eval()
# ==> [1., 2, 3]
# Covariance agrees with covariance_matrix.
mvn.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
  # Compute the pdf of an observation in `R^3`; return a scalar.
mvn.prob([-1., 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Gaussians.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
covariance_matrix = ... # shape: [2, 3, 3], symmetric, positive definite.
mvn = tfd.MultivariateNormalFullCovariance(
loc=mu,
    covariance_matrix=covariance_matrix)
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
covariance_matrix=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalFullCovariance"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and
`covariance_matrix` arguments.
The `event_shape` is given by last dimension of the matrix implied by
`covariance_matrix`. The last dimension of `loc` (if provided) must
broadcast with this.
A non-batch `covariance_matrix` matrix is a `k x k` symmetric positive
definite matrix. In other words it is (real) symmetric with all eigenvalues
strictly positive.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
covariance_matrix: Floating-point, symmetric positive definite `Tensor` of
same `dtype` as `loc`. The strict upper triangle of `covariance_matrix`
is ignored, so if `covariance_matrix` is not symmetric no error will be
raised (unless `validate_args is True`). `covariance_matrix` has shape
`[B1, ..., Bb, k, k]` where `b >= 0` and `k` is the event size.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if neither `loc` nor `covariance_matrix` are specified.
"""
parameters = dict(locals())
# Convert the covariance_matrix up to a scale_tril and call MVNTriL.
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[loc, covariance_matrix]):
if covariance_matrix is None:
scale_tril = None
else:
covariance_matrix = ops.convert_to_tensor(
covariance_matrix, name="covariance_matrix")
if validate_args:
covariance_matrix = control_flow_ops.with_dependencies([
check_ops.assert_near(
covariance_matrix,
array_ops.matrix_transpose(covariance_matrix),
message="Matrix was not symmetric")], covariance_matrix)
# No need to validate that covariance_matrix is non-singular.
# LinearOperatorLowerTriangular has an assert_non_singular method that
# is called by the Bijector.
# However, cholesky() ignores the upper triangular part, so we do need
# to separately assert symmetric.
scale_tril = linalg_ops.cholesky(covariance_matrix)
super(MultivariateNormalFullCovariance, self).__init__(
loc=loc,
scale_tril=scale_tril,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/mvn_full_covariance.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vectorized Exponential distribution class, directly using LinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import exponential
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = ["VectorExponentialLinearOperator"]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
class VectorExponentialLinearOperator(
transformed_distribution.TransformedDistribution):
"""The vectorization of the Exponential distribution on `R^k`.
The vector exponential distribution is defined over a subset of `R^k`, and
parameterized by a (batch of) length-`k` `loc` vector and a (batch of) `k x k`
`scale` matrix: `covariance = scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is
```none
pdf(y; loc, scale) = exp(-||x||_1) / Z, for y in S(loc, scale),
x = inv(scale) @ (y - loc),
Z = |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `S = {loc + scale @ x : x in R^k, x_1 > 0, ..., x_k > 0}`, is an image of
the positive half-space,
* `||x||_1` denotes the `l1` norm of `x`, `sum_i |x_i|`,
* `Z` denotes the normalization constant.
The VectorExponential distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Exponential(rate=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorExponential` and `Vector` distributions in TensorFlow.
The `VectorExponential` is a non-standard distribution that has useful
properties.
The marginals `Y_1, ..., Y_k` are *not* Exponential random variables, due to
the fact that the sum of Exponential random variables is not Exponential.
Instead, `Y` is a vector whose components are linear combinations of
Exponential random variables. Thus, `Y` lives in the vector space generated
by `vectors` of Exponential distributions. This allows the user to decide the
mean and covariance (by setting `loc` and `scale`), while preserving some
properties of the Exponential distribution. In particular, the tails of `Y_i`
will be (up to polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Exponential random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 2-variate VectorExponential, supported on
# {(x, y) in R^2 : x > 0, y > 0}.
mat = [[1.0, 0.1],
[0.1, 1.0]]
vex = tfd.VectorExponentialLinearOperator(
scale=tf.linalg.LinearOperatorFullMatrix(mat))
  # Compute the pdf of an `R^2` observation; return a scalar.
vex.prob([1., 2.]).eval() # shape: []
  # Initialize a 2-batch of 3-variate Vector Exponentials.
mu = [[1., 2, 3],
[1., 0, 0]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
vex = tfd.VectorExponentialLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[1.9, 2.2, 3.1],
[10., 1.0, 9.0]] # shape: [2, 3]
vex.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name="VectorExponentialLinearOperator"):
"""Construct Vector Exponential distribution supported on a subset of `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = dict(locals())
if scale is None:
raise ValueError("Missing required `scale` parameter.")
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
with ops.name_scope(name, values=[loc] + scale.graph_parents) as name:
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
super(VectorExponentialLinearOperator, self).__init__(
distribution=exponential.Exponential(rate=array_ops.ones(
[], dtype=scale.dtype), allow_nan_stats=allow_nan_stats),
bijector=bijectors.AffineLinearOperator(
shift=loc, scale=scale, validate_args=validate_args),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self.bijector.shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self.bijector.scale
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(VectorExponentialLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(VectorExponentialLinearOperator, self)._prob(x)
def _mean(self):
# Let
# W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
# Then this distribution is
# X = loc + LW,
# and then E[X] = loc + L1, where 1 is the vector of ones.
scale_x_ones = self.bijector.scale.matvec(
array_ops.ones(self._mode_mean_shape(), self.dtype))
if self.loc is None:
return scale_x_ones
return array_ops.identity(self.loc) + scale_x_ones
def _covariance(self):
# Let
# W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
# Then this distribution is
# X = loc + LW,
# and then since Cov(wi, wj) = 1 if i=j, and 0 otherwise,
# Cov(X) = L Cov(W W^T) L^T = L L^T.
if distribution_util.is_diagonal_scale(self.scale):
return array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
else:
return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense()))
else:
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return math_ops.sqrt(
array_ops.matrix_diag_part(self.scale.matmul(self.scale.to_dense())))
else:
return math_ops.sqrt(
array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
def _mode(self):
scale_x_zeros = self.bijector.scale.matvec(
array_ops.zeros(self._mode_mean_shape(), self.dtype))
if self.loc is None:
return scale_x_zeros
return array_ops.identity(self.loc) + scale_x_zeros
def _mode_mean_shape(self):
"""Shape for the mode/mean Tensors."""
shape = self.batch_shape.concatenate(self.event_shape)
has_static_shape = shape.is_fully_defined()
if not has_static_shape:
shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
return shape
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/vector_exponential_linear_operator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The same-family Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class MixtureSameFamily(distribution.Distribution):
"""Mixture (same-family) distribution.
The `MixtureSameFamily` distribution implements a (batch of) mixture
distribution where all components are from different parameterizations of the
same distribution type. It is parameterized by a `Categorical` "selecting
distribution" (over `k` components) and a components distribution, i.e., a
`Distribution` with a rightmost batch shape (equal to `[k]`) which indexes
each (batch of) component.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
### Create a mixture of two scalar Gaussians:
gm = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=[0.3, 0.7]),
components_distribution=tfd.Normal(
loc=[-1., 1], # One for each component.
scale=[0.1, 0.5])) # And same here.
gm.mean()
# ==> 0.4
gm.variance()
# ==> 1.018
# Plot PDF.
x = np.linspace(-2., 3., int(1e4), dtype=np.float32)
import matplotlib.pyplot as plt
plt.plot(x, gm.prob(x).eval());
### Create a mixture of two Bivariate Gaussians:
gm = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=[0.3, 0.7]),
components_distribution=tfd.MultivariateNormalDiag(
loc=[[-1., 1], # component 1
[1, -1]], # component 2
scale_identity_multiplier=[.3, .6]))
gm.mean()
# ==> array([ 0.4, -0.4], dtype=float32)
gm.covariance()
# ==> array([[ 1.119, -0.84],
# [-0.84, 1.119]], dtype=float32)
# Plot PDF contours.
  def meshgrid(x, y=None):
    if y is None:
      y = x
    [gx, gy] = np.meshgrid(x, y, indexing='ij')
    gx, gy = np.float32(gx), np.float32(gy)
    grid = np.concatenate([gx.ravel()[None, :], gy.ravel()[None, :]], axis=0)
    return grid.T.reshape(x.size, y.size, 2)
grid = meshgrid(np.linspace(-2, 2, 100, dtype=np.float32))
plt.contour(grid[..., 0], grid[..., 1], gm.prob(grid).eval());
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
mixture_distribution,
components_distribution,
validate_args=False,
allow_nan_stats=True,
name="MixtureSameFamily"):
"""Construct a `MixtureSameFamily` distribution.
Args:
mixture_distribution: `tfp.distributions.Categorical`-like instance.
Manages the probability of selecting components. The number of
categories must match the rightmost batch dimension of the
`components_distribution`. Must have either scalar `batch_shape` or
`batch_shape` matching `components_distribution.batch_shape[:-1]`.
components_distribution: `tfp.distributions.Distribution`-like instance.
Right-most batch dimension indexes components.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
      ValueError: if not `mixture_distribution.dtype.is_integer`.
ValueError: if mixture_distribution does not have scalar `event_shape`.
ValueError: if `mixture_distribution.batch_shape` and
`components_distribution.batch_shape[:-1]` are both fully defined and
the former is neither scalar nor equal to the latter.
      ValueError: if the number of `mixture_distribution` categories does not
        equal the rightmost batch dimension of `components_distribution`.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
self._mixture_distribution = mixture_distribution
self._components_distribution = components_distribution
self._runtime_assertions = []
s = components_distribution.event_shape_tensor()
s_dim0 = tensor_shape.dimension_value(s.shape[0])
self._event_ndims = (s_dim0
if s_dim0 is not None
else array_ops.shape(s)[0])
if not mixture_distribution.dtype.is_integer:
raise ValueError(
"`mixture_distribution.dtype` ({}) is not over integers".format(
mixture_distribution.dtype.name))
if (mixture_distribution.event_shape.ndims is not None
and mixture_distribution.event_shape.ndims != 0):
raise ValueError("`mixture_distribution` must have scalar `event_dim`s")
elif validate_args:
self._runtime_assertions += [
            check_ops.assert_equal(
                array_ops.size(mixture_distribution.event_shape_tensor()), 0,
                message="`mixture_distribution` must have scalar `event_dim`s"),
]
mdbs = mixture_distribution.batch_shape
cdbs = components_distribution.batch_shape.with_rank_at_least(1)[:-1]
if mdbs.is_fully_defined() and cdbs.is_fully_defined():
if mdbs.ndims != 0 and mdbs != cdbs:
raise ValueError(
"`mixture_distribution.batch_shape` (`{}`) is not "
"compatible with `components_distribution.batch_shape` "
"(`{}`)".format(mdbs.as_list(), cdbs.as_list()))
elif validate_args:
mdbs = mixture_distribution.batch_shape_tensor()
cdbs = components_distribution.batch_shape_tensor()[:-1]
self._runtime_assertions += [
            check_ops.assert_equal(
distribution_util.pick_vector(
mixture_distribution.is_scalar_batch(), cdbs, mdbs),
cdbs,
message=(
"`mixture_distribution.batch_shape` is not "
"compatible with `components_distribution.batch_shape`"))]
km = tensor_shape.dimension_value(
mixture_distribution.logits.shape.with_rank_at_least(1)[-1])
kc = tensor_shape.dimension_value(
components_distribution.batch_shape.with_rank_at_least(1)[-1])
if km is not None and kc is not None and km != kc:
raise ValueError("`mixture_distribution components` ({}) does not "
"equal `components_distribution.batch_shape[-1]` "
"({})".format(km, kc))
elif validate_args:
km = array_ops.shape(mixture_distribution.logits)[-1]
kc = components_distribution.batch_shape_tensor()[-1]
self._runtime_assertions += [
            check_ops.assert_equal(
km, kc,
message=("`mixture_distribution components` does not equal "
"`components_distribution.batch_shape[-1:]`")),
]
elif km is None:
km = array_ops.shape(mixture_distribution.logits)[-1]
self._num_components = km
super(MixtureSameFamily, self).__init__(
dtype=self._components_distribution.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
self._mixture_distribution._graph_parents # pylint: disable=protected-access
+ self._components_distribution._graph_parents), # pylint: disable=protected-access
name=name)
@property
def mixture_distribution(self):
return self._mixture_distribution
@property
def components_distribution(self):
return self._components_distribution
def _batch_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.batch_shape_tensor()[:-1]
def _batch_shape(self):
return self.components_distribution.batch_shape.with_rank_at_least(1)[:-1]
def _event_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.event_shape_tensor()
def _event_shape(self):
return self.components_distribution.event_shape
def _sample_n(self, n, seed):
with ops.control_dependencies(self._runtime_assertions):
x = self.components_distribution.sample(n) # [n, B, k, E]
# TODO(jvdillon): Consider using tf.gather (by way of index unrolling).
npdt = x.dtype.as_numpy_dtype
mask = array_ops.one_hot(
indices=self.mixture_distribution.sample(n), # [n, B]
depth=self._num_components, # == k
on_value=np.ones([], dtype=npdt),
off_value=np.zeros([], dtype=npdt)) # [n, B, k]
mask = distribution_utils.pad_mixture_dimensions(
mask, self, self.mixture_distribution,
self._event_shape().ndims) # [n, B, k, [1]*e]
return math_ops.reduce_sum(
x * mask, axis=-1 - self._event_ndims) # [n, B, E]
def _log_prob(self, x):
with ops.control_dependencies(self._runtime_assertions):
x = self._pad_sample_dims(x)
log_prob_x = self.components_distribution.log_prob(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, axis=-1) # [B, k]
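      # Mixture density: log p(x) = logsumexp_k(log pi_k + log p_k(x)),
      # reduced over the rightmost (component) axis.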
return math_ops.reduce_logsumexp(
log_prob_x + log_mix_prob, axis=-1) # [S, B]
def _mean(self):
with ops.control_dependencies(self._runtime_assertions):
probs = distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, [1]*e]
return math_ops.reduce_sum(
probs * self.components_distribution.mean(),
axis=-1 - self._event_ndims) # [B, E]
def _log_cdf(self, x):
x = self._pad_sample_dims(x)
log_cdf_x = self.components_distribution.log_cdf(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, axis=-1) # [B, k]
return math_ops.reduce_logsumexp(
log_cdf_x + log_mix_prob, axis=-1) # [S, B]
def _variance(self):
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
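      # Here X is the component index, so the two terms reduce to
      #   E[Var(Y|X)] = sum_k pi_k Var_k(Y)            (mean_cond_var below),
      #   Var(E[Y|X]) = sum_k pi_k (mean_k - mean)**2  (var_cond_mean below).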
probs = distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, [1]*e]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.variance(),
axis=-1 - self._event_ndims) # [B, E]
var_cond_mean = math_ops.reduce_sum(
probs * math_ops.squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-1 - self._event_ndims) # [B, E]
return mean_cond_var + var_cond_mean # [B, E]
def _covariance(self):
static_event_ndims = self.event_shape.ndims
if static_event_ndims != 1:
# Covariance is defined only for vector distributions.
raise NotImplementedError("covariance is not implemented")
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = distribution_utils.pad_mixture_dimensions(
distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims),
self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, 1, 1]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.covariance(),
axis=-3) # [B, e, e]
var_cond_mean = math_ops.reduce_sum(
probs * _outer_squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-3) # [B, e, e]
return mean_cond_var + var_cond_mean # [B, e, e]
def _pad_sample_dims(self, x):
with ops.name_scope("pad_sample_dims", values=[x]):
ndims = x.shape.ndims if x.shape.ndims is not None else array_ops.rank(x)
shape = array_ops.shape(x)
d = ndims - self._event_ndims
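      # Insert a size-1 axis just left of the event dims so `x` lines up with
      # the component axis `k` of `components_distribution`, e.g. (with
      # event_ndims == 1) shape [S, B, E] becomes [S, B, 1, E].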
x = array_ops.reshape(x, shape=array_ops.concat([
shape[:d], [1], shape[d:]], axis=0))
return x
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _outer_squared_difference(x, y):
"""Convenience function analogous to tf.squared_difference."""
z = x - y
return z[..., array_ops.newaxis, :] * z[..., array_ops.newaxis]
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/mixture_same_family.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A helper class for inferring Distribution shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class _DistributionShape(object):
"""Manage and manipulate `Distribution` shape.
#### Terminology
Recall that a `Tensor` has:
- `shape`: size of `Tensor` dimensions,
- `ndims`: size of `shape`; number of `Tensor` dimensions,
- `dims`: indexes into `shape`; useful for transpose, reduce.
`Tensor`s sampled from a `Distribution` can be partitioned by `sample_dims`,
`batch_dims`, and `event_dims`. To understand the semantics of these
dimensions, consider when two of the three are fixed and the remaining
is varied:
- `sample_dims`: indexes independent draws from identical
parameterizations of the `Distribution`.
- `batch_dims`: indexes independent draws from non-identical
parameterizations of the `Distribution`.
- `event_dims`: indexes event coordinates from one sample.
The `sample`, `batch`, and `event` dimensions constitute the entirety of a
`Distribution` `Tensor`'s shape.
The dimensions are always in `sample`, `batch`, `event` order.
#### Purpose
This class partitions `Tensor` notions of `shape`, `ndims`, and `dims` into
`Distribution` notions of `sample,` `batch,` and `event` dimensions. That
is, it computes any of:
```
sample_shape batch_shape event_shape
sample_dims batch_dims event_dims
sample_ndims batch_ndims event_ndims
```
for a given `Tensor`, e.g., the result of
`Distribution.sample(sample_shape=...)`.
For a given `Tensor`, this class computes the above table using minimal
information: `batch_ndims` and `event_ndims`.
#### Examples
We show examples of distribution shape semantics.
- Sample dimensions:
Computing summary statistics, i.e., the average is a reduction over sample
dimensions.
```python
sample_dims = [0]
tf.reduce_mean(Normal(loc=1.3, scale=1.).sample_n(1000),
axis=sample_dims) # ~= 1.3
```
- Batch dimensions:
Monte Carlo estimation of a marginal probability:
Average over batch dimensions where batch dimensions are associated with
random draws from a prior.
E.g., suppose we want to find the Monte Carlo estimate of the marginal
distribution of a `Normal` with a random `Laplace` location:
```
P(X=x) = integral P(X=x|y) P(Y=y) dy
~= 1/n sum_{i=1}^n P(X=x|y_i), y_i ~iid Laplace(0,1)
= tf.reduce_mean(Normal(loc=Laplace(0., 1.).sample_n(n=1000),
scale=tf.ones(1000)).prob(x),
axis=batch_dims)
```
The `Laplace` distribution generates a `Tensor` of shape `[1000]`. When
fed to a `Normal`, this is interpreted as 1000 different locations, i.e.,
1000 non-identical Normals. Therefore a single call to `prob(x)` yields
1000 probabilities, one for every location. The average over this batch
yields the marginal.
- Event dimensions:
Computing the determinant of the Jacobian of a function of a random
variable involves a reduction over event dimensions.
E.g., Jacobian of the transform `Y = g(X) = exp(X)`:
```python
tf.compat.v1.div(1., tf.reduce_prod(x, event_dims))
```
We show examples using this class.
Write `S, B, E` for `sample_shape`, `batch_shape`, and `event_shape`.
```python
  # 150 iid samples from one 2-dimensional multivariate Normal.
mu = [0., 0]
sigma = [[1., 0],
[0, 1]]
mvn = MultivariateNormal(mu, sigma)
rand_mvn = mvn.sample(sample_shape=[3, 50])
shaper = DistributionShape(batch_ndims=0, event_ndims=1)
S, B, E = shaper.get_shape(rand_mvn)
# S = [3, 50]
# B = []
# E = [2]
# 12 iid samples from one Wishart with 2x2 events.
sigma = [[1., 0],
[2, 1]]
wishart = Wishart(df=5, scale=sigma)
rand_wishart = wishart.sample(sample_shape=[3, 4])
shaper = DistributionShape(batch_ndims=0, event_ndims=2)
S, B, E = shaper.get_shape(rand_wishart)
# S = [3, 4]
# B = []
# E = [2, 2]
  # 100 iid samples from two non-identical trivariate Normal distributions.
  mu = ... # shape(2, 3)
  sigma = ... # shape(2, 3, 3)
  X = MultivariateNormal(mu, sigma).sample(sample_shape=[4, 25])
# S = [4, 25]
# B = [2]
# E = [3]
```
#### Argument Validation
  When `validate_args=True`, checks that cannot be done during
graph construction are performed at graph execution. This may result in a
performance degradation because data must be switched from GPU to CPU.
  For example, when `validate_args=True` and `event_ndims` is a
non-constant `Tensor`, it is checked to be a non-negative integer at graph
execution. (Same for `batch_ndims`). Constant `Tensor`s and non-`Tensor`
arguments are always checked for correctness since this can be done for
"free," i.e., during graph construction.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
batch_ndims=None,
event_ndims=None,
validate_args=False,
name="DistributionShape"):
"""Construct `DistributionShape` with fixed `batch_ndims`, `event_ndims`.
`batch_ndims` and `event_ndims` are fixed throughout the lifetime of a
`Distribution`. They may only be known at graph execution.
If both `batch_ndims` and `event_ndims` are python scalars (rather than
either being a `Tensor`), functions in this class automatically perform
sanity checks during graph construction.
Args:
      batch_ndims: `Tensor`. Number of `dims` (`rank`) of the batch portion of
        indexes of a `Tensor`. A "batch" is a non-identical distribution, i.e.,
        a Normal with different parameters.
event_ndims: `Tensor`. Number of `dims` (`rank`) of the event portion of
indexes of a `Tensor`. An "event" is what is sampled from a
distribution, i.e., a trivariate Normal has an event shape of [3] and a
4 dimensional Wishart has an event shape of [4, 4].
validate_args: Python `bool`, default `False`. When `True`,
non-`tf.constant` `Tensor` arguments are checked for correctness.
(`tf.constant` arguments are always checked.)
name: Python `str`. The name prepended to Ops created by this class.
Raises:
      ValueError: if either `batch_ndims` or `event_ndims` is `None`,
        negative, or not `int32`.
"""
if batch_ndims is None: raise ValueError("batch_ndims cannot be None")
if event_ndims is None: raise ValueError("event_ndims cannot be None")
self._batch_ndims = batch_ndims
self._event_ndims = event_ndims
self._validate_args = validate_args
with ops.name_scope(name):
self._name = name
with ops.name_scope("init"):
self._batch_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
batch_ndims, name="batch_ndims"))
self._batch_ndims_static, self._batch_ndims_is_0 = (
self._introspect_ndims(self._batch_ndims))
self._event_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
event_ndims, name="event_ndims"))
self._event_ndims_static, self._event_ndims_is_0 = (
self._introspect_ndims(self._event_ndims))
@property
def name(self):
"""Name given to ops created by this class."""
return self._name
@property
def batch_ndims(self):
"""Returns number of dimensions corresponding to non-identical draws."""
return self._batch_ndims
@property
def event_ndims(self):
"""Returns number of dimensions needed to index a sample's coordinates."""
return self._event_ndims
@property
def validate_args(self):
"""Returns True if graph-runtime `Tensor` checks are enabled."""
return self._validate_args
def get_ndims(self, x, name="get_ndims"):
"""Get `Tensor` number of dimensions (rank).
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
ndims: Scalar number of dimensions associated with a `Tensor`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
ndims = x.get_shape().ndims
if ndims is None:
return array_ops.rank(x, name="ndims")
return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims")
def get_sample_ndims(self, x, name="get_sample_ndims"):
"""Returns number of dimensions corresponding to iid draws ("sample").
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_ndims: `Tensor` (0D, `int32`).
Raises:
ValueError: if `sample_ndims` is calculated to be negative.
"""
with self._name_scope(name, values=[x]):
ndims = self.get_ndims(x, name=name)
if self._is_all_constant_helper(ndims, self.batch_ndims,
self.event_ndims):
ndims = tensor_util.constant_value(ndims)
sample_ndims = (ndims - self._batch_ndims_static -
self._event_ndims_static)
if sample_ndims < 0:
raise ValueError(
"expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
(self._batch_ndims_static, self._event_ndims_static, ndims))
return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
else:
with ops.name_scope(name="sample_ndims"):
sample_ndims = ndims - self.batch_ndims - self.event_ndims
if self.validate_args:
sample_ndims = control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(sample_ndims)], sample_ndims)
return sample_ndims
def get_dims(self, x, name="get_dims"):
"""Returns dimensions indexing `sample_shape`, `batch_shape`, `event_shape`.
Example:
```python
x = ... # Tensor with shape [4, 3, 2, 1]
sample_dims, batch_dims, event_dims = _DistributionShape(
batch_ndims=2, event_ndims=1).get_dims(x)
# sample_dims == [0]
# batch_dims == [1, 2]
# event_dims == [3]
# Note that these are not the shape parts, but rather indexes into shape.
```
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_dims: `Tensor` (1D, `int32`).
batch_dims: `Tensor` (1D, `int32`).
event_dims: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
def make_dims(start_sum, size, name):
"""Closure to make dims range."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if self._is_all_constant_helper(size, *start_sum):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
return ops.convert_to_tensor(
list(range(start, stop)), dtype=dtypes.int32, name=name)
else:
start = sum(start_sum)
return math_ops.range(start, start + size)
sample_ndims = self.get_sample_ndims(x, name=name)
return (make_dims([], sample_ndims, name="sample_dims"),
make_dims([sample_ndims], self.batch_ndims, name="batch_dims"),
make_dims([sample_ndims, self.batch_ndims],
self.event_ndims, name="event_dims"))
def get_shape(self, x, name="get_shape"):
"""Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_shape: `Tensor` (1D, `int32`).
batch_shape: `Tensor` (1D, `int32`).
event_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
def slice_shape(start_sum, size, name):
"""Closure to slice out shape."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if (x.get_shape().ndims is not None and
self._is_all_constant_helper(size, *start_sum)):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
slice_ = x.get_shape()[start:stop].as_list()
if all(s is not None for s in slice_):
return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
return array_ops.slice(array_ops.shape(x), [sum(start_sum)], [size])
sample_ndims = self.get_sample_ndims(x, name=name)
return (slice_shape([], sample_ndims,
name="sample_shape"),
slice_shape([sample_ndims], self.batch_ndims,
name="batch_shape"),
slice_shape([sample_ndims, self.batch_ndims], self.event_ndims,
name="event_shape"))
  # TODO(jvdillon): Remove expand_batch_dim, making expand_batch_dim=False
  # the default behavior.
def make_batch_of_event_sample_matrices(
self, x, expand_batch_dim=True,
name="make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from S+B+E to B_+E_+S_.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
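    For example (shape bookkeeping only; the values are illustrative):
    ```python
    # x.shape == S + B + E == [3, 50] + [2] + [4]
    # After this call:
    #   x.shape == B_ + E_ + S_ == [2] + [4] + [150]
    #   sample_shape == [3, 50]
    ```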
Args:
x: `Tensor`.
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims >= 1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# x.shape: S+B+E
sample_shape, batch_shape, event_shape = self.get_shape(x)
event_shape = distribution_util.pick_vector(
self._event_ndims_is_0, [1], event_shape)
if expand_batch_dim:
batch_shape = distribution_util.pick_vector(
self._batch_ndims_is_0, [1], batch_shape)
new_shape = array_ops.concat([[-1], batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: [prod(S)]+B_+E_
x = distribution_util.rotate_transpose(x, shift=-1)
# x.shape: B_+E_+[prod(S)]
return x, sample_shape
  # TODO(jvdillon): Remove expand_batch_dim, making expand_batch_dim=False
  # the default behavior.
def undo_make_batch_of_event_sample_matrices(
self, x, sample_shape, expand_batch_dim=True,
name="undo_make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
This function "reverses" `make_batch_of_event_sample_matrices`.
Args:
x: `Tensor` of shape `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims>=1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `S+B+E`.
"""
with self._name_scope(name, values=[x, sample_shape]):
x = ops.convert_to_tensor(x, name="x")
# x.shape: _B+_E+[prod(S)]
sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
x = distribution_util.rotate_transpose(x, shift=1)
# x.shape: [prod(S)]+_B+_E
if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
if self._batch_ndims_is_0 or self._event_ndims_is_0:
squeeze_dims = []
if self._event_ndims_is_0:
squeeze_dims += [-1]
if self._batch_ndims_is_0 and expand_batch_dim:
squeeze_dims += [1]
if squeeze_dims:
x = array_ops.squeeze(x, axis=squeeze_dims)
# x.shape: [prod(S)]+B+E
_, batch_shape, event_shape = self.get_shape(x)
else:
s = (x.get_shape().as_list() if x.get_shape().is_fully_defined()
else array_ops.shape(x))
batch_shape = s[1:1+self.batch_ndims]
# Since sample_dims=1 and is left-most, we add 1 to the number of
# batch_ndims to get the event start dim.
event_start = array_ops.where(
math_ops.logical_and(expand_batch_dim, self._batch_ndims_is_0),
2, 1 + self.batch_ndims)
event_shape = s[event_start:event_start+self.event_ndims]
new_shape = array_ops.concat([sample_shape, batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: S+B+E
return x
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + [self.batch_ndims, self.event_ndims])) as scope:
yield scope
def _is_all_constant_helper(self, *args):
"""Helper which returns True if all inputs are constant_value."""
return all(tensor_util.constant_value(x) is not None for x in args)
def _assert_non_negative_int32_scalar(self, x):
"""Helper which ensures that input is a non-negative, int32, scalar."""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype != dtypes.int32.base_dtype:
raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
x_value_static = tensor_util.constant_value(x)
if x.get_shape().ndims is not None and x_value_static is not None:
if x.get_shape().ndims != 0:
raise ValueError("%s.ndims=%d is not 0 (scalar)" %
(x.name, x.get_shape().ndims))
if x_value_static < 0:
raise ValueError("%s.value=%d cannot be negative" %
(x.name, x_value_static))
return x
if self.validate_args:
x = control_flow_ops.with_dependencies([
check_ops.assert_rank(x, 0),
check_ops.assert_non_negative(x)], x)
return x
def _introspect_ndims(self, ndims):
"""Helper to establish some properties of input ndims args."""
if self._is_all_constant_helper(ndims):
return (tensor_util.constant_value(ndims),
tensor_util.constant_value(ndims) == 0)
return None, math_ops.equal(ndims, 0)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/shape.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to bridge `Distribution`s and `tf.contrib.learn.estimator` APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators.head import _compute_weighted_loss
from tensorflow.contrib.learn.python.learn.estimators.head import _RegressionHead
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.util import deprecation
__all__ = [
"estimator_head_distribution_regression",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def estimator_head_distribution_regression(make_distribution_fn,
label_dimension=1,
logits_dimension=None,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None):
"""Creates a `Head` for regression under a generic distribution.
Args:
make_distribution_fn: Python `callable` which returns a `tf.Distribution`
instance created using only logits.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
logits_dimension: Number of logits per example. This is the size of the last
dimension of the logits `Tensor` (typically, this has shape
`[batch_size, logits_dimension]`).
Default value: `label_dimension`.
label_name: Python `str`, name of the key in label `dict`. Can be `None` if
label is a `Tensor` (single headed models).
weight_column_name: Python `str` defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: Python `bool`. If `True`, estimator will learn a
centered bias variable for each class. Rest of the model structure learns
the residual after centered bias.
head_name: Python `str`, name of the head. Predictions, summary and metrics
keys are suffixed by `"/" + head_name` and the default variable scope is
`head_name`.
Returns:
An instance of `Head` for generic regression.
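  A usage sketch (hypothetical; `tfd` is assumed to alias
  `tf.contrib.distributions`, and a unit-scale Normal is just one choice):
  ```python
  head = estimator_head_distribution_regression(
      make_distribution_fn=lambda logits: tfd.Normal(loc=logits, scale=1.),
      label_dimension=1)
  ```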
"""
return _DistributionRegressionHead(
make_distribution_fn=make_distribution_fn,
label_dimension=label_dimension,
logits_dimension=logits_dimension,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name)
class _DistributionRegressionHead(_RegressionHead):
"""Creates a _RegressionHead instance from an arbitrary `Distribution`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
make_distribution_fn,
label_dimension,
logits_dimension=None,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None):
"""`Head` for regression.
Args:
make_distribution_fn: Python `callable` which returns a `tf.Distribution`
instance created using only logits.
label_dimension: Number of regression labels per example. This is the
size of the last dimension of the labels `Tensor` (typically, this has
shape `[batch_size, label_dimension]`).
logits_dimension: Number of logits per example. This is the size of the
last dimension of the logits `Tensor` (typically, this has shape
`[batch_size, logits_dimension]`).
Default value: `label_dimension`.
label_name: Python `str`, name of the key in label `dict`. Can be `None`
if label is a tensor (single headed models).
weight_column_name: Python `str` defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: Python `bool`. If `True`, estimator will learn a
centered bias variable for each class. Rest of the model structure
learns the residual after centered bias.
head_name: Python `str`, name of the head. Predictions, summary and
metrics keys are suffixed by `"/" + head_name` and the default variable
scope is `head_name`.
Raises:
TypeError: if `make_distribution_fn` is not `callable`.
"""
if not callable(make_distribution_fn):
raise TypeError("`make_distribution_fn` must be a callable function.")
self._distributions = {}
self._make_distribution_fn = make_distribution_fn
def static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [static_value(x) for x in args]
if any(vec is None for vec in args_):
return array_ops.concat(args, axis=0)
return [val for vec in args_ for val in vec]
def loss_fn(labels, logits, weights=None):
"""Returns the loss of using `logits` to predict `labels`."""
d = self.distribution(logits)
labels_batch_shape = labels.shape.with_rank_at_least(1)[:-1]
labels_batch_shape = (
labels_batch_shape.as_list() if labels_batch_shape.is_fully_defined()
else array_ops.shape(labels)[:-1])
labels = array_ops.reshape(
labels,
shape=concat_vectors(labels_batch_shape, d.event_shape_tensor()))
return _compute_weighted_loss(
loss_unweighted=-d.log_prob(labels),
weight=weights)
def link_fn(logits):
"""Returns the inverse link function at `logits`."""
# Note: What the API calls a "link function" is really the inverse-link
# function, i.e., the "mean".
d = self.distribution(logits)
return d.mean()
super(_DistributionRegressionHead, self).__init__(
label_dimension=label_dimension,
loss_fn=loss_fn,
link_fn=link_fn,
logits_dimension=logits_dimension,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name)
@property
def distributions(self):
"""Returns all distributions created by `DistributionRegressionHead`."""
return self._distributions
def distribution(self, logits, name=None):
"""Retrieves a distribution instance, parameterized by `logits`.
Args:
logits: `float`-like `Tensor` representing the parameters of the
underlying distribution.
      name: The Python `str` name to give to this op.
Default value: "distribution".
Returns:
distribution: `tf.Distribution` instance parameterized by `logits`.
"""
with ops.name_scope(name, "distribution", [logits]):
d = self._distributions.get(logits, None)
if d is None:
d = self._make_distribution_fn(logits)
self._distributions[logits] = d
return d
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/estimator.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Statistical test assertions calibrated for their error rates.
Statistical tests have an inescapable probability of error: a correct
sampler can still fail a test by chance, and an incorrect sampler can
still pass a test by chance. This library is about bounding both of
those error rates. This requires admitting a task-specific notion of
"discrepancy": Correct code will fail rarely, code that misbehaves by
more than the discrepancy will pass rarely, and nothing reliable can
be said about code that misbehaves, but misbehaves by less than the
discrepancy.
# Example
Consider testing that the mean of a scalar probability distribution P
is some expected constant. Suppose the support of P is the interval
`[0, 1]`. Then you might do this:
```python
from tensorflow_probability.python.distributions.internal import statistical_testing
expected_mean = ...
num_samples = 5000
samples = ... draw 5000 samples from P
# Check that the mean looks right
check1 = statistical_testing.assert_true_mean_equal_by_dkwm(
samples, low=0., high=1., expected=expected_mean,
false_fail_rate=1e-6)
# Check that the difference in means detectable with 5000 samples is
# small enough
check2 = tf.compat.v1.assert_less(
statistical_testing.min_discrepancy_of_true_means_detectable_by_dkwm(
num_samples, low=0., high=1.0,
false_fail_rate=1e-6, false_pass_rate=1e-6),
0.01)
# Be sure to execute both assertion ops
sess.run([check1, check2])
```
The second assertion is an instance of experiment design. It's a
deterministic computation (independent of the code under test) that
checks that `5000` samples is enough to reliably resolve mean
differences of `0.01` or more. Here "reliably" means that if the code
under test is correct, the probability of drawing an unlucky sample
that causes this test to fail is at most 1e-6; and if the code under
test is incorrect enough that its true mean is 0.01 more or less than
expected, then the probability of drawing a "lucky" sample that causes
the test to false-pass is also at most 1e-6.
# Overview
Every function in this library can be characterized in terms of:
- The property being tested, such as the full density of the
distribution under test, or just its true mean, or a single
Bernoulli probability, etc.
  - The relation being asserted, e.g., whether the mean is less than,
    greater than, or equal to the given expected value.
- The stochastic bound being relied upon, such as the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
or the CDF of the binomial distribution (for assertions about
Bernoulli probabilities).
- The number of sample sets in the statistical test. For example,
testing equality of means has a one-sample variant, where the
expected mean is given exactly, and a two-sample variant, where the
expected mean is itself given by a set of samples (e.g., from an
alternative algorithm).
- What operation(s) of the test are to be performed. Each test has
three of these:
1. `assert` executes the test. Specifically, it creates a TF op that
produces an error if it has enough evidence to prove that the
property under test is violated. These functions depend on the
desired false failure rate, because that determines the sizes of
appropriate confidence intervals, etc.
2. `min_discrepancy` computes the smallest difference reliably
detectable by that test, given the sample count and error rates.
What it's a difference of is test-specific. For example, a test
for equality of means would make detection guarantees about the
       difference between the true means.
3. `min_num_samples` computes the minimum number of samples needed
to reliably detect a given discrepancy with given error rates.
The latter two are for experimental design, and are meant to be
usable either interactively or inline in the overall test method.
This library follows a naming convention, to make room for every
combination of the above. A name mentions the operation first, then
the property, then the relation, then the bound, then, if the test
takes more than one set of samples, a token indicating this. For
example, `assert_true_mean_equal_by_dkwm` (which is implicitly
one-sample). Each name is a grammatically sound noun phrase (or verb
phrase, for the asserts).
# Asymptotic properties
The number of samples needed tends to scale as `O(1/discrepancy**2)` and
as `O(log(1/error_rate))`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
__all__ = [
"true_mean_confidence_interval_by_dkwm",
"assert_true_mean_equal_by_dkwm",
"min_discrepancy_of_true_means_detectable_by_dkwm",
"min_num_samples_for_dkwm_mean_test",
"assert_true_mean_in_interval_by_dkwm",
"assert_true_mean_equal_by_dkwm_two_sample",
"min_discrepancy_of_true_means_detectable_by_dkwm_two_sample",
"min_num_samples_for_dkwm_mean_two_sample_test",
]
def _batch_sort_vector(x, ascending=True, name=None):
with ops.name_scope(name, "_batch_sort_vector", [x]):
x = ops.convert_to_tensor(x, name="x")
n = array_ops.shape(x)[-1]
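    # `top_k` sorts descending; negating before and after yields an
    # ascending sort along the last axis.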
if ascending:
y, _ = nn_ops.top_k(-x, k=n, sorted=True)
y = -y
else:
y, _ = nn_ops.top_k(x, k=n, sorted=True)
y.set_shape(x.shape)
return y
def _do_maximum_mean(samples, envelope, high, name=None):
"""Common code between maximum_mean and minimum_mean."""
with ops.name_scope(name, "do_maximum_mean", [samples, envelope, high]):
n = array_ops.rank(samples)
# Move the batch dimension of `samples` to the rightmost position,
# where the _batch_sort_vector function wants it.
perm = array_ops.concat([math_ops.range(1, n), [0]], axis=0)
samples = array_ops.transpose(samples, perm)
samples = _batch_sort_vector(samples)
# The maximum mean is given by taking `envelope`-worth of
# probability from the smallest samples and moving it to the
# maximum value. This amounts to:
# - ignoring the smallest k samples, where `k/n < envelope`
# - taking a `1/n - (envelope - k/n)` part of the index k sample
# - taking all the other samples
# - and adding `envelope * high` at the end.
# The following is a vectorized and batched way of computing this.
# `max_mean_contrib` is a mask implementing the previous.
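    # A small worked instance (illustrative only): n = 4 sorted samples,
    # envelope = 0.3, high = 1. gives per-sample weights
    # clip([0.25, 0.5, 0.75, 1.0] - 0.3, 0., 0.25) == [0., 0.2, 0.25, 0.25],
    # plus the extra term 0.3 * high for the displaced probability mass.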
batch_size = array_ops.shape(samples)[-1]
batch_size = math_ops.cast(batch_size, dtype=samples.dtype.base_dtype)
step = 1. / batch_size
cum_steps = step * math_ops.range(
1, batch_size + 1, dtype=samples.dtype.base_dtype)
max_mean_contrib = clip_ops.clip_by_value(
cum_steps - envelope[..., array_ops.newaxis],
clip_value_min=0.,
clip_value_max=step)
return math_ops.reduce_sum(
samples * max_mean_contrib, axis=-1) + envelope * high
def _maximum_mean(samples, envelope, high, name=None):
"""Returns a stochastic upper bound on the mean of a scalar distribution.
The idea is that if the true CDF is within an `eps`-envelope of the
empirical CDF of the samples, and the support is bounded above, then
the mean is bounded above as well. In symbols,
```none
sup_x(|F_n(x) - F(x)|) < eps
```
The 0th dimension of `samples` is interpreted as independent and
identically distributed samples. The remaining dimensions are
broadcast together with `envelope` and `high`, and operated on
separately.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `envelope` and `high`.
envelope: Floating-point `Tensor` of sizes of admissible CDF
envelopes (i.e., the `eps` above).
high: Floating-point `Tensor` of upper bounds on the distributions'
supports. `samples <= high`.
name: A name for this operation (optional).
Returns:
bound: Floating-point `Tensor` of upper bounds on the true means.
Raises:
InvalidArgumentError: If some `sample` is found to be larger than
the corresponding `high`.
"""
with ops.name_scope(name, "maximum_mean", [samples, envelope, high]):
samples = ops.convert_to_tensor(samples, name="samples")
envelope = ops.convert_to_tensor(envelope, name="envelope")
high = ops.convert_to_tensor(high, name="high")
xmax = math_ops.reduce_max(samples, axis=[0])
msg = "Given sample maximum value exceeds expectations"
check_op = check_ops.assert_less_equal(xmax, high, message=msg)
with ops.control_dependencies([check_op]):
return array_ops.identity(_do_maximum_mean(samples, envelope, high))
def _minimum_mean(samples, envelope, low, name=None):
"""Returns a stochastic lower bound on the mean of a scalar distribution.
The idea is that if the true CDF is within an `eps`-envelope of the
empirical CDF of the samples, and the support is bounded below, then
the mean is bounded below as well. In symbols,
```none
sup_x(|F_n(x) - F(x)|) < eps
```
The 0th dimension of `samples` is interpreted as independent and
identically distributed samples. The remaining dimensions are
broadcast together with `envelope` and `low`, and operated on
separately.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `envelope` and `low`.
envelope: Floating-point `Tensor` of sizes of admissible CDF
envelopes (i.e., the `eps` above).
low: Floating-point `Tensor` of lower bounds on the distributions'
supports. `samples >= low`.
name: A name for this operation (optional).
Returns:
bound: Floating-point `Tensor` of lower bounds on the true means.
Raises:
InvalidArgumentError: If some `sample` is found to be smaller than
the corresponding `low`.
"""
with ops.name_scope(name, "minimum_mean", [samples, envelope, low]):
samples = ops.convert_to_tensor(samples, name="samples")
envelope = ops.convert_to_tensor(envelope, name="envelope")
low = ops.convert_to_tensor(low, name="low")
xmin = math_ops.reduce_min(samples, axis=[0])
msg = "Given sample minimum value falls below expectations"
check_op = check_ops.assert_greater_equal(xmin, low, message=msg)
with ops.control_dependencies([check_op]):
return - _do_maximum_mean(-samples, envelope, -low)
def _dkwm_cdf_envelope(n, error_rate, name=None):
"""Computes the CDF envelope that the DKWM inequality licenses.
The [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
gives a stochastic bound on the distance between the true cumulative
distribution function (CDF) of any distribution and its empirical
CDF. To wit, for `n` iid samples from any distribution with CDF F,
```none
P(sup_x |F_n(x) - F(x)| > eps) < 2exp(-2n eps^2)
```
This function computes the envelope size `eps` as a function of the
number of samples `n` and the desired limit on the left-hand
probability above.
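  For example (a worked instance of the formula, not an additional guarantee):
  `n = 5000` samples with `error_rate = 1e-6` give
  `eps = sqrt(-log(5e-7) / 10000) ~= 0.038`.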
Args:
n: `Tensor` of numbers of samples drawn.
error_rate: Floating-point `Tensor` of admissible rates of mistakes.
name: A name for this operation (optional).
Returns:
eps: `Tensor` of maximum distances the true CDF can be from the
empirical CDF. This scales as `O(sqrt(-log(error_rate)))` and
as `O(1 / sqrt(n))`. The shape is the broadcast of `n` and
`error_rate`.
"""
with ops.name_scope(name, "dkwm_cdf_envelope", [n, error_rate]):
n = math_ops.cast(n, dtype=error_rate.dtype)
return math_ops.sqrt(-gen_math_ops.log(error_rate / 2.) / (2. * n))
def _check_shape_dominates(samples, parameters):
"""Check that broadcasting `samples` against `parameters` does not expand it.
Why? Because I want to be very sure that the samples tensor is not
accidentally enlarged by broadcasting against tensors that are
supposed to be describing the distribution(s) sampled from, lest the
sample counts end up inflated.
Args:
samples: A `Tensor` whose shape is to be protected against broadcasting.
    parameters: A list of `Tensor`s that are parameters for the statistical test.
Returns:
samples: Return original `samples` with control dependencies attached
to ensure no broadcasting.
"""
def check(t):
samples_batch_shape = array_ops.shape(samples)[1:]
broadcasted_batch_shape = array_ops.broadcast_dynamic_shape(
samples_batch_shape, array_ops.shape(t))
# This rank check ensures that I don't get a wrong answer from the
# _shapes_ broadcasting against each other.
samples_batch_ndims = array_ops.size(samples_batch_shape)
ge = check_ops.assert_greater_equal(
samples_batch_ndims, array_ops.rank(t))
eq = check_ops.assert_equal(samples_batch_shape, broadcasted_batch_shape)
return ge, eq
checks = list(itertools.chain(*[check(t) for t in parameters]))
with ops.control_dependencies(checks):
return array_ops.identity(samples)
def true_mean_confidence_interval_by_dkwm(
samples, low, high, error_rate=1e-6, name=None):
"""Computes a confidence interval for the mean of a scalar distribution.
In batch mode, computes confidence intervals for all distributions
in the batch (which need not be identically distributed).
Relies on the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
The probability (over the randomness of drawing the given samples)
that any true mean is outside the corresponding returned interval is
  no more than the given `error_rate`. The sizes of the intervals scale as
`O(1 / sqrt(#samples))`, as `O(high - low)`, and as `O(-log(error_rate))`.
Note that `error_rate` is a total error rate for all the confidence
intervals in the batch. As such, if the batch is nontrivial, the
error rate is not broadcast but divided (evenly) among the batch
members.
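  A usage sketch (the sample values and shapes here are assumptions, not
  requirements beyond those stated below):
  ```python
  samples = ...  # shape [5000, ...], entries bounded in [0., 1.]
  low_bound, high_bound = true_mean_confidence_interval_by_dkwm(
      samples, low=0., high=1., error_rate=1e-6)
  # With probability at least 1 - 1e-6 over the draw of `samples`, every
  # true mean lies in its corresponding [low_bound, high_bound] interval.
  ```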
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `low` and `high`.
The support is bounded: `low <= samples <= high`.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
    error_rate: *Scalar* floating-point `Tensor`; admissible total rate
      of mistakes.
name: A name for this operation (optional).
Returns:
low: A floating-point `Tensor` of stochastic lower bounds on the
true means.
high: A floating-point `Tensor` of stochastic upper bounds on the
true means.
"""
with ops.name_scope(
name, "true_mean_confidence_interval_by_dkwm",
[samples, low, high, error_rate]):
samples = ops.convert_to_tensor(samples, name="samples")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
error_rate = ops.convert_to_tensor(error_rate, name="error_rate")
samples = _check_shape_dominates(samples, [low, high])
check_ops.assert_scalar(error_rate) # Static shape
error_rate = _itemwise_error_rate(error_rate, [low, high], samples)
n = array_ops.shape(samples)[0]
envelope = _dkwm_cdf_envelope(n, error_rate)
min_mean = _minimum_mean(samples, envelope, low)
max_mean = _maximum_mean(samples, envelope, high)
return min_mean, max_mean
def _itemwise_error_rate(
total_error_rate, param_tensors, sample_tensor=None, name=None):
with ops.name_scope(
name, "itemwise_error_rate",
[total_error_rate, param_tensors, sample_tensor]):
result_shape = [1]
for p_tensor in param_tensors:
result_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(p_tensor), result_shape)
if sample_tensor is not None:
result_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(sample_tensor)[1:], result_shape)
num_items = math_ops.reduce_prod(result_shape)
return total_error_rate / math_ops.cast(
num_items, dtype=total_error_rate.dtype)
def assert_true_mean_equal_by_dkwm(
samples, low, high, expected, false_fail_rate=1e-6, name=None):
"""Asserts the mean of the given distribution is as expected.
More precisely, fails if there is enough evidence (using the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
that the true mean of some distribution from which the given samples are
drawn is _not_ the given expected mean with statistical significance
`false_fail_rate` or stronger, otherwise passes. If you also want to
check that you are gathering enough evidence that a pass is not
spurious, see `min_num_samples_for_dkwm_mean_test` and
`min_discrepancy_of_true_means_detectable_by_dkwm`.
Note that `false_fail_rate` is a total false failure rate for all
the assertions in the batch. As such, if the batch is nontrivial,
the assertion will insist on stronger evidence to fail any one member.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `low` and `high`.
The support is bounded: `low <= samples <= high`.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
expected: Floating-point `Tensor` of expected true means.
    false_fail_rate: *Scalar* floating-point `Tensor`; admissible total
      rate of mistakes.
name: A name for this operation (optional).
Returns:
check: Op that raises `InvalidArgumentError` if any expected mean is
outside the corresponding confidence interval.
"""
with ops.name_scope(
name, "assert_true_mean_equal_by_dkwm",
[samples, low, high, expected, false_fail_rate]):
return assert_true_mean_in_interval_by_dkwm(
samples, low, high, expected, expected, false_fail_rate)
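# Illustrative sketch (added; not part of the original module): typical use
# inside a sampler test, with `dist` a hypothetical bounded distribution and
# a TF 1.x graph/session workflow assumed. Evaluating `check` passes silently
# or raises `InvalidArgumentError`.
#
#   samples = dist.sample([10000])
#   check = assert_true_mean_equal_by_dkwm(
#       samples, low=0., high=1., expected=dist.mean(), false_fail_rate=1e-6)
#   with tf.control_dependencies([check]):
#     ...  # Downstream ops run only after the check passes.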
def min_discrepancy_of_true_means_detectable_by_dkwm(
n, low, high, false_fail_rate, false_pass_rate, name=None):
"""Returns the minimum mean discrepancy that a DKWM-based test can detect.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Note that `false_fail_rate` is a total false failure rate for all
the tests in the batch. As such, if the batch is nontrivial, each
member will demand more samples. The `false_pass_rate` is also
interpreted as a total, but is treated asymmetrically: If each test
in the batch detects its corresponding discrepancy with probability
at least `1 - false_pass_rate`, then running all those tests and
failing if any one fails will jointly detect all those discrepancies
with the same `false_pass_rate`.
Args:
n: `Tensor` of numbers of samples to be drawn from the distributions
of interest.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
false_fail_rate: *Scalar* floating-point `Tensor` giving the admissible
total rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` giving the admissible
rate of false passes.
name: A name for this operation (optional).
Returns:
discr: `Tensor` of lower bounds on the distances between true
means detectable by a DKWM-based test.
For each batch member `i`, of `K` total, drawing `n[i]` samples from
some scalar distribution supported on `[low[i], high[i]]` is enough
to detect a difference in means of size `discr[i]` or more.
Specifically, we guarantee that (a) if the true mean is the expected
mean (resp. in the expected interval), then `assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will fail with
probability at most `false_fail_rate / K` (which amounts to
`false_fail_rate` if applied to the whole batch at once), and (b) if
the true mean differs from the expected mean (resp. falls outside
the expected interval) by at least `discr[i]`,
`assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will pass with
probability at most `false_pass_rate`.
The detectable discrepancy scales as
- `O(high[i] - low[i])`,
- `O(1 / sqrt(n[i]))`,
- `O(-log(false_fail_rate/K))`, and
- `O(-log(false_pass_rate))`.
"""
with ops.name_scope(
name, "min_discrepancy_of_true_means_detectable_by_dkwm",
[n, low, high, false_fail_rate, false_pass_rate]):
n = ops.convert_to_tensor(n, name="n")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Algorithm: Assume a true CDF F. The DKWM inequality gives a
# stochastic bound on how far the observed empirical CDF F_n can be.
# Then, using the DKWM inequality again gives a stochastic bound on
# the farthest candidate true CDF F' that
# true_mean_confidence_interval_by_dkwm might consider. At worst, these
# errors may go in the same direction, so the distance between F and
# F' is bounded by the sum.
# On batching: false fail rates sum, so I need to reduce
# the input to account for the batching. False pass rates
# max, so I don't.
sampling_envelope = _dkwm_cdf_envelope(n, false_pass_rate)
false_fail_rate = _itemwise_error_rate(false_fail_rate, [n, low, high])
analysis_envelope = _dkwm_cdf_envelope(n, false_fail_rate)
return (high - low) * (sampling_envelope + analysis_envelope)
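# Worked illustration (added; not part of the original module): the DKWM
# inequality P(sup_x |F_n(x) - F(x)| > eps) <= 2 exp(-2 n eps**2) gives a
# CDF envelope of sqrt(-log(rate / 2) / (2 * n)) for an admissible rate
# `rate` on `n` samples (the inverse of the sample-size formulas in
# `min_num_samples_for_dkwm_mean_test` below). With n = 10000, support
# [0, 1], and both rates 1e-6 for a batch of one, each envelope is about
# 0.027, so the detectable mean discrepancy is roughly
# (1 - 0) * (0.027 + 0.027) ~= 0.054.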
def min_num_samples_for_dkwm_mean_test(
discrepancy, low, high,
false_fail_rate=1e-6, false_pass_rate=1e-6, name=None):
"""Returns how many samples suffice for a one-sample DKWM mean test.
To wit, returns an upper bound on the number of samples necessary to
guarantee detecting a mean difference of at least the given
`discrepancy`, with the given `false_fail_rate` and `false_pass_rate`,
using the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
on a scalar distribution supported on `[low, high]`.
Args:
discrepancy: Floating-point `Tensor` of mean differences that the test
must detect with probability at least `1 - false_pass_rate` (i.e., that
may go undetected with probability at most `false_pass_rate`).
low: `Tensor` of lower bounds on the distributions' support.
high: `Tensor` of upper bounds on the distributions' support.
false_fail_rate: *Scalar* floating-point `Tensor` giving the admissible
total rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` giving the admissible
rate of false passes.
name: A name for this operation (optional).
Returns:
n: `Tensor` of numbers of samples to be drawn from the distributions
of interest.
The `discrepancy`, `low`, and `high` tensors must have
broadcast-compatible shapes.
For each batch member `i`, of `K` total, drawing `n[i]` samples from
some scalar distribution supported on `[low[i], high[i]]` is enough
to detect a difference in means of size `discrepancy[i]` or more.
Specifically, we guarantee that (a) if the true mean is the expected
mean (resp. in the expected interval), then `assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will fail with
probability at most `false_fail_rate / K` (which amounts to
`false_fail_rate` if applied to the whole batch at once), and (b) if
the true mean differs from the expected mean (resp. falls outside
the expected interval) by at least `discrepancy[i]`,
`assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will pass with
probability at most `false_pass_rate`.
The required number of samples scales
as `O((high[i] - low[i])**2)`, `O(-log(false_fail_rate/K))`,
`O(-log(false_pass_rate))`, and `O(1 / discrepancy[i]**2)`.
"""
with ops.name_scope(
name, "min_num_samples_for_dkwm_mean_test",
[low, high, false_fail_rate, false_pass_rate, discrepancy]):
discrepancy = ops.convert_to_tensor(
discrepancy, name="discrepancy")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Could choose to cleverly allocate envelopes, but this is sound.
envelope1 = discrepancy / (2. * (high - low))
envelope2 = envelope1
false_fail_rate = _itemwise_error_rate(
false_fail_rate, [low, high, discrepancy])
n1 = -math_ops.log(false_fail_rate / 2.) / (2. * envelope1**2)
n2 = -math_ops.log(false_pass_rate / 2.) / (2. * envelope2**2)
return math_ops.maximum(n1, n2)
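# Illustrative sketch (added; not part of the original module): sizing a test
# up front. With discrepancy 0.05 on support [0, 1] and the default error
# rates, envelope = 0.05 / 2 = 0.025 and
# n = -log(1e-6 / 2) / (2 * 0.025**2) ~= 11,600, so the DKWM mean assertions
# should be fed at least that many samples.
#
#   n = min_num_samples_for_dkwm_mean_test(discrepancy=0.05, low=0., high=1.)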
def assert_true_mean_in_interval_by_dkwm(
samples, low, high, expected_low, expected_high,
false_fail_rate=1e-6, name=None):
"""Asserts the mean of the given distribution is in the given interval.
More precisely, fails if there is enough evidence (using the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
that the mean of the distribution from which the given samples are
drawn is _outside_ the given interval with statistical significance
`false_fail_rate` or stronger, otherwise passes. If you also want
to check that you are gathering enough evidence that a pass is not
spurious, see `min_num_samples_for_dkwm_mean_test` and
`min_discrepancy_of_true_means_detectable_by_dkwm`.
Note that `false_fail_rate` is a total false failure rate for all
the assertions in the batch. As such, if the batch is nontrivial,
the assertion will insist on stronger evidence to fail any one member.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `low` and `high`.
The support is bounded: `low <= samples <= high`.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
expected_low: Floating-point `Tensor` of lower bounds on the
expected true means.
expected_high: Floating-point `Tensor` of upper bounds on the
expected true means.
false_fail_rate: *Scalar* floating-point `Tensor` giving the admissible
total rate of mistakes.
name: A name for this operation (optional).
Returns:
check: Op that raises `InvalidArgumentError` if any expected mean
interval does not overlap with the corresponding confidence
interval.
"""
with ops.name_scope(
name, "assert_true_mean_in_interval_by_dkwm",
[samples, low, high, expected_low, expected_high, false_fail_rate]):
samples = ops.convert_to_tensor(samples, name="samples")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
expected_low = ops.convert_to_tensor(expected_low, name="expected_low")
expected_high = ops.convert_to_tensor(expected_high, name="expected_high")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
samples = _check_shape_dominates(
samples, [low, high, expected_low, expected_high])
min_mean, max_mean = true_mean_confidence_interval_by_dkwm(
samples, low, high, false_fail_rate)
# Assert that the interval [min_mean, max_mean] intersects the
# interval [expected_low, expected_high]. This is true if
# max_mean >= expected_low and min_mean <= expected_high.
# By DeMorgan's law, that's also equivalent to
# not (max_mean < expected_low or min_mean > expected_high),
# which is a way of saying the two intervals are not disjoint.
check_confidence_interval_can_intersect = check_ops.assert_greater_equal(
max_mean, expected_low, message="Confidence interval does not "
"intersect: true mean smaller than expected")
with ops.control_dependencies([check_confidence_interval_can_intersect]):
return check_ops.assert_less_equal(
min_mean, expected_high, message="Confidence interval does not "
"intersect: true mean greater than expected")
def assert_true_mean_equal_by_dkwm_two_sample(
samples1, low1, high1, samples2, low2, high2,
false_fail_rate=1e-6, name=None):
"""Asserts the means of the given distributions are equal.
More precisely, fails if there is enough evidence (using the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
that the means of the distributions from which the given samples are
drawn are _not_ equal with statistical significance `false_fail_rate`
or stronger, otherwise passes. If you also want to check that you
are gathering enough evidence that a pass is not spurious, see
`min_num_samples_for_dkwm_mean_two_sample_test` and
`min_discrepancy_of_true_means_detectable_by_dkwm_two_sample`.
Note that `false_fail_rate` is a total false failure rate for all
the assertions in the batch. As such, if the batch is nontrivial,
the assertion will insist on stronger evidence to fail any one member.
Args:
samples1: Floating-point `Tensor` of samples from the
distribution(s) A. Entries are assumed IID across the 0th
dimension. The other dimensions must broadcast with `low1`,
`high1`, `low2`, and `high2`.
The support is bounded: `low1 <= samples1 <= high1`.
low1: Floating-point `Tensor` of lower bounds on the supports of the
distributions A.
high1: Floating-point `Tensor` of upper bounds on the supports of
the distributions A.
samples2: Floating-point `Tensor` of samples from the
distribution(s) B. Entries are assumed IID across the 0th
dimension. The other dimensions must broadcast with `low1`,
`high1`, `low2`, and `high2`.
The support is bounded: `low2 <= samples2 <= high2`.
low2: Floating-point `Tensor` of lower bounds on the supports of the
distributions B.
high2: Floating-point `Tensor` of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* floating-point `Tensor` giving the admissible
total rate of mistakes.
name: A name for this operation (optional).
Returns:
check: Op that raises `InvalidArgumentError` if, for any batch member,
the confidence intervals for the two corresponding true means do not
overlap.
"""
with ops.name_scope(
name, "assert_true_mean_equal_by_dkwm_two_sample",
[samples1, low1, high1, samples2, low2, high2, false_fail_rate]):
samples1 = ops.convert_to_tensor(samples1, name="samples1")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
samples2 = ops.convert_to_tensor(samples2, name="samples2")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
samples1 = _check_shape_dominates(samples1, [low1, high1])
samples2 = _check_shape_dominates(samples2, [low2, high2])
compatible_samples = check_ops.assert_equal(
array_ops.shape(samples1)[1:], array_ops.shape(samples2)[1:])
with ops.control_dependencies([compatible_samples]):
# Could in principle play games with cleverly allocating
# significance instead of the even split below. It may be possible
# to get tighter intervals, in order to obtain a higher power test.
# Any allocation strategy that depends only on the support bounds
# and sample counts should be valid; however, because the intervals
# scale as O(-log(false_fail_rate)), there doesn't seem to be much
# room to win.
min_mean_2, max_mean_2 = true_mean_confidence_interval_by_dkwm(
samples2, low2, high2, false_fail_rate / 2.)
return assert_true_mean_in_interval_by_dkwm(
samples1, low1, high1, min_mean_2, max_mean_2, false_fail_rate / 2.)
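# Illustrative sketch (added; not part of the original module): comparing a
# sampler under test against a reference sampler for the same distribution,
# assuming both emit values in [0, 1]. The sample tensors are hypothetical.
#
#   check = assert_true_mean_equal_by_dkwm_two_sample(
#       candidate_samples, 0., 1., reference_samples, 0., 1.,
#       false_fail_rate=1e-6)
#   # Evaluating `check` raises InvalidArgumentError only if the two DKWM
#   # confidence intervals on the true means fail to overlap.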
def min_discrepancy_of_true_means_detectable_by_dkwm_two_sample(
n1, low1, high1, n2, low2, high2,
false_fail_rate, false_pass_rate, name=None):
"""Returns the minimum mean discrepancy for a two-sample DKWM-based test.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Note that `false_fail_rate` is a total false failure rate for all
the tests in the batch. As such, if the batch is nontrivial, each
member will demand more samples. The `false_pass_rate` is also
interpreted as a total, but is treated asymmetrically: If each test
in the batch detects its corresponding discrepancy with probability
at least `1 - false_pass_rate`, then running all those tests and
failing if any one fails will jointly detect all those discrepancies
with the same `false_pass_rate`.
Args:
n1: `Tensor` of numbers of samples to be drawn from the distributions A.
low1: Floating-point `Tensor` of lower bounds on the supports of the
distributions A.
high1: Floating-point `Tensor` of upper bounds on the supports of
the distributions A.
n2: `Tensor` of numbers of samples to be drawn from the distributions B.
low2: Floating-point `Tensor` of lower bounds on the supports of the
distributions B.
high2: Floating-point `Tensor` of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* floating-point `Tensor` giving the admissible
total rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` giving the admissible
rate of false passes.
name: A name for this operation (optional).
Returns:
discr: `Tensor` of lower bounds on the distances between true means
detectable by a two-sample DKWM-based test.
For each batch member `i`, of `K` total, drawing `n1[i]` samples
from scalar distribution A supported on `[low1[i], high1[i]]` and `n2[i]`
samples from scalar distribution B supported on `[low2[i], high2[i]]`
is enough to detect a difference in their true means of size
`discr[i]` or more. Specifically, we guarantee that (a) if their
true means are equal, `assert_true_mean_equal_by_dkwm_two_sample`
will fail with probability at most `false_fail_rate/K` (which
amounts to `false_fail_rate` if applied to the whole batch at once),
and (b) if their true means differ by at least `discr[i]`,
`assert_true_mean_equal_by_dkwm_two_sample` will pass with
probability at most `false_pass_rate`.
The detectable discrepancy scales as
- `O(high1[i] - low1[i])`, `O(high2[i] - low2[i])`,
- `O(1 / sqrt(n1[i]))`, `O(1 / sqrt(n2[i]))`,
- `O(-log(false_fail_rate/K))`, and
- `O(-log(false_pass_rate))`.
"""
with ops.name_scope(
name, "min_discrepancy_of_true_means_detectable_by_dkwm_two_sample",
[n1, low1, high1, n2, low2, high2, false_fail_rate, false_pass_rate]):
n1 = ops.convert_to_tensor(n1, name="n1")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
n2 = ops.convert_to_tensor(n2, name="n2")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
det_disc1 = min_discrepancy_of_true_means_detectable_by_dkwm(
n1, low1, high1, false_fail_rate / 2., false_pass_rate / 2.)
det_disc2 = min_discrepancy_of_true_means_detectable_by_dkwm(
n2, low2, high2, false_fail_rate / 2., false_pass_rate / 2.)
return det_disc1 + det_disc2
def min_num_samples_for_dkwm_mean_two_sample_test(
discrepancy, low1, high1, low2, high2,
false_fail_rate=1e-6, false_pass_rate=1e-6, name=None):
"""Returns how many samples suffice for a two-sample DKWM mean test.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Args:
discrepancy: Floating-point `Tensor` of mean differences that the test
must detect with probability at least `1 - false_pass_rate` (i.e., that
may go undetected with probability at most `false_pass_rate`).
low1: Floating-point `Tensor` of lower bounds on the supports of the
distributions A.
high1: Floating-point `Tensor` of upper bounds on the supports of
the distributions A.
low2: Floating-point `Tensor` of lower bounds on the supports of the
distributions B.
high2: Floating-point `Tensor` of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* floating-point `Tensor` giving the admissible
total rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` giving the admissible
rate of false passes.
name: A name for this operation (optional).
Returns:
n1: `Tensor` of numbers of samples to be drawn from the distributions A.
n2: `Tensor` of numbers of samples to be drawn from the distributions B.
For each batch member `i`, of `K` total, drawing `n1[i]` samples
from scalar distribution A supported on `[low1[i], high1[i]]` and `n2[i]`
samples from scalar distribution B supported on `[low2[i], high2[i]]`
is enough to detect a difference in their true means of size
`discrepancy[i]` or more. Specifically, we guarantee that (a) if their
true means are equal, `assert_true_mean_equal_by_dkwm_two_sample`
will fail with probability at most `false_fail_rate/K` (which
amounts to `false_fail_rate` if applied to the whole batch at once),
and (b) if their true means differ by at least `discrepancy[i]`,
`assert_true_mean_equal_by_dkwm_two_sample` will pass with
probability at most `false_pass_rate`.
The required number of samples scales as
- `O((high1[i] - low1[i])**2)`, `O((high2[i] - low2[i])**2)`,
- `O(-log(false_fail_rate/K))`,
- `O(-log(false_pass_rate))`, and
- `O(1 / discrepancy[i]**2)`.
"""
with ops.name_scope(
name, "min_num_samples_for_dkwm_mean_two_sample_test",
[low1, high1, low2, high2,
false_fail_rate, false_pass_rate, discrepancy]):
discrepancy = ops.convert_to_tensor(discrepancy, name="discrepancy")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Could choose to cleverly allocate discrepancy tolerances and
# failure probabilities, but this is sound.
n1 = min_num_samples_for_dkwm_mean_test(
discrepancy / 2., low1, high1,
false_fail_rate / 2., false_pass_rate / 2.)
n2 = min_num_samples_for_dkwm_mean_test(
discrepancy / 2., low2, high2,
false_fail_rate / 2., false_pass_rate / 2.)
return n1, n2
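# Note (added; not part of the original module): the even split above means
# each of `n1` and `n2` is roughly four times the one-sample requirement for
# the same total discrepancy, since half the discrepancy enters the
# one-sample formula squared.
#
#   n1, n2 = min_num_samples_for_dkwm_mean_two_sample_test(
#       discrepancy=0.05, low1=0., high1=1., low2=0., high2=1.)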
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/statistical_testing.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distributions
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = ["QuantizedDistribution"]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _logsum_expbig_minus_expsmall(big, small):
"""Stable evaluation of `Log[exp{big} - exp{small}]`.
To work correctly, we should have the pointwise relation: `small <= big`.
Args:
big: Floating-point `Tensor`
small: Floating-point `Tensor` with same `dtype` as `big` and broadcastable
shape.
Returns:
`Tensor` of same `dtype` of `big` and broadcast shape.
"""
with ops.name_scope("logsum_expbig_minus_expsmall", values=[small, big]):
return math_ops.log(1. - math_ops.exp(small - big)) + big
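# Numerical illustration (added; not part of the original module): computing
# log(exp(big) - exp(small)) directly overflows once `big` is large, while the
# factored form stays finite. In NumPy terms:
#
#   big, small = 1000., 999.
#   np.log(np.exp(big) - np.exp(small))        # inf - inf -> nan
#   np.log(1. - np.exp(small - big)) + big     # ~= 999.54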
_prob_base_note = """
For whole numbers `y`,
```
P[Y = y] := P[X <= low], if y == low,
:= P[X > high - 1], y == high,
:= 0, if y < low or y > high,
:= P[y - 1 < X <= y], all other y.
```
"""
_prob_note = _prob_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`. If the
base distribution has a `survival_function` method, results will be more
accurate for large values of `y`, and in this case the `survival_function` must
also be defined on `y - 1`.
"""
_log_prob_note = _prob_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`. If the
base distribution has a `log_survival_function` method, results will be more
accurate for large values of `y`, and in this case the `log_survival_function`
must also be defined on `y - 1`.
"""
_cdf_base_note = """
For whole numbers `y`,
```
cdf(y) := P[Y <= y]
= 1, if y >= high,
= 0, if y < low,
= P[X <= y], otherwise.
```
Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`.
This dictates that fractional `y` are first floored to a whole number, and
then the above definition applies.
"""
_cdf_note = _cdf_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`.
"""
_log_cdf_note = _cdf_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`.
"""
_sf_base_note = """
For whole numbers `y`,
```
survival_function(y) := P[Y > y]
= 0, if y >= high,
= 1, if y < low,
= P[X > y], otherwise.
```
Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`.
This dictates that fractional `y` are first floored to a whole number, and
then the above definition applies.
"""
_sf_note = _sf_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`.
"""
_log_sf_note = _sf_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`.
"""
class QuantizedDistribution(distributions.Distribution):
"""Distribution representing the quantization `Y = ceiling(X)`.
#### Definition in Terms of Sampling
```
1. Draw X
2. Set Y <-- ceiling(X)
3. If Y < low, reset Y <-- low
4. If Y > high, reset Y <-- high
5. Return Y
```
#### Definition in Terms of the Probability Mass Function
Given scalar random variable `X`, we define a discrete random variable `Y`
supported on the integers as follows:
```
P[Y = j] := P[X <= low], if j == low,
:= P[X > high - 1], j == high,
:= 0, if j < low or j > high,
:= P[j - 1 < X <= j], all other j.
```
Conceptually, without cutoffs, the quantization process partitions the real
line `R` into half open intervals, and identifies an integer `j` with the
right endpoints:
```
R = ... (-2, -1](-1, 0](0, 1](1, 2](2, 3](3, 4] ...
j = ... -1 0 1 2 3 4 ...
```
`P[Y = j]` is the mass of `X` within the `jth` interval.
If `low = 0`, and `high = 2`, then the intervals are redrawn
and `j` is re-assigned:
```
R = (-infty, 0](0, 1](1, infty)
j = 0 1 2
```
`P[Y = j]` is still the mass of `X` within the `jth` interval.
#### Examples
We illustrate a mixture of discretized logistic distributions
[(Salimans et al., 2017)][1]. This is used, for example, for capturing 16-bit
audio in WaveNet [(van den Oord et al., 2017)][2]. The values range in
a 1-D integer domain of `[0, 2**16-1]`, and the discretization captures
`P(x - 0.5 < X <= x + 0.5)` for all `x` in the domain excluding the endpoints.
The lowest value has probability `P(X <= 0.5)` and the highest value has
probability `P(2**16 - 1.5 < X)`.
Below we assume a `wavenet` function. It takes as `input` right-shifted audio
samples of shape `[..., sequence_length]`. It returns a real-valued tensor of
shape `[..., num_mixtures * 3]`, i.e., each mixture component has a `loc` and
`scale` parameter belonging to the logistic distribution, and a `logits`
parameter determining the unnormalized probability of that component.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
net = wavenet(inputs)
loc, unconstrained_scale, logits = tf.split(net,
num_or_size_splits=3,
axis=-1)
scale = tf.nn.softplus(unconstrained_scale)
# Form mixture of discretized logistic distributions. Note we shift the
# logistic distribution by -0.5. This lets the quantization capture "rounding"
# intervals, `(x-0.5, x+0.5]`, and not "ceiling" intervals, `(x-1, x]`.
discretized_logistic_dist = tfd.QuantizedDistribution(
distribution=tfd.TransformedDistribution(
distribution=tfd.Logistic(loc=loc, scale=scale),
bijector=tfb.AffineScalar(shift=-0.5)),
low=0.,
high=2**16 - 1.)
mixture_dist = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(logits=logits),
components_distribution=discretized_logistic_dist)
neg_log_likelihood = -tf.reduce_sum(mixture_dist.log_prob(targets))
train_op = tf.train.AdamOptimizer().minimize(neg_log_likelihood)
```
After instantiating `mixture_dist`, we illustrate maximum likelihood by
calculating its log-probability of the audio samples `targets` and optimizing.
#### References
[1]: Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik P. Kingma.
PixelCNN++: Improving the PixelCNN with discretized logistic mixture
likelihood and other modifications.
_International Conference on Learning Representations_, 2017.
https://arxiv.org/abs/1701.05517
[2]: Aaron van den Oord et al. Parallel WaveNet: Fast High-Fidelity Speech
Synthesis. _arXiv preprint arXiv:1711.10433_, 2017.
https://arxiv.org/abs/1711.10433
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
distribution,
low=None,
high=None,
validate_args=False,
name="QuantizedDistribution"):
"""Construct a Quantized Distribution representing `Y = ceiling(X)`.
Some properties are inherited from the distribution defining `X`. Example:
`allow_nan_stats` is determined for this `QuantizedDistribution` by reading
the `distribution`.
Args:
distribution: The base distribution to transform. Typically an
instance of `Distribution`.
low: `Tensor` with same `dtype` as this distribution and shape
able to be added to samples. Should be a whole number. Default `None`.
If provided, base distribution's `prob` should be defined at
`low`.
high: `Tensor` with same `dtype` as this distribution and shape
able to be added to samples. Should be a whole number. Default `None`.
If provided, base distribution's `prob` should be defined at
`high - 1`.
`high` must be strictly greater than `low`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: If `distribution` is not an instance of
`Distribution`, or if it is not a continuous distribution.
NotImplementedError: If the base distribution does not implement `cdf`.
"""
parameters = dict(locals())
values = (
list(distribution.parameters.values()) +
[low, high])
with ops.name_scope(name, values=values) as name:
self._dist = distribution
if low is not None:
low = ops.convert_to_tensor(low, name="low")
if high is not None:
high = ops.convert_to_tensor(high, name="high")
check_ops.assert_same_float_dtype(
tensors=[self.distribution, low, high])
# We let QuantizedDistribution access _graph_parents since this class is
# more like a baseclass.
graph_parents = self._dist._graph_parents # pylint: disable=protected-access
checks = []
if validate_args and low is not None and high is not None:
message = "low must be strictly less than high."
checks.append(
check_ops.assert_less(
low, high, message=message))
self._validate_args = validate_args # self._check_integer uses this.
with ops.control_dependencies(checks if validate_args else []):
if low is not None:
self._low = self._check_integer(low)
graph_parents += [self._low]
else:
self._low = None
if high is not None:
self._high = self._check_integer(high)
graph_parents += [self._high]
else:
self._high = None
super(QuantizedDistribution, self).__init__(
dtype=self._dist.dtype,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=self._dist.allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=name)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._dist
@property
def low(self):
"""Lowest value that quantization returns."""
return self._low
@property
def high(self):
"""Highest value that quantization returns."""
return self._high
def _batch_shape_tensor(self):
return self.distribution.batch_shape_tensor()
def _batch_shape(self):
return self.distribution.batch_shape
def _event_shape_tensor(self):
return self.distribution.event_shape_tensor()
def _event_shape(self):
return self.distribution.event_shape
def _sample_n(self, n, seed=None):
low = self._low
high = self._high
with ops.name_scope("transform"):
n = ops.convert_to_tensor(n, name="n")
x_samps = self.distribution.sample(n, seed=seed)
ones = array_ops.ones_like(x_samps)
# Snap values to the intervals (j - 1, j].
result_so_far = math_ops.ceil(x_samps)
if low is not None:
result_so_far = array_ops.where(result_so_far < low,
low * ones, result_so_far)
if high is not None:
result_so_far = array_ops.where(result_so_far > high,
high * ones, result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_log_prob_note)
def _log_prob(self, y):
if not hasattr(self.distribution, "_log_cdf"):
raise NotImplementedError(
"'log_prob' not implemented unless the base distribution implements "
"'log_cdf'")
y = self._check_integer(y)
try:
return self._log_prob_with_logsf_and_logcdf(y)
except NotImplementedError:
return self._log_prob_with_logcdf(y)
def _log_prob_with_logcdf(self, y):
return _logsum_expbig_minus_expsmall(self.log_cdf(y), self.log_cdf(y - 1))
def _log_prob_with_logsf_and_logcdf(self, y):
"""Compute log_prob(y) using log survival_function and cdf together."""
# There are two options that would be equal if we had infinite precision:
# Log[ sf(y - 1) - sf(y) ]
# = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ]
# Log[ cdf(y) - cdf(y - 1) ]
# = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ]
logsf_y = self.log_survival_function(y)
logsf_y_minus_1 = self.log_survival_function(y - 1)
logcdf_y = self.log_cdf(y)
logcdf_y_minus_1 = self.log_cdf(y - 1)
# Important: Here we use select in a way such that no input is inf, this
# prevents the troublesome case where the output of select can be finite,
# but the output of grad(select) will be NaN.
# In either case, we are doing Log[ exp{big} - exp{small} ]
# We want to use the sf items precisely when we are on the right side of the
# median, which occurs when logsf_y < logcdf_y.
big = array_ops.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y)
small = array_ops.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1)
return _logsum_expbig_minus_expsmall(big, small)
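# For intuition (added; not part of the original module): far in the right
# tail, cdf(y) and cdf(y - 1) are both nearly 1, so their difference (and the
# difference of their logs) suffers catastrophic cancellation, while
# sf(y - 1) - sf(y) subtracts two small, well-separated numbers. For a
# standard normal base distribution at y = 8, for example, sf(7) ~= 1.3e-12
# and sf(8) ~= 6.2e-16, which subtract cleanly, whereas cdf(7) and cdf(8)
# both round to within a few float64 ulps of 1.0.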
@distribution_util.AppendDocstring(_prob_note)
def _prob(self, y):
if not hasattr(self.distribution, "_cdf"):
raise NotImplementedError(
"'prob' not implemented unless the base distribution implements "
"'cdf'")
y = self._check_integer(y)
try:
return self._prob_with_sf_and_cdf(y)
except NotImplementedError:
return self._prob_with_cdf(y)
def _prob_with_cdf(self, y):
return self.cdf(y) - self.cdf(y - 1)
def _prob_with_sf_and_cdf(self, y):
# There are two options that would be equal if we had infinite precision:
# sf(y - 1) - sf(y)
# cdf(y) - cdf(y - 1)
sf_y = self.survival_function(y)
sf_y_minus_1 = self.survival_function(y - 1)
cdf_y = self.cdf(y)
cdf_y_minus_1 = self.cdf(y - 1)
# sf_prob has greater precision iff we're on the right side of the median.
return array_ops.where(
sf_y < cdf_y, # True iff we're on the right side of the median.
sf_y_minus_1 - sf_y,
cdf_y - cdf_y_minus_1)
@distribution_util.AppendDocstring(_log_cdf_note)
def _log_cdf(self, y):
low = self._low
high = self._high
# Recall the promise:
# cdf(y) := P[Y <= y]
# = 1, if y >= high,
# = 0, if y < low,
# = P[X <= y], otherwise.
# P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
# between.
j = math_ops.floor(y)
result_so_far = self.distribution.log_cdf(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
neg_inf = -np.inf * array_ops.ones_like(result_so_far)
result_so_far = array_ops.where(j < low, neg_inf, result_so_far)
if high is not None:
result_so_far = array_ops.where(j >= high,
array_ops.zeros_like(result_so_far),
result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_cdf_note)
def _cdf(self, y):
low = self._low
high = self._high
# Recall the promise:
# cdf(y) := P[Y <= y]
# = 1, if y >= high,
# = 0, if y < low,
# = P[X <= y], otherwise.
# P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
# between.
j = math_ops.floor(y)
# P[X <= j], used when low < X < high.
result_so_far = self.distribution.cdf(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
result_so_far = array_ops.where(j < low,
array_ops.zeros_like(result_so_far),
result_so_far)
if high is not None:
result_so_far = array_ops.where(j >= high,
array_ops.ones_like(result_so_far),
result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_log_sf_note)
def _log_survival_function(self, y):
low = self._low
high = self._high
# Recall the promise:
# survival_function(y) := P[Y > y]
# = 0, if y >= high,
# = 1, if y < low,
# = P[X > y], otherwise.
# P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
# between.
j = math_ops.ceil(y)
# P[X > j], used when low < X < high.
result_so_far = self.distribution.log_survival_function(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
result_so_far = array_ops.where(j < low,
array_ops.zeros_like(result_so_far),
result_so_far)
if high is not None:
neg_inf = -np.inf * array_ops.ones_like(result_so_far)
result_so_far = array_ops.where(j >= high, neg_inf, result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_sf_note)
def _survival_function(self, y):
low = self._low
high = self._high
# Recall the promise:
# survival_function(y) := P[Y > y]
# = 0, if y >= high,
# = 1, if y < low,
# = P[X > y], otherwise.
# P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
# between.
j = math_ops.ceil(y)
# P[X > j], used when low < X < high.
result_so_far = self.distribution.survival_function(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
result_so_far = array_ops.where(j < low,
array_ops.ones_like(result_so_far),
result_so_far)
if high is not None:
result_so_far = array_ops.where(j >= high,
array_ops.zeros_like(result_so_far),
result_so_far)
return result_so_far
def _check_integer(self, value):
with ops.name_scope("check_integer", values=[value]):
value = ops.convert_to_tensor(value, name="value")
if not self.validate_args:
return value
dependencies = [distribution_util.assert_integer_form(
value, message="value has non-integer components.")]
return control_flow_ops.with_dependencies(dependencies, value)
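# Illustrative usage sketch (added; not part of the original module):
# quantizing a Normal onto the integers {0, ..., 10}. The base distribution
# here is an assumption for illustration, e.g. `tf.distributions.Normal` in
# the TF 1.x API.
#
#   base = tf.distributions.Normal(loc=5., scale=2.)
#   dist = QuantizedDistribution(distribution=base, low=0., high=10.)
#   dist.prob(3.)   # == P[2 < X <= 3] under the base Normal.
#   dist.sample(7)  # Integer-valued samples, clipped to [0, 10].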
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/quantized_distribution.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Wishart distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"WishartCholesky",
"WishartFull",
]
class _WishartLinearOperator(distribution.Distribution):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar number of degrees of freedom `df` and
an instance of `LinearOperator`, which provides matrix-free access to a
symmetric positive definite operator, which defines the scale matrix.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(X; df, scale) = det(X)**(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / Z
Z = 2**(0.5 df k) |det(scale)|**(0.5 df) Gamma_k(0.5 df)
```
where:
* `df >= k` denotes the degrees of freedom,
* `scale` is a symmetric, positive definite, `k x k` matrix,
* `Z` is the normalizing constant, and,
* `Gamma_k` is the [multivariate Gamma function](
https://en.wikipedia.org/wiki/Multivariate_gamma_function).
#### Examples
See `WishartFull`, `WishartCholesky` for examples of initializing and using
this class.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
scale_operator,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Construct Wishart distributions.
Args:
df: `float` or `double` tensor, the degrees of freedom of the
distribution(s). `df` must be greater than or equal to `k`.
scale_operator: `float` or `double` instance of `LinearOperator`.
cholesky_input_output_matrices: Python `bool`. Any function whose
input or output is a matrix assumes the input is a Cholesky factor and
returns a Cholesky-factored matrix. For example, `log_prob` takes a
Cholesky factor as input and `sample_n` returns a Cholesky factor when
`cholesky_input_output_matrices=True`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if scale is not floating-type
TypeError: if scale.dtype != df.dtype
ValueError: if df < k, where scale operator event shape is
`(k, k)`
"""
parameters = dict(locals())
self._cholesky_input_output_matrices = cholesky_input_output_matrices
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[df, scale_operator]):
if not scale_operator.dtype.is_floating:
raise TypeError(
"scale_operator.dtype=%s is not a floating-point type" %
scale_operator.dtype)
if not scale_operator.is_square:
raise ValueError("scale_operator must be square.")
self._scale_operator = scale_operator
self._df = ops.convert_to_tensor(
df,
dtype=scale_operator.dtype,
name="df")
contrib_tensor_util.assert_same_float_dtype(
(self._df, self._scale_operator))
if (self._scale_operator.shape.ndims is None or
self._scale_operator.shape.dims[-1].value is None):
self._dimension = math_ops.cast(
self._scale_operator.domain_dimension_tensor(),
dtype=self._scale_operator.dtype, name="dimension")
else:
self._dimension = ops.convert_to_tensor(
self._scale_operator.shape.dims[-1].value,
dtype=self._scale_operator.dtype, name="dimension")
df_val = tensor_util.constant_value(self._df)
dim_val = tensor_util.constant_value(self._dimension)
if df_val is not None and dim_val is not None:
df_val = np.asarray(df_val)
if not df_val.shape:
df_val = [df_val]
if any(df_val < dim_val):
raise ValueError(
"Degrees of freedom (df = %s) cannot be less than "
"dimension of scale matrix (scale.dimension = %s)"
% (df_val, dim_val))
elif validate_args:
assertions = check_ops.assert_less_equal(
self._dimension, self._df,
message=("Degrees of freedom (df = %s) cannot be "
"less than dimension of scale matrix "
"(scale.dimension = %s)" %
(self._df, self._dimension)))
self._df = control_flow_ops.with_dependencies(
[assertions], self._df)
super(_WishartLinearOperator, self).__init__(
dtype=self._scale_operator.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=([self._df, self._dimension] +
self._scale_operator.graph_parents),
name=name)
@property
def df(self):
"""Wishart distribution degree(s) of freedom."""
return self._df
def _square_scale_operator(self):
return self.scale_operator.matmul(
self.scale_operator.to_dense(), adjoint_arg=True)
def scale(self):
"""Wishart distribution scale matrix."""
if self._cholesky_input_output_matrices:
return self.scale_operator.to_dense()
else:
return self._square_scale_operator()
@property
def scale_operator(self):
"""Wishart distribution scale matrix as an Linear Operator."""
return self._scale_operator
@property
def cholesky_input_output_matrices(self):
"""Boolean indicating if `Tensor` input/outputs are Cholesky factorized."""
return self._cholesky_input_output_matrices
@property
def dimension(self):
"""Dimension of underlying vector space. The `p` in `R^(p*p)`."""
return self._dimension
def _event_shape_tensor(self):
dimension = self.scale_operator.domain_dimension_tensor()
return array_ops.stack([dimension, dimension])
def _event_shape(self):
dimension = self.scale_operator.domain_dimension
return tensor_shape.TensorShape([dimension, dimension])
def _batch_shape_tensor(self):
return self.scale_operator.batch_shape_tensor()
def _batch_shape(self):
return self.scale_operator.batch_shape
def _sample_n(self, n, seed):
batch_shape = self.batch_shape_tensor()
event_shape = self.event_shape_tensor()
batch_ndims = array_ops.shape(batch_shape)[0]
ndims = batch_ndims + 3 # sample_ndims=1, event_ndims=2
shape = array_ops.concat([[n], batch_shape, event_shape], 0)
# Complexity: O(nbk**2)
x = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
# Complexity: O(nbk)
# This parametrization is equivalent to Chi2, i.e.,
# ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
expanded_df = self.df * array_ops.ones(
self.scale_operator.batch_shape_tensor(),
dtype=self.df.dtype.base_dtype)
g = random_ops.random_gamma(shape=[n],
alpha=self._multi_gamma_sequence(
0.5 * expanded_df, self.dimension),
beta=0.5,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(
seed, "wishart"))
# Complexity: O(nbk**2)
x = array_ops.matrix_band_part(x, -1, 0) # Tri-lower.
# Complexity: O(nbk)
x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))
# Make batch-op ready.
# Complexity: O(nbk**2)
perm = array_ops.concat([math_ops.range(1, ndims), [0]], 0)
x = array_ops.transpose(x, perm)
shape = array_ops.concat([batch_shape, [event_shape[0]], [-1]], 0)
x = array_ops.reshape(x, shape)
# Complexity: O(nbM) where M is the complexity of the operator solving a
# vector system. E.g., for LinearOperatorDiag, each matmul is O(k**2), so
# this complexity is O(nbk**2). For LinearOperatorLowerTriangular,
# each matmul is O(k^3) so this step has complexity O(nbk^3).
x = self.scale_operator.matmul(x)
# Undo make batch-op ready.
# Complexity: O(nbk**2)
shape = array_ops.concat([batch_shape, event_shape, [n]], 0)
x = array_ops.reshape(x, shape)
perm = array_ops.concat([[ndims - 1], math_ops.range(0, ndims - 1)], 0)
x = array_ops.transpose(x, perm)
if not self.cholesky_input_output_matrices:
# Complexity: O(nbk^3)
x = math_ops.matmul(x, x, adjoint_b=True)
return x
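# Exposition note (added; not in the original module): the construction above
# is the Bartlett decomposition of a Wishart sample. `x` is lower triangular
# with standard-normal strict lower triangle; its i-th diagonal entry
# (0-based) is the square root of a chi-square variate with `df - i` degrees
# of freedom. Premultiplying by the scale operator's Cholesky factor and
# forming `x x^T` yields a draw from the Wishart distribution.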
def _log_prob(self, x):
if self.cholesky_input_output_matrices:
x_sqrt = x
else:
# Complexity: O(nbk^3)
x_sqrt = linalg_ops.cholesky(x)
batch_shape = self.batch_shape_tensor()
event_shape = self.event_shape_tensor()
ndims = array_ops.rank(x_sqrt)
# sample_ndims = ndims - batch_ndims - event_ndims
sample_ndims = ndims - array_ops.shape(batch_shape)[0] - 2
sample_shape = array_ops.strided_slice(
array_ops.shape(x_sqrt), [0], [sample_ndims])
# We need to be able to pre-multiply each matrix by its corresponding
# batch scale matrix. Since a Distribution Tensor supports multiple
# samples per batch, this means we need to reshape the input matrix `x`
# so that the first b dimensions are batch dimensions and the last two
# are of shape [dimension, dimensions*number_of_samples]. Doing these
# gymnastics allows us to do a batch_solve.
#
# After we're done with sqrt_solve (the batch operation) we need to undo
# this reshaping so what we're left with is a Tensor partitionable by
# sample, batch, event dimensions.
# Complexity: O(nbk**2) since transpose must access every element.
scale_sqrt_inv_x_sqrt = x_sqrt
perm = array_ops.concat([math_ops.range(sample_ndims, ndims),
math_ops.range(0, sample_ndims)], 0)
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
shape = array_ops.concat(
(batch_shape, (math_ops.cast(
self.dimension, dtype=dtypes.int32), -1)),
0)
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
# Complexity: O(nbM*k) where M is the complexity of the operator solving
# a vector system. E.g., for LinearOperatorDiag, each solve is O(k), so
# this complexity is O(nbk**2). For LinearOperatorLowerTriangular,
# each solve is O(k**2) so this step has complexity O(nbk^3).
scale_sqrt_inv_x_sqrt = self.scale_operator.solve(
scale_sqrt_inv_x_sqrt)
# Undo make batch-op ready.
# Complexity: O(nbk**2)
shape = array_ops.concat([batch_shape, event_shape, sample_shape], 0)
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
perm = array_ops.concat([math_ops.range(ndims - sample_ndims, ndims),
math_ops.range(0, ndims - sample_ndims)], 0)
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
# Write V = SS', X = LL'. Then:
# tr[inv(V) X] = tr[inv(S)' inv(S) L L']
# = tr[inv(S) L L' inv(S)']
# = tr[(inv(S) L) (inv(S) L)']
# = sum_{ik} (inv(S) L)_{ik}**2
# The second equality follows from the cyclic permutation property.
# Complexity: O(nbk**2)
trace_scale_inv_x = math_ops.reduce_sum(
math_ops.square(scale_sqrt_inv_x_sqrt),
axis=[-2, -1])
# Complexity: O(nbk)
half_log_det_x = math_ops.reduce_sum(
math_ops.log(array_ops.matrix_diag_part(x_sqrt)),
axis=[-1])
# Complexity: O(nbk**2)
log_prob = ((self.df - self.dimension - 1.) * half_log_det_x -
0.5 * trace_scale_inv_x -
self.log_normalization())
# Set shape hints.
# Try to merge what we know from the input then what we know from the
# parameters of this distribution.
if x.get_shape().ndims is not None:
log_prob.set_shape(x.get_shape()[:-2])
if (log_prob.get_shape().ndims is not None and
self.batch_shape.ndims is not None and
self.batch_shape.ndims > 0):
log_prob.get_shape()[-self.batch_shape.ndims:].merge_with(
self.batch_shape)
return log_prob
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _entropy(self):
half_dp1 = 0.5 * self.dimension + 0.5
half_df = 0.5 * self.df
return (self.dimension * (half_df + half_dp1 * math.log(2.)) +
2 * half_dp1 * self.scale_operator.log_abs_determinant() +
self._multi_lgamma(half_df, self.dimension) +
(half_dp1 - half_df) * self._multi_digamma(half_df, self.dimension))
def _mean(self):
if self.cholesky_input_output_matrices:
return (math_ops.sqrt(self.df)
* self.scale_operator.to_dense())
return self.df * self._square_scale_operator()
def _variance(self):
x = math_ops.sqrt(self.df) * self._square_scale_operator()
d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
v = math_ops.square(x) + math_ops.matmul(d, d, adjoint_b=True)
if self.cholesky_input_output_matrices:
return linalg_ops.cholesky(v)
return v
def _stddev(self):
if self.cholesky_input_output_matrices:
raise ValueError(
"Computing std. dev. when is cholesky_input_output_matrices=True "
"does not make sense.")
return linalg_ops.cholesky(self.variance())
def _mode(self):
s = self.df - self.dimension - 1.
s = array_ops.where(
math_ops.less(s, 0.),
constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"),
s)
if self.cholesky_input_output_matrices:
return math_ops.sqrt(s) * self.scale_operator.to_dense()
return s * self._square_scale_operator()
def mean_log_det(self, name="mean_log_det"):
"""Computes E[log(det(X))] under this Wishart distribution."""
with self._name_scope(name):
return (self._multi_digamma(0.5 * self.df, self.dimension) +
self.dimension * math.log(2.) +
2 * self.scale_operator.log_abs_determinant())
def log_normalization(self, name="log_normalization"):
"""Computes the log normalizing constant, log(Z)."""
with self._name_scope(name):
return (self.df * self.scale_operator.log_abs_determinant() +
0.5 * self.df * self.dimension * math.log(2.) +
self._multi_lgamma(0.5 * self.df, self.dimension))
def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
"""Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
with self._name_scope(name, values=[a, p]):
# Linspace only takes scalars, so we'll add in the offset afterwards.
seq = math_ops.linspace(
constant_op.constant(0., dtype=self.dtype),
0.5 - 0.5 * p,
math_ops.cast(p, dtypes.int32))
return seq + array_ops.expand_dims(a, [-1])
def _multi_lgamma(self, a, p, name="multi_lgamma"):
"""Computes the log multivariate gamma function; log(Gamma_p(a))."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return (0.25 * p * (p - 1.) * math.log(math.pi) +
math_ops.reduce_sum(math_ops.lgamma(seq),
axis=[-1]))
def _multi_digamma(self, a, p, name="multi_digamma"):
"""Computes the multivariate digamma function; Psi_p(a)."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return math_ops.reduce_sum(math_ops.digamma(seq),
axis=[-1])
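# Cross-check sketch (added; not part of the original module; assumes SciPy
# is available): `_multi_lgamma` matches `scipy.special.multigammaln`. For
# a = 3.5, p = 2:
#
#   from scipy.special import multigammaln
#   multigammaln(3.5, 2)
#   # == 0.25 * 2 * 1 * log(pi) + lgamma(3.5) + lgamma(3.0),
#   # the same sequence the method builds via `_multi_gamma_sequence`.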
class WishartCholesky(_WishartLinearOperator):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees of freedom `df` and a
lower, triangular Cholesky factor which characterizes the scale matrix.
Using WishartCholesky is a constant-factor improvement over WishartFull. It
saves an O(nbk^3) operation, i.e., a matrix-product operation for sampling
and a Cholesky factorization in log_prob. For most use-cases it often saves
another O(nbk^3) operation since most uses of Wishart will also use the
Cholesky factorization.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(X; df, scale) = det(X)**(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / Z
Z = 2**(0.5 df k) |det(scale)|**(0.5 df) Gamma_k(0.5 df)
```
where:
* `df >= k` denotes the degrees of freedom,
* `scale` is a symmetric, positive definite, `k x k` matrix,
* `Z` is the normalizing constant, and,
* `Gamma_k` is the [multivariate Gamma function](
https://en.wikipedia.org/wiki/Multivariate_gamma_function).
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3x3 Wishart with Cholesky factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
chol_scale = tf.linalg.cholesky(...) # Shape is [3, 3].
dist = tfd.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on an observation in R^3, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.prob(x) # Shape is [], a scalar.
# Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.prob(x) # Shape is [2].
# Initialize two 3x3 Wisharts with Cholesky factored scale matrices.
df = [5, 4]
chol_scale = tf.linalg.cholesky(...) # Shape is [2, 3, 3].
dist = tfd.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3].
dist.prob(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tfp.distributions.matrix_diag_transform.
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartCholesky"):
"""Construct Wishart distributions.
Args:
df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The Cholesky factorization of
the symmetric positive definite scale matrix of the distribution.
cholesky_input_output_matrices: Python `bool`. Any function whose
input or output is a matrix assumes the input is a Cholesky factor and
returns a Cholesky-factored matrix. For example, `log_prob` takes a
Cholesky factor as input and `sample_n` returns a Cholesky factor when
`cholesky_input_output_matrices=True`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[scale]) as name:
with ops.name_scope("init", values=[scale]):
scale = ops.convert_to_tensor(scale)
if validate_args:
scale = control_flow_ops.with_dependencies([
check_ops.assert_positive(
array_ops.matrix_diag_part(scale),
message="scale must be positive definite"),
check_ops.assert_equal(
array_ops.shape(scale)[-1],
array_ops.shape(scale)[-2],
message="scale must be square")
] if validate_args else [], scale)
super(WishartCholesky, self).__init__(
df=df,
scale_operator=linalg.LinearOperatorLowerTriangular(
tril=scale,
is_non_singular=True,
is_positive_definite=True,
is_square=True),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
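# A minimal usage sketch for `WishartCholesky` (illustrative only; assumes a
# TF 1.x session/graph environment with `tf` imported):
#
#   chol_scale = tf.linalg.cholesky(2. * tf.eye(3))   # Cholesky factor of scale.
#   dist = WishartCholesky(df=5., scale=chol_scale)
#   lp = dist.log_prob(tf.eye(3))   # scalar log-density of a 3x3 PD observation
#   x = dist.sample(4)              # shape [4, 3, 3]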
class WishartFull(_WishartLinearOperator):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees of freedom `df` and a
symmetric, positive definite scale matrix.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations
where `(k, k)` is the event space shape.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(X; df, scale) = det(X)**(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / Z
Z = 2**(0.5 df k) |det(scale)|**(0.5 df) Gamma_k(0.5 df)
```
where:
* `df >= k` denotes the degrees of freedom,
* `scale` is a symmetric, positive definite, `k x k` matrix,
* `Z` is the normalizing constant, and,
* `Gamma_k` is the [multivariate Gamma function](
https://en.wikipedia.org/wiki/Multivariate_gamma_function).
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3x3 Wishart with Full factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
scale = ... # Shape is [3, 3]; positive definite.
dist = tfd.WishartFull(df=df, scale=scale)
# Evaluate this on an observation in R^{3x3}, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.prob(x) # Shape is [], a scalar.
# Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.prob(x) # Shape is [2].
# Initialize two 3x3 Wisharts with Full factored scale matrices.
df = [5, 4]
scale = ... # Shape is [2, 3, 3].
dist = tfd.WishartFull(df=df, scale=scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3]; xi is positive definite.
dist.prob(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tfd.matrix_diag_transform.
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartFull"):
"""Construct Wishart distributions.
Args:
      df: `float` or `double` `Tensor`. Degrees of freedom; must be greater
        than or equal to the dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The symmetric positive definite
scale matrix of the distribution.
      cholesky_input_output_matrices: Python `bool`. When `True`, any function
        whose input or output is a matrix assumes the input is a Cholesky
        factor and returns a Cholesky-factored matrix. For example, `log_prob`
        then expects a Cholesky factor as input and `sample_n` returns a
        Cholesky factor.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[scale]):
scale = ops.convert_to_tensor(scale)
if validate_args:
scale = distribution_util.assert_symmetric(scale)
chol = linalg_ops.cholesky(scale)
chol = control_flow_ops.with_dependencies([
check_ops.assert_positive(array_ops.matrix_diag_part(chol))
] if validate_args else [], chol)
super(WishartFull, self).__init__(
df=df,
scale_operator=linalg.LinearOperatorLowerTriangular(
tril=chol,
is_non_singular=True,
is_positive_definite=True,
is_square=True),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
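# Illustrative sketch of the `cholesky_input_output_matrices` flag with
# `WishartFull` (assumes a TF 1.x environment with `tf` imported). When the
# flag is True, matrix-valued inputs/outputs are Cholesky factors rather than
# full matrices:
#
#   scale = 2. * tf.eye(3)
#   dist = WishartFull(df=5., scale=scale,
#                      cholesky_input_output_matrices=True)
#   chol_x = tf.linalg.cholesky(tf.eye(3))
#   lp = dist.log_prob(chol_x)   # expects a Cholesky factor as input
#   s = dist.sample()            # returns a Cholesky factor, shape [3, 3]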
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/wishart.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop
from tensorflow.python.framework import ops
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalDiagPlusLowRank",
]
class MultivariateNormalDiagPlusLowRank(
mvn_linop.MultivariateNormalLinearOperator):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T` where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier ones(k)) +
scale_perturb_factor @ diag(scale_perturb_diag) @ scale_perturb_factor.T
```
where:
* `scale_diag.shape = [k]`,
* `scale_identity_multiplier.shape = []`,
* `scale_perturb_factor.shape = [k, r]`, typically `k >> r`, and,
* `scale_perturb_diag.shape = [r]`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate Gaussian with covariance `cov = S @ S.T`,
# `S = diag(d) + U @ diag(m) @ U.T`. The perturbation, `U @ diag(m) @ U.T`, is
# a rank-2 update.
mu = [-0.5, 0, 0.5] # shape: [3]
d = [1.5, 0.5, 2] # shape: [3]
U = [[1., 2],
[-1, 1],
[2, -0.5]] # shape: [3, 2]
m = [4., 5] # shape: [2]
mvn = tfd.MultivariateNormalDiagPlusLowRank(
    loc=mu,
    scale_diag=d,
scale_perturb_factor=U,
scale_perturb_diag=m)
# Evaluate this on an observation in `R^3`, returning a scalar.
mvn.prob([-1, 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Gaussians; `S = diag(d) + U @ U.T`.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [b, k] = [2, 3]
U = [[[1., 2],
[3, 4],
[5, 6]],
[[0.5, 0.75],
       [1.0, 0.25],
[1.5, 1.25]]] # shape: [b, k, r] = [2, 3, 2]
m = [[0.1, 0.2],
[0.4, 0.5]] # shape: [b, r] = [2, 2]
mvn = tfd.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_perturb_factor=U,
scale_perturb_diag=m)
mvn.covariance().eval() # shape: [2, 3, 3]
# ==> [[[ 15.63 31.57 48.51]
# [ 31.57 69.31 105.05]
# [ 48.51 105.05 162.59]]
#
# [[ 2.59 1.41 3.35]
# [ 1.41 2.71 3.34]
# [ 3.35 3.34 8.35]]]
# Compute the pdf of two `R^3` observations (one from each batch);
# return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagPlusLowRank"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier ones(k)) +
scale_perturb_factor @ diag(scale_perturb_diag) @ scale_perturb_factor.T
```
where:
* `scale_diag.shape = [k]`,
* `scale_identity_multiplier.shape = []`,
* `scale_perturb_factor.shape = [k, r]`, typically `k >> r`, and,
* `scale_perturb_diag.shape = [r]`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scaled-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale` is
the `Identity`.
scale_perturb_factor: Floating-point `Tensor` representing a rank-`r`
perturbation added to `scale`. May have shape `[B1, ..., Bb, k, r]`,
`b >= 0`, and characterizes `b`-batches of rank-`r` updates to `scale`.
When `None`, no rank-`r` update is added to `scale`.
scale_perturb_diag: Floating-point `Tensor` representing a diagonal matrix
inside the rank-`r` perturbation added to `scale`. May have shape
`[B1, ..., Bb, r]`, `b >= 0`, and characterizes `b`-batches of `r x r`
diagonal matrices inside the perturbation added to `scale`. When
`None`, an identity matrix is used inside the perturbation. Can only be
specified if `scale_perturb_factor` is also specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
      ValueError: if `loc` is `None` and at most `scale_identity_multiplier` is
        specified, so the event shape cannot be inferred.
"""
parameters = dict(locals())
def _convert_to_tensor(x, name):
return None if x is None else ops.convert_to_tensor(x, name=name)
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier, scale_perturb_factor,
scale_perturb_diag]):
has_low_rank = (scale_perturb_factor is not None or
scale_perturb_diag is not None)
scale = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=validate_args,
assert_positive=has_low_rank)
scale_perturb_factor = _convert_to_tensor(
scale_perturb_factor,
name="scale_perturb_factor")
scale_perturb_diag = _convert_to_tensor(
scale_perturb_diag,
name="scale_perturb_diag")
if has_low_rank:
scale = linalg.LinearOperatorLowRankUpdate(
scale,
u=scale_perturb_factor,
diag_update=scale_perturb_diag,
is_diag_update_positive=scale_perturb_diag is None,
is_non_singular=True, # Implied by is_positive_definite=True.
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
super(MultivariateNormalDiagPlusLowRank, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
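# A small NumPy cross-check of the scale composition used above (illustrative
# only; `np` refers to an assumed `import numpy as np`):
#
#   d = np.array([1.5, 0.5, 2.])                     # scale_diag, shape [3]
#   U = np.array([[1., 2.], [-1., 1.], [2., -0.5]])  # scale_perturb_factor, [3, 2]
#   m = np.array([4., 5.])                           # scale_perturb_diag, shape [2]
#   S = np.diag(d) + U @ np.diag(m) @ U.T            # the (non-batch) scale matrix
#   cov = S @ S.T                                    # covariance implied by `scale`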
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/mvn_diag_plus_low_rank.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.distributions import distribution as distribution_lib
# The following two lines are redundant, in a sense. The first enables
# good coding practice *within* this file (`util.prefer_static_value`
# rather than `prefer_static_value`). The second ensures that users
# also get the core utils when they import this file.
from tensorflow.python.ops.distributions import util
from tensorflow.python.ops.distributions.util import * # pylint: disable=wildcard-import
def _convert_to_tensor(x, name):
return None if x is None else ops.convert_to_tensor(x, name=name)
def mixture_stddev(mixture_weight_vector, mean_vector, stddev_vector):
"""Computes the standard deviation of a mixture distribution.
This function works regardless of the component distribution, so long as
each component's mean and standard deviation can be provided.
Args:
mixture_weight_vector: A 2D tensor with shape [batch_size, num_components]
mean_vector: A 2D tensor of mixture component means. Has shape
`[batch_size, num_components]`.
stddev_vector: A 2D tensor of mixture component standard deviations. Has
shape `[batch_size, num_components]`.
Returns:
A 1D tensor of shape `[batch_size]` representing the standard deviation of
the mixture distribution with given weights and component means and standard
deviations.
Raises:
ValueError: If the shapes of the input tensors are not as expected.
"""
mixture_weight_vector.shape.assert_has_rank(2)
if not mean_vector.shape.is_compatible_with(mixture_weight_vector.shape):
raise ValueError("Expecting means to have same shape as mixture weights.")
if not stddev_vector.shape.is_compatible_with(mixture_weight_vector.shape):
raise ValueError("Expecting stddevs to have same shape as mixture weights.")
# Reshape the distribution parameters for batched vectorized dot products.
pi_for_dot_prod = array_ops.expand_dims(mixture_weight_vector, axis=1)
mu_for_dot_prod = array_ops.expand_dims(mean_vector, axis=2)
sigma_for_dot_prod = array_ops.expand_dims(stddev_vector, axis=2)
# weighted average of component means under mixture distribution.
mean_wa = math_ops.matmul(pi_for_dot_prod, mu_for_dot_prod)
mean_wa = array_ops.reshape(mean_wa, (-1,))
# weighted average of component variances under mixture distribution.
var_wa = math_ops.matmul(pi_for_dot_prod,
math_ops.square(sigma_for_dot_prod))
var_wa = array_ops.reshape(var_wa, (-1,))
# weighted average of component squared means under mixture distribution.
sq_mean_wa = math_ops.matmul(pi_for_dot_prod,
math_ops.square(mu_for_dot_prod))
sq_mean_wa = array_ops.reshape(sq_mean_wa, (-1,))
mixture_variance = var_wa + sq_mean_wa - math_ops.square(mean_wa)
return math_ops.sqrt(mixture_variance)
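# Worked example for `mixture_stddev` (illustrative; values chosen by hand):
# with weights [0.5, 0.5], component means [0., 0.] and component stddevs
# [1., 3.], the mixture variance is
#   E[sigma^2] + E[mu^2] - (E[mu])^2 = 0.5*1 + 0.5*9 + 0 - 0 = 5,
# so the returned standard deviation for that batch member is sqrt(5) ~= 2.236.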
def make_tril_scale(
loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinOp representing a lower triangular matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
    scale_tril: Floating-point `Tensor` representing the lower triangular
      matrix. `scale_tril` has shape [N1, N2, ... k, k], which represents a
      k x k lower triangular matrix.
      When `None` no `scale_tril` term is added to the LinOp.
      The upper triangular elements above the diagonal are ignored.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to the LinOp.
    scale_identity_multiplier: floating point rank 0 `Tensor` representing a
      scaling done to the identity matrix. When provided, `multiplier * I` is
      added to `scale`. When `scale_identity_multiplier = scale_diag =
      scale_tril = None`, `scale` defaults to the identity matrix.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
assert_positive: Python `bool` indicating whether LinOp should be checked
for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
array_ops.matrix_diag_part(x),
message="diagonal part must be positive"),
], x)
return control_flow_ops.with_dependencies([
check_ops.assert_none_equal(
array_ops.matrix_diag_part(x),
array_ops.zeros([], x.dtype),
message="diagonal part must be non-zero"),
], x)
with ops.name_scope(name, "make_tril_scale",
values=[loc, scale_diag, scale_identity_multiplier]):
loc = _convert_to_tensor(loc, name="loc")
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier")
if scale_tril is not None:
scale_tril = array_ops.matrix_band_part(scale_tril, -1, 0) # Zero out TriU.
tril_diag = array_ops.matrix_diag_part(scale_tril)
if scale_diag is not None:
tril_diag += scale_diag
if scale_identity_multiplier is not None:
tril_diag += scale_identity_multiplier[..., array_ops.newaxis]
scale_tril = array_ops.matrix_set_diag(scale_tril, tril_diag)
return linalg.LinearOperatorLowerTriangular(
tril=_maybe_attach_assertion(scale_tril),
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=assert_positive)
return make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
shape_hint=shape_hint,
validate_args=validate_args,
assert_positive=assert_positive,
name=name)
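# Sketch of how the pieces of `make_tril_scale` combine (illustrative only;
# values written as if evaluated eagerly for readability):
#
#   tril = [[1., 0.], [2., 3.]]
#   op = make_tril_scale(scale_tril=tril, scale_diag=[0.5, 0.5],
#                        scale_identity_multiplier=1.)
#   # The diagonal of the resulting LinearOperatorLowerTriangular is
#   # [1. + 0.5 + 1., 3. + 0.5 + 1.] = [2.5, 4.5]; off-diagonal entries of
#   # `tril` are kept and the strictly upper triangle is zeroed out.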
def make_diag_scale(
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinOp representing a diagonal matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to the LinOp.
    scale_identity_multiplier: floating point rank 0 `Tensor` representing a
      scaling done to the identity matrix. When provided, `multiplier * I` is
      added to `scale`. When `scale_identity_multiplier = scale_diag = None`,
      `scale` defaults to the identity matrix.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
assert_positive: Python `bool` indicating whether LinOp should be checked
for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
    `LinearOperator` representing a diagonal matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
x, message="diagonal part must be positive"),
], x)
return control_flow_ops.with_dependencies([
check_ops.assert_none_equal(
x,
array_ops.zeros([], x.dtype),
message="diagonal part must be non-zero")], x)
with ops.name_scope(name, "make_diag_scale",
values=[loc, scale_diag, scale_identity_multiplier]):
loc = _convert_to_tensor(loc, name="loc")
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier")
if scale_diag is not None:
if scale_identity_multiplier is not None:
scale_diag += scale_identity_multiplier[..., array_ops.newaxis]
return linalg.LinearOperatorDiag(
diag=_maybe_attach_assertion(scale_diag),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive)
if loc is None and shape_hint is None:
raise ValueError(
"Cannot infer `event_shape` unless `loc` or "
"`shape_hint` is specified.")
if shape_hint is None:
shape_hint = loc.shape[-1]
if scale_identity_multiplier is None:
return linalg.LinearOperatorIdentity(
num_rows=shape_hint,
dtype=loc.dtype.base_dtype,
is_self_adjoint=True,
is_positive_definite=True,
assert_proper_shapes=validate_args)
return linalg.LinearOperatorScaledIdentity(
num_rows=shape_hint,
multiplier=_maybe_attach_assertion(scale_identity_multiplier),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive,
assert_proper_shapes=validate_args)
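# Quick reference for the three cases handled by `make_diag_scale`
# (illustrative, not exhaustive; assumes `tf` is imported):
#
#   make_diag_scale(scale_diag=[1., 2.])
#   # -> LinearOperatorDiag with diag [1., 2.]
#   make_diag_scale(scale_identity_multiplier=3., shape_hint=2)
#   # -> LinearOperatorScaledIdentity, multiplier 3., num_rows 2
#   make_diag_scale(loc=tf.zeros([2]))
#   # -> LinearOperatorIdentity of size 2 (inferred from `loc`)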
def shapes_from_loc_and_scale(loc, scale, name="shapes_from_loc_and_scale"):
"""Infer distribution batch and event shapes from a location and scale.
Location and scale family distributions determine their batch/event shape by
broadcasting the `loc` and `scale` args. This helper does that broadcast,
statically if possible.
Batch shape broadcasts as per the normal rules.
We allow the `loc` event shape to broadcast up to that of `scale`. We do not
allow `scale`'s event shape to change. Therefore, the last dimension of `loc`
must either be size `1`, or the same as `scale.range_dimension`.
See `MultivariateNormalLinearOperator` for a usage example.
Args:
loc: `N-D` `Tensor` with `N >= 1` (already converted to tensor) or `None`.
If `None`, both batch and event shape are determined by `scale`.
scale: A `LinearOperator` instance.
name: A string name to prepend to created ops.
Returns:
batch_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
event_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
Raises:
ValueError: If the last dimension of `loc` is determined statically to be
different than the range of `scale`.
"""
with ops.name_scope(name, values=[loc] + scale.graph_parents):
# Get event shape.
event_size = scale.range_dimension_tensor()
event_size_const = tensor_util.constant_value(event_size)
if event_size_const is not None:
event_shape = event_size_const.reshape([1])
else:
event_shape = event_size[array_ops.newaxis]
# Static check that event shapes match.
if loc is not None:
loc_event_size = tensor_shape.dimension_value(loc.get_shape()[-1])
if loc_event_size is not None and event_size_const is not None:
if loc_event_size != 1 and loc_event_size != event_size_const:
          raise ValueError(
              "Event size of 'loc' (%d) could not be broadcast up to that of "
              "'scale' (%d)." % (loc_event_size, event_size_const))
# Get batch shape.
batch_shape = scale.batch_shape_tensor()
if loc is None:
batch_shape_const = tensor_util.constant_value(batch_shape)
batch_shape = (
batch_shape_const if batch_shape_const is not None else batch_shape)
else:
loc_batch_shape = loc.get_shape().with_rank_at_least(1)[:-1]
if (loc.get_shape().ndims is None or
not loc_batch_shape.is_fully_defined()):
loc_batch_shape = array_ops.shape(loc)[:-1]
else:
loc_batch_shape = ops.convert_to_tensor(loc_batch_shape,
name="loc_batch_shape")
# This is defined in the core util module.
# pylint: disable=undefined-variable
batch_shape = prefer_static_broadcast_shape(batch_shape, loc_batch_shape)
# pylint: enable=undefined-variable
return batch_shape, event_shape
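# Shape-inference example for `shapes_from_loc_and_scale` (illustrative;
# `scale` is any LinearOperator with batch shape [4] and range dimension 3):
#
#   loc with shape [5, 1, 3]  ->  loc batch shape [5, 1], loc event size 3
#   broadcast([5, 1], [4])    ->  batch_shape [5, 4]
#   event_shape               ->  [3]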
def get_broadcast_shape(*tensors):
"""Get broadcast shape as a Python list of integers (preferred) or `Tensor`.
Args:
*tensors: One or more `Tensor` objects (already converted!).
Returns:
broadcast shape: Python list (if shapes determined statically), otherwise
an `int32` `Tensor`.
"""
# Try static.
s_shape = tensors[0].shape
for t in tensors[1:]:
s_shape = array_ops.broadcast_static_shape(s_shape, t.shape)
if s_shape.is_fully_defined():
return s_shape.as_list()
# Fallback on dynamic.
d_shape = array_ops.shape(tensors[0])
for t in tensors[1:]:
d_shape = array_ops.broadcast_dynamic_shape(d_shape, array_ops.shape(t))
return d_shape
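# Example for `get_broadcast_shape` (illustrative): tensors with static shapes
# [2, 1, 3] and [1, 4, 3] broadcast to [2, 4, 3], returned as a Python list;
# if either shape is only known at graph-run time, an int32 `Tensor` holding
# the same values is returned instead.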
def is_diagonal_scale(scale):
"""Returns `True` if `scale` is a `LinearOperator` that is known to be diag.
Args:
scale: `LinearOperator` instance.
Returns:
Python `bool`.
Raises:
TypeError: If `scale` is not a `LinearOperator`.
"""
if not isinstance(scale, linalg.LinearOperator):
raise TypeError("Expected argument 'scale' to be instance of LinearOperator"
". Found: %s" % scale)
return (isinstance(scale, linalg.LinearOperatorIdentity) or
isinstance(scale, linalg.LinearOperatorScaledIdentity) or
isinstance(scale, linalg.LinearOperatorDiag))
def maybe_check_scalar_distribution(
distribution, expected_base_dtype, validate_args):
"""Helper which checks validity of a scalar `distribution` init arg.
Valid here means:
* `distribution` has scalar batch and event shapes.
* `distribution` is `FULLY_REPARAMETERIZED`
* `distribution` has expected dtype.
Args:
distribution: `Distribution`-like object.
expected_base_dtype: `TensorFlow` `dtype`.
validate_args: Python `bool`. Whether to do additional checks:
(i) check that reparameterization_type is `FULLY_REPARAMETERIZED`.
(ii) add `tf.Assert` ops to the graph to enforce that distribution
is scalar in the event that this cannot be determined statically.
Returns:
List of `tf.Assert` ops to run to enforce validity checks that could not
be statically determined. Empty if `not validate_args`.
Raises:
ValueError: If validate_args and distribution is not FULLY_REPARAMETERIZED
ValueError: If distribution is statically determined to not have both
scalar batch and scalar event shapes.
"""
if distribution.dtype != expected_base_dtype:
raise TypeError("dtype mismatch; "
"distribution.dtype=\"{}\" is not \"{}\"".format(
distribution.dtype.name, expected_base_dtype.name))
# Although `reparameterization_type` is a static property, we guard it by
# `validate_args`. This allows users to use a `distribution` which is not
# reparameterized itself. However, we tacitly assume that although the
# distribution is not reparameterized, it only depends on non-trainable
# variables.
if validate_args and (distribution.reparameterization_type
!= distribution_lib.FULLY_REPARAMETERIZED):
raise ValueError("Base distribution should be reparameterized or be "
"a function of non-trainable variables; "
"distribution.reparameterization_type = \"{}\" "
"!= \"FULLY_REPARAMETERIZED\".".format(
distribution.reparameterization_type))
with ops.name_scope(name="check_distribution"):
assertions = []
def check_is_scalar(is_scalar, name):
is_scalar_ = static_value(is_scalar)
if is_scalar_ is not None:
if not is_scalar_:
raise ValueError("distribution must be scalar; "
"distribution.{}=False is not True".format(name))
elif validate_args:
assertions.append(check_ops.assert_equal(
is_scalar, True,
message=("distribution must be scalar; "
"distribution.{}=False is not True".format(name))))
check_is_scalar(distribution.is_scalar_event(), "is_scalar_event")
check_is_scalar(distribution.is_scalar_batch(), "is_scalar_batch")
return assertions
def pad_mixture_dimensions(x, mixture_distribution, categorical_distribution,
event_ndims):
"""Pad dimensions of event tensors for mixture distributions.
See `Mixture._sample_n` and `MixtureSameFamily._sample_n` for usage examples.
Args:
x: event tensor to pad.
mixture_distribution: Base distribution of the mixture.
categorical_distribution: `Categorical` distribution that mixes the base
distribution.
event_ndims: Integer specifying the number of event dimensions in the event
tensor.
Returns:
A padded version of `x` that can broadcast with `categorical_distribution`.
"""
with ops.name_scope("pad_mix_dims", values=[x]):
def _get_ndims(d):
if d.batch_shape.ndims is not None:
return d.batch_shape.ndims
return array_ops.shape(d.batch_shape_tensor())[0]
dist_batch_ndims = _get_ndims(mixture_distribution)
cat_batch_ndims = _get_ndims(categorical_distribution)
pad_ndims = array_ops.where(
categorical_distribution.is_scalar_batch(),
dist_batch_ndims,
dist_batch_ndims - cat_batch_ndims)
s = array_ops.shape(x)
x = array_ops.reshape(x, shape=array_ops.concat([
s[:-1],
array_ops.ones([pad_ndims], dtype=dtypes.int32),
s[-1:],
array_ops.ones([event_ndims], dtype=dtypes.int32),
], axis=0))
return x
def static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def move_dimension(x, source_idx, dest_idx):
"""Move a single tensor dimension within its shape.
This is a special case of `tf.transpose()`, which applies
arbitrary permutations to tensor dimensions.
Args:
x: Tensor of rank `ndims`.
source_idx: Integer index into `x.shape` (negative indexing is
supported).
dest_idx: Integer index into `x.shape` (negative indexing is
supported).
Returns:
x_perm: Tensor of rank `ndims`, in which the dimension at original
index `source_idx` has been moved to new index `dest_idx`, with
all other dimensions retained in their original order.
Example:
```python
  x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[200, 30, 4, 1, 6])
  x_perm = move_dimension(x, 1, 1)  # no-op
  x_perm = move_dimension(x, 0, 3)  # result shape [30, 4, 1, 200, 6]
  x_perm = move_dimension(x, 0, -2)  # equivalent to previous
  x_perm = move_dimension(x, 4, 2)  # result shape [200, 30, 6, 4, 1]
```
"""
ndims = util.prefer_static_rank(x)
if isinstance(source_idx, int):
dtype = dtypes.int32
else:
dtype = dtypes.as_dtype(source_idx.dtype)
# Handle negative indexing. Since ndims might be dynamic, this makes
# source_idx and dest_idx also possibly dynamic.
if source_idx < 0:
source_idx = ndims + source_idx
if dest_idx < 0:
dest_idx = ndims + dest_idx
# Construct the appropriate permutation of dimensions, depending
# whether the source is before or after the destination.
def move_left_permutation():
return util.prefer_static_value(
array_ops.concat([
math_ops.range(0, dest_idx, dtype=dtype),
[source_idx],
math_ops.range(dest_idx, source_idx, dtype=dtype),
math_ops.range(source_idx+1, ndims, dtype=dtype)], axis=0))
def move_right_permutation():
return util.prefer_static_value(
array_ops.concat([
math_ops.range(0, source_idx, dtype=dtype),
math_ops.range(source_idx+1, dest_idx+1, dtype=dtype),
[source_idx],
math_ops.range(dest_idx+1, ndims, dtype=dtype)], axis=0))
def x_permuted():
return array_ops.transpose(
x, perm=smart_cond.smart_cond(source_idx < dest_idx,
move_right_permutation,
move_left_permutation))
# One final conditional to handle the special case where source
# and destination indices are equal.
return smart_cond.smart_cond(math_ops.equal(source_idx, dest_idx),
lambda: x,
x_permuted)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/distribution_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The BatchReshape distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.util import deprecation
__all__ = [
"BatchReshape",
]
class BatchReshape(distribution_lib.Distribution):
"""The Batch-Reshaping distribution.
This "meta-distribution" reshapes the batch dimensions of another
distribution.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
dtype = np.float32
dims = 2
new_batch_shape = [1, 2, -1]
old_batch_shape = [6]
scale = np.ones(old_batch_shape + [dims], dtype)
mvn = tfd.MultivariateNormalDiag(scale_diag=scale)
reshape_mvn = tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape,
validate_args=True)
reshape_mvn.batch_shape
# ==> [1, 2, 3]
x = reshape_mvn.sample(sample_shape=[4, 5])
x.shape
# ==> [4, 5, 1, 2, 3, 2] == sample_shape + new_batch_shape + [dims]
reshape_mvn.log_prob(x).shape
# ==> [4, 5, 1, 2, 3] == sample_shape + new_batch_shape
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
distribution,
batch_shape,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Construct BatchReshape distribution.
Args:
distribution: The base distribution instance to reshape. Typically an
instance of `Distribution`.
batch_shape: Positive `int`-like vector-shaped `Tensor` representing
the new shape of the batch dimensions. Up to one dimension may contain
`-1`, meaning the remainder of the batch size.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: The name to give Ops created by the initializer.
Default value: `"BatchReshape" + distribution.name`.
Raises:
ValueError: if `batch_shape` is not a vector.
ValueError: if `batch_shape` has non-positive elements.
ValueError: if `batch_shape` size is not the same as a
`distribution.batch_shape` size.
"""
parameters = dict(locals())
name = name or "BatchReshape" + distribution.name
with ops.name_scope(name, values=[batch_shape]) as name:
# The unexpanded batch shape may contain up to one dimension of -1.
self._batch_shape_unexpanded = ops.convert_to_tensor(
batch_shape, dtype=dtypes.int32, name="batch_shape")
validate_init_args_statically(distribution, self._batch_shape_unexpanded)
batch_shape, batch_shape_static, runtime_assertions = calculate_reshape(
distribution.batch_shape_tensor(), self._batch_shape_unexpanded,
validate_args)
self._distribution = distribution
self._batch_shape_ = batch_shape
self._batch_shape_static = batch_shape_static
self._runtime_assertions = runtime_assertions
super(BatchReshape, self).__init__(
dtype=distribution.dtype,
reparameterization_type=distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
[self._batch_shape_unexpanded] + distribution._graph_parents), # pylint: disable=protected-access
name=name)
@property
def distribution(self):
return self._distribution
def _batch_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return array_ops.identity(self._batch_shape_)
def _batch_shape(self):
return self._batch_shape_static
def _event_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return array_ops.identity(self.distribution.event_shape_tensor())
def _event_shape(self):
return self.distribution.event_shape
def _sample_n(self, n, seed=None):
with ops.control_dependencies(self._runtime_assertions):
x = self.distribution.sample(sample_shape=n, seed=seed)
new_shape = array_ops.concat(
[
[n],
self._batch_shape_unexpanded,
self.event_shape_tensor(),
],
axis=0)
return array_ops.reshape(x, new_shape)
def _log_prob(self, x):
return self._call_reshape_input_output(
self.distribution.log_prob, x)
def _prob(self, x):
return self._call_reshape_input_output(
self.distribution.prob, x)
def _log_cdf(self, x):
return self._call_reshape_input_output(
self.distribution.log_cdf, x)
def _cdf(self, x):
return self._call_reshape_input_output(
self.distribution.cdf, x)
def _log_survival_function(self, x):
return self._call_reshape_input_output(
self.distribution.log_survival_function, x)
def _survival_function(self, x):
return self._call_reshape_input_output(
self.distribution.survival_function, x)
def _entropy(self):
return self._call_and_reshape_output(
self.distribution.entropy,
[],
[tensor_shape.scalar()])
def _mean(self):
return self._call_and_reshape_output(self.distribution.mean)
def _mode(self):
return self._call_and_reshape_output(self.distribution.mode)
def _stddev(self):
return self._call_and_reshape_output(self.distribution.stddev)
def _variance(self):
return self._call_and_reshape_output(self.distribution.variance)
def _covariance(self):
return self._call_and_reshape_output(
self.distribution.covariance,
[self.event_shape_tensor()]*2,
[self.event_shape]*2)
def _sample_shape(self, x):
"""Computes graph and static `sample_shape`."""
x_ndims = (array_ops.rank(x) if x.shape.ndims is None else x.shape.ndims)
event_ndims = (array_ops.size(self.event_shape_tensor())
if self.event_shape.ndims is None
else self.event_shape.ndims)
batch_ndims = (
array_ops.size(self._batch_shape_unexpanded)
if self.batch_shape.ndims is None else self.batch_shape.ndims)
sample_ndims = x_ndims - batch_ndims - event_ndims
if isinstance(sample_ndims, int):
static_sample_shape = x.shape[:sample_ndims]
else:
static_sample_shape = tensor_shape.TensorShape(None)
if static_sample_shape.is_fully_defined():
sample_shape = np.int32(static_sample_shape.as_list())
else:
sample_shape = array_ops.shape(x)[:sample_ndims]
return sample_shape, static_sample_shape
def _call_reshape_input_output(self, fn, x):
"""Calls `fn`, appropriately reshaping its input `x` and output."""
with ops.control_dependencies(
self._runtime_assertions + self._validate_sample_arg(x)):
sample_shape, static_sample_shape = self._sample_shape(x)
old_shape = array_ops.concat([
sample_shape,
self.distribution.batch_shape_tensor(),
self.event_shape_tensor(),
], axis=0)
result = fn(array_ops.reshape(x, old_shape))
new_shape = array_ops.concat(
[
sample_shape,
self._batch_shape_unexpanded,
], axis=0)
result = array_ops.reshape(result, new_shape)
if (static_sample_shape.ndims is not None and
self.batch_shape.ndims is not None):
new_shape = static_sample_shape.concatenate(self.batch_shape)
result.set_shape(result.shape.merge_with(new_shape))
return result
def _call_and_reshape_output(
self,
fn,
event_shape_list=None,
static_event_shape_list=None):
"""Calls `fn` and appropriately reshapes its output."""
with ops.control_dependencies(self._runtime_assertions):
if event_shape_list is None:
event_shape_list = [self._event_shape_tensor()]
if static_event_shape_list is None:
static_event_shape_list = [self.event_shape]
new_shape = array_ops.concat(
[self._batch_shape_unexpanded] + event_shape_list, axis=0)
result = array_ops.reshape(fn(), new_shape)
if (self.batch_shape.ndims is not None and
self.event_shape.ndims is not None):
event_shape = tensor_shape.TensorShape([])
for rss in static_event_shape_list:
event_shape = event_shape.concatenate(rss)
static_shape = result.shape.merge_with(
self.batch_shape.concatenate(event_shape))
result.set_shape(static_shape)
return result
def _validate_sample_arg(self, x):
"""Helper which validates sample arg, e.g., input to `log_prob`."""
with ops.name_scope(name="validate_sample_arg", values=[x]):
x_ndims = (array_ops.rank(x) if x.shape.ndims is None else x.shape.ndims)
event_ndims = (array_ops.size(self.event_shape_tensor())
if self.event_shape.ndims is None
else self.event_shape.ndims)
batch_ndims = (
array_ops.size(self._batch_shape_unexpanded)
if self.batch_shape.ndims is None else self.batch_shape.ndims)
expected_batch_event_ndims = batch_ndims + event_ndims
if (isinstance(x_ndims, int) and
isinstance(expected_batch_event_ndims, int)):
if x_ndims < expected_batch_event_ndims:
raise NotImplementedError(
"Broadcasting is not supported; too few batch and event dims "
"(expected at least {}, saw {}).".format(
expected_batch_event_ndims, x_ndims))
ndims_assertion = []
elif self.validate_args:
ndims_assertion = [
check_ops.assert_greater_equal(
x_ndims,
expected_batch_event_ndims,
message=("Broadcasting is not supported; too few "
"batch and event dims."),
name="assert_batch_and_event_ndims_large_enough"),
]
if (self.batch_shape.is_fully_defined() and
self.event_shape.is_fully_defined()):
expected_batch_event_shape = np.int32(self.batch_shape.concatenate(
self.event_shape).as_list())
else:
expected_batch_event_shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], axis=0)
sample_ndims = x_ndims - expected_batch_event_ndims
if isinstance(sample_ndims, int):
sample_ndims = max(sample_ndims, 0)
if (isinstance(sample_ndims, int) and
x.shape[sample_ndims:].is_fully_defined()):
actual_batch_event_shape = np.int32(x.shape[sample_ndims:].as_list())
else:
sample_ndims = math_ops.maximum(sample_ndims, 0)
actual_batch_event_shape = array_ops.shape(x)[sample_ndims:]
if (isinstance(expected_batch_event_shape, np.ndarray) and
isinstance(actual_batch_event_shape, np.ndarray)):
if any(expected_batch_event_shape != actual_batch_event_shape):
raise NotImplementedError("Broadcasting is not supported; "
"unexpected batch and event shape "
"(expected {}, saw {}).".format(
expected_batch_event_shape,
actual_batch_event_shape))
# We need to set the final runtime-assertions to `ndims_assertion` since
        # it's possible this assertion was created. We could add a condition to
# only do so if `self.validate_args == True`, however this is redundant
# as `ndims_assertion` already encodes this information.
runtime_assertions = ndims_assertion
elif self.validate_args:
# We need to make the `ndims_assertion` a control dep because otherwise
# TF itself might raise an exception owing to this assertion being
        # ill-defined, i.e., one cannot even compare different-rank Tensors.
with ops.control_dependencies(ndims_assertion):
shape_assertion = check_ops.assert_equal(
expected_batch_event_shape,
actual_batch_event_shape,
message=("Broadcasting is not supported; "
"unexpected batch and event shape."),
name="assert_batch_and_event_shape_same")
runtime_assertions = [shape_assertion]
else:
runtime_assertions = []
return runtime_assertions
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
"""Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
if batch_shape_static.is_fully_defined():
return np.int32(batch_shape_static.as_list()), batch_shape_static, []
with ops.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
original_size = math_ops.reduce_prod(original_shape)
implicit_dim = math_ops.equal(new_shape, -1)
size_implicit_dim = (
original_size // math_ops.maximum(1, -math_ops.reduce_prod(new_shape)))
new_ndims = array_ops.shape(new_shape)
expanded_new_shape = array_ops.where( # Assumes exactly one `-1`.
implicit_dim, array_ops.fill(new_ndims, size_implicit_dim), new_shape)
validations = [] if not validate else [
check_ops.assert_rank(
original_shape, 1, message="Original shape must be a vector."),
check_ops.assert_rank(
new_shape, 1, message="New shape must be a vector."),
check_ops.assert_less_equal(
math_ops.count_nonzero(implicit_dim, dtype=dtypes.int32),
1,
message="At most one dimension can be unknown."),
check_ops.assert_positive(
expanded_new_shape, message="Shape elements must be >=-1."),
check_ops.assert_equal(
math_ops.reduce_prod(expanded_new_shape),
original_size,
message="Shape sizes do not match."),
]
return expanded_new_shape, batch_shape_static, validations
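# Worked example for `calculate_reshape` (illustrative): with
# original_shape = [6] and new_shape = [1, 2, -1], original_size = 6,
# -reduce_prod(new_shape) = 2, so the implicit dimension resolves to
# 6 // 2 = 3 and the expanded shape is [1, 2, 3], matching the
# `BatchReshape` example in this module.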
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def validate_init_args_statically(distribution, batch_shape):
"""Helper to __init__ which makes or raises assertions."""
if batch_shape.shape.ndims is not None:
if batch_shape.shape.ndims != 1:
raise ValueError("`batch_shape` must be a vector "
"(saw rank: {}).".format(batch_shape.shape.ndims))
batch_shape_static = tensor_util.constant_value_as_shape(batch_shape)
batch_size_static = batch_shape_static.num_elements()
dist_batch_size_static = distribution.batch_shape.num_elements()
if batch_size_static is not None and dist_batch_size_static is not None:
if batch_size_static != dist_batch_size_static:
raise ValueError("`batch_shape` size ({}) must match "
"`distribution.batch_shape` size ({}).".format(
batch_size_static, dist_batch_size_static))
if batch_shape_static.dims is not None:
if any(
dim.value is not None and
dim.value < 1 for dim in batch_shape_static.dims):
raise ValueError("`batch_shape` elements must be >=-1.")
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/batch_reshape.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Chi2 distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import gamma
from tensorflow.python.util import deprecation
__all__ = [
"Chi2",
"Chi2WithAbsDf",
]
class Chi2(gamma.Gamma):
"""Chi2 distribution.
  The Chi2 distribution is defined over positive real numbers using a
  degrees-of-freedom ("df") parameter.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; df, x > 0) = x**(0.5 df - 1) exp(-0.5 x) / Z
Z = 2**(0.5 df) Gamma(0.5 df)
```
where:
* `df` denotes the degrees of freedom,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The Chi2 distribution is a special case of the Gamma distribution, i.e.,
```python
Chi2(df) = Gamma(concentration=0.5 * df, rate=0.5)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
validate_args=False,
allow_nan_stats=True,
name="Chi2"):
"""Construct Chi2 distributions with parameter `df`.
Args:
df: Floating point tensor, the degrees of freedom of the
distribution(s). `df` must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
    # Even though all stats of Chi2 are defined for valid parameters, this is
    # not true in the parent class "Gamma"; therefore, forcing
    # allow_nan_stats=False in the parent class would only result in
    # unnecessary asserts.
with ops.name_scope(name, values=[df]) as name:
with ops.control_dependencies([
check_ops.assert_positive(df),
] if validate_args else []):
self._df = array_ops.identity(df, name="df")
super(Chi2, self).__init__(
concentration=0.5 * self._df,
rate=constant_op.constant(0.5, dtype=self._df.dtype),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@staticmethod
def _param_shapes(sample_shape):
return {"df": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def df(self):
return self._df
class Chi2WithAbsDf(Chi2):
"""Chi2 with parameter transform `df = floor(abs(df))`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
validate_args=False,
allow_nan_stats=True,
name="Chi2WithAbsDf"):
parameters = dict(locals())
with ops.name_scope(name, values=[df]) as name:
super(Chi2WithAbsDf, self).__init__(
df=math_ops.floor(
math_ops.abs(df, name="abs_df"),
name="floor_abs_df"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
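# A minimal sketch relating Chi2 to its Gamma parent (illustrative only;
# assumes a TF 1.x session/graph environment):
#
#   dist = Chi2(df=4.)
#   dist.mean()      # == df == 4.
#   dist.variance()  # == 2 * df == 8.
#   # Equivalent construction via the parent class:
#   # Gamma(concentration=2., rate=0.5) has the same density.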
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/chi2.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Binomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
_binomial_sample_note = """
For each batch member of counts `value`, `P[value]` is the probability that
after sampling `self.total_count` draws from this Binomial distribution, the
number of successes is `value`. Since different sequences of draws can result in
the same counts, the probability includes a combinatorial coefficient.
Note: `value` must be a non-negative tensor with dtype `dtype` and whose shape
can be broadcast with `self.probs` and `self.total_count`. `value` is only legal
if it is less than or equal to `self.total_count` and its components are equal
to integer values.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _bdtr(k, n, p):
"""The binomial cumulative distribution function.
Args:
k: floating point `Tensor`.
n: floating point `Tensor`.
p: floating point `Tensor`.
Returns:
    `sum_{j=0}^k (n choose j) p^j (1 - p)^(n - j)`.
"""
# Trick for getting safe backprop/gradients into n, k when
# betainc(a = 0, ..) = nan
# Write:
# where(unsafe, safe_output, betainc(where(unsafe, safe_input, input)))
ones = array_ops.ones_like(n - k)
k_eq_n = math_ops.equal(k, n)
safe_dn = array_ops.where(k_eq_n, ones, n - k)
dk = math_ops.betainc(a=safe_dn, b=k + 1, x=1 - p)
return array_ops.where(k_eq_n, ones, dk)
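# Identity used by `_bdtr` above (a standard fact about the regularized
# incomplete beta function I_x(a, b)):
#
#   sum_{j=0}^{k} (n choose j) p^j (1 - p)^(n - j) = I_{1 - p}(n - k, k + 1),
#
# which is what `math_ops.betainc(a=n - k, b=k + 1, x=1 - p)` evaluates; the
# `where` wrapper substitutes 1 when k == n, where direct evaluation would hit
# betainc(a=0, ...) and produce NaNs in the value and its gradients.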
class Binomial(distribution.Distribution):
"""Binomial distribution.
This distribution is parameterized by `probs`, a (batch of) probabilities for
drawing a `1`, and `total_count`, the number of trials per draw from the
Binomial.
#### Mathematical Details
The Binomial is a distribution over the number of `1`'s in `total_count`
independent trials, with each trial having the same probability of `1`, i.e.,
`probs`.
The probability mass function (pmf) is,
```none
pmf(k; n, p) = p**k (1 - p)**(n - k) / Z
Z = k! (n - k)! / n!
```
where:
* `total_count = n`,
* `probs = p`,
* `Z` is the normalizing constant, and,
* `n!` is the factorial of `n`.
#### Examples
Create a single distribution, corresponding to 5 coin flips.
```python
dist = Binomial(total_count=5., probs=.5)
```
Create a single distribution (using logits), corresponding to 5 coin flips.
```python
dist = Binomial(total_count=5., logits=0.)
```
Create 3 distributions, with the third distribution most likely to have
successes.
```python
p = [.2, .3, .8]
# n will be broadcast to [4., 4., 4.], to match p.
dist = Binomial(total_count=4., probs=p)
```
The distribution functions can be evaluated on counts.
```python
# counts same shape as p.
counts = [1., 2, 3]
dist.prob(counts) # Shape [3]
# p will be broadcast to [[.2, .3, .8], [.2, .3, .8]] to match counts.
counts = [[1., 2, 1], [2, 2, 4]]
dist.prob(counts) # Shape [2, 3]
# p will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7, 3]
```
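As an illustrative check, the mean and variance follow `n * p` and
`n * p * (1 - p)`:
```python
# Reusing `dist = Binomial(total_count=4., probs=[.2, .3, .8])` from above.
dist.mean()      # ==> [0.8, 1.2, 3.2]
dist.variance()  # ==> [0.64, 0.84, 0.64]
```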
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
total_count,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="Binomial"):
"""Initialize a batch of Binomial distributions.
Args:
total_count: Non-negative floating point tensor with shape broadcastable
to `[N1,..., Nm]` with `m >= 0` and the same dtype as `probs` or
`logits`. Defines this as a batch of `N1 x ... x Nm` different Binomial
distributions. Its components should be equal to integer values.
logits: Floating point tensor representing the log-odds of a
positive event with shape broadcastable to `[N1,..., Nm]` `m >= 0`, and
the same dtype as `total_count`. Each entry represents logits for the
probability of success for independent Binomial distributions. Only one
of `logits` or `probs` should be passed in.
probs: Positive floating point tensor with shape broadcastable to
`[N1,..., Nm]` `m >= 0`, `probs in [0, 1]`. Each entry represents the
probability of success for independent Binomial distributions. Only one
of `logits` or `probs` should be passed in.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[total_count, logits, probs]) as name:
self._total_count = self._maybe_assert_valid_total_count(
ops.convert_to_tensor(total_count, name="total_count"),
validate_args)
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
validate_args=validate_args,
name=name)
super(Binomial, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._total_count,
self._logits,
self._probs],
name=name)
@property
def total_count(self):
"""Number of trials."""
return self._total_count
@property
def logits(self):
"""Log-odds of drawing a `1`."""
return self._logits
@property
def probs(self):
"""Probability of drawing a `1`."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.total_count),
array_ops.shape(self.probs))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.total_count.get_shape(),
self.probs.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
@distribution_util.AppendDocstring(_binomial_sample_note)
def _log_prob(self, counts):
return self._log_unnormalized_prob(counts) - self._log_normalization(counts)
@distribution_util.AppendDocstring(_binomial_sample_note)
def _prob(self, counts):
return math_ops.exp(self._log_prob(counts))
def _cdf(self, counts):
counts = self._maybe_assert_valid_sample(counts)
probs = self.probs
if not (counts.shape.is_fully_defined()
and self.probs.shape.is_fully_defined()
and counts.shape.is_compatible_with(self.probs.shape)):
# If both shapes are well defined and equal, we skip broadcasting.
probs += array_ops.zeros_like(counts)
counts += array_ops.zeros_like(self.probs)
return _bdtr(k=counts, n=self.total_count, p=probs)
def _log_unnormalized_prob(self, counts):
counts = self._maybe_assert_valid_sample(counts)
return (counts * math_ops.log(self.probs) +
(self.total_count - counts) * math_ops.log1p(-self.probs))
def _log_normalization(self, counts):
counts = self._maybe_assert_valid_sample(counts)
return (math_ops.lgamma(1. + self.total_count - counts)
+ math_ops.lgamma(1. + counts)
- math_ops.lgamma(1. + self.total_count))
def _mean(self):
return self.total_count * self.probs
def _variance(self):
return self._mean() * (1. - self.probs)
@distribution_util.AppendDocstring(
"""Note that when `(1 + total_count) * probs` is an integer, there are
actually two modes. Namely, `(1 + total_count) * probs` and
`(1 + total_count) * probs - 1` are both modes. Here we return only the
larger of the two modes.""")
def _mode(self):
return math_ops.floor((1. + self.total_count) * self.probs)
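# Illustrative check: with total_count=5. and probs=0.5, (1 + 5) * 0.5 = 3. is
# an integer, so 2 and 3 are both modes; this method returns floor(3.) = 3.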
def _maybe_assert_valid_total_count(self, total_count, validate_args):
if not validate_args:
return total_count
return control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
total_count,
message="total_count must be non-negative."),
distribution_util.assert_integer_form(
total_count,
message="total_count cannot contain fractional components."),
], total_count)
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return control_flow_ops.with_dependencies([
check_ops.assert_less_equal(
counts, self.total_count,
message="counts are not less than or equal to n."),
], counts)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/binomial.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MaskedAutoregressiveFlow bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import core as layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import template as template_ops
from tensorflow.python.ops import variable_scope as variable_scope_lib
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"MaskedAutoregressiveFlow",
"masked_autoregressive_default_template",
"masked_dense",
]
class MaskedAutoregressiveFlow(bijector.Bijector):
"""Affine MaskedAutoregressiveFlow bijector for vector-valued events.
The affine autoregressive flow [(Papamakarios et al., 2017)][3] provides a
relatively simple framework for user-specified (deep) architectures to learn
a distribution over vector-valued events. Regarding terminology,
"Autoregressive models decompose the joint density as a product of
conditionals, and model each conditional in turn. Normalizing flows
transform a base density (e.g. a standard Gaussian) into the target density
by an invertible transformation with tractable Jacobian."
[(Papamakarios et al., 2017)][3]
In other words, the "autoregressive property" is equivalent to the
decomposition, `p(x) = prod{ p(x[i] | x[0:i]) : i=0, ..., d }`. The provided
`shift_and_log_scale_fn`, `masked_autoregressive_default_template`, achieves
this property by zeroing out weights in its `masked_dense` layers.
In the `tfp` framework, a "normalizing flow" is implemented as a
`tfp.bijectors.Bijector`. The `forward` "autoregression"
is implemented using a `tf.while_loop` and a deep neural network (DNN) with
masked weights such that the autoregressive property is automatically met in
the `inverse`.
A `TransformedDistribution` using `MaskedAutoregressiveFlow(...)` uses the
(expensive) forward-mode calculation to draw samples and the (cheap)
reverse-mode calculation to compute log-probabilities. Conversely, a
`TransformedDistribution` using `Invert(MaskedAutoregressiveFlow(...))` uses
the (expensive) forward-mode calculation to compute log-probabilities and the
(cheap) reverse-mode calculation to compute samples. See "Example Use"
[below] for more details.
Given a `shift_and_log_scale_fn`, the forward and inverse transformations are
(a sequence of) affine transformations. A "valid" `shift_and_log_scale_fn`
must compute each `shift` (aka `loc` or "mu" in [Germain et al. (2015)][1])
and `log(scale)` (aka "alpha" in [Germain et al. (2015)][1]) such that each
are broadcastable with the arguments to `forward` and `inverse`, i.e., such
that the calculations in `forward`, `inverse` [below] are possible.
For convenience, `masked_autoregressive_default_template` is offered as a
possible `shift_and_log_scale_fn` function. It implements the MADE
architecture [(Germain et al., 2015)][1]. MADE is a feed-forward network that
computes a `shift` and `log(scale)` using `masked_dense` layers in a deep
neural network. Weights are masked to ensure the autoregressive property. It
is possible that this architecture is suboptimal for your task. To build
alternative networks, either change the arguments to
`masked_autoregressive_default_template`, use the `masked_dense` function to
roll-out your own, or use some other architecture, e.g., using `tf.layers`.
Warning: no attempt is made to validate that the `shift_and_log_scale_fn`
enforces the "autoregressive property".
Assuming `shift_and_log_scale_fn` has valid shape and autoregressive
semantics, the forward transformation is
```python
def forward(x):
y = zeros_like(x)
event_size = x.shape[-1]
for _ in range(event_size):
shift, log_scale = shift_and_log_scale_fn(y)
y = x * math_ops.exp(log_scale) + shift
return y
```
and the inverse transformation is
```python
def inverse(y):
shift, log_scale = shift_and_log_scale_fn(y)
return (y - shift) / math_ops.exp(log_scale)
```
Notice that the `inverse` does not need a for-loop. This is because in the
forward pass each calculation of `shift` and `log_scale` is based on the `y`
calculated so far (not `x`). In the `inverse`, `y` is fully known, so a single
evaluation of `shift_and_log_scale_fn` already matches the scaling used in
`forward` after `event_size` passes, i.e., the "last" `y` used to compute
`shift`, `log_scale`. (Roughly speaking, this
also proves the transform is bijective.)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
dims = 5
# A common choice for a normalizing flow is to use a Gaussian for the base
# distribution. (However, any continuous distribution would work.) E.g.,
maf = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=0., scale=1.),
bijector=tfb.MaskedAutoregressiveFlow(
shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
hidden_layers=[512, 512])),
event_shape=[dims])
x = maf.sample() # Expensive; uses `tf.while_loop`, no Bijector caching.
maf.log_prob(x) # Almost free; uses Bijector caching.
maf.log_prob(0.) # Cheap; no `tf.while_loop` despite no Bijector caching.
# [Papamakarios et al. (2017)][3] also describe an Inverse Autoregressive
# Flow [(Kingma et al., 2016)][2]:
iaf = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=0., scale=1.),
bijector=tfb.Invert(tfb.MaskedAutoregressiveFlow(
shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
hidden_layers=[512, 512]))),
event_shape=[dims])
x = iaf.sample() # Cheap; no `tf.while_loop` despite no Bijector caching.
iaf.log_prob(x) # Almost free; uses Bijector caching.
iaf.log_prob(0.) # Expensive; uses `tf.while_loop`, no Bijector caching.
# In many (if not most) cases the default `shift_and_log_scale_fn` will be a
# poor choice. Here's an example of using a "shift only" version and with a
# different number/depth of hidden layers.
shift_only = True
maf_no_scale_hidden2 = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=0., scale=1.),
bijector=tfb.MaskedAutoregressiveFlow(
tfb.masked_autoregressive_default_template(
hidden_layers=[32],
shift_only=shift_only),
is_constant_jacobian=shift_only),
event_shape=[dims])
```
#### References
[1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
Masked Autoencoder for Distribution Estimation. In _International
Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
[2]: Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya
Sutskever, and Max Welling. Improving Variational Inference with Inverse
Autoregressive Flow. In _Neural Information Processing Systems_, 2016.
https://arxiv.org/abs/1606.04934
[3]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
"""
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
shift_and_log_scale_fn,
is_constant_jacobian=False,
validate_args=False,
unroll_loop=False,
name=None):
"""Creates the MaskedAutoregressiveFlow bijector.
Args:
shift_and_log_scale_fn: Python `callable` which computes `shift` and
`log_scale` from both the forward domain (`x`) and the inverse domain
(`y`). Calculation must respect the "autoregressive property" (see class
docstring). Suggested default
`masked_autoregressive_default_template(hidden_layers=...)`. Typically
the function contains `tf.Variables` and is wrapped using
`tf.compat.v1.make_template`. Returning `None` for either (or both) of
`shift`, `log_scale` is equivalent to (but more efficient than)
returning zero.
is_constant_jacobian: Python `bool`. Default: `False`. When `True` the
implementation assumes `log_scale` does not depend on the forward domain
(`x`) or inverse domain (`y`) values. (No validation is made;
`is_constant_jacobian=False` is always safe but possibly computationally
inefficient.)
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
unroll_loop: Python `bool` indicating whether the `tf.while_loop` in
`_forward` should be replaced with a static for loop. Requires that the
final dimension of `x` be known at graph construction time. Defaults to
`False`.
name: Python `str`, name given to ops managed by this object.
"""
name = name or "masked_autoregressive_flow"
self._shift_and_log_scale_fn = shift_and_log_scale_fn
self._unroll_loop = unroll_loop
super(MaskedAutoregressiveFlow, self).__init__(
forward_min_event_ndims=1,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
def _forward(self, x):
if self._unroll_loop:
event_size = tensor_shape.dimension_value(
x.shape.with_rank_at_least(1)[-1])
if event_size is None:
raise ValueError(
"The final dimension of `x` must be known at graph construction "
"time if `unroll_loop=True`. `x.shape: %r`" % x.shape)
y = array_ops.zeros_like(x, name="y0")
for _ in range(event_size):
shift, log_scale = self._shift_and_log_scale_fn(y)
# next_y = scale * x + shift
next_y = x
if log_scale is not None:
next_y *= math_ops.exp(log_scale)
if shift is not None:
next_y += shift
y = next_y
return y
event_size = array_ops.shape(x)[-1]
# If the event size is available at graph construction time, we can inform
# the graph compiler of the maximum number of steps. If not,
# static_event_size will be None, and the maximum_iterations argument will
# have no effect.
static_event_size = tensor_shape.dimension_value(
x.shape.with_rank_at_least(1)[-1])
y0 = array_ops.zeros_like(x, name="y0")
# call the template once to ensure creation
_ = self._shift_and_log_scale_fn(y0)
def _loop_body(index, y0):
"""While-loop body for autoregression calculation."""
# Set caching device to avoid re-getting the tf.Variable for every while
# loop iteration.
with variable_scope_lib.variable_scope(
variable_scope_lib.get_variable_scope()) as vs:
if vs.caching_device is None:
vs.set_caching_device(lambda op: op.device)
shift, log_scale = self._shift_and_log_scale_fn(y0)
y = x
if log_scale is not None:
y *= math_ops.exp(log_scale)
if shift is not None:
y += shift
return index + 1, y
_, y = control_flow_ops.while_loop(
cond=lambda index, _: index < event_size,
body=_loop_body,
loop_vars=(0, y0),
maximum_iterations=static_event_size)
return y
def _inverse(self, y):
shift, log_scale = self._shift_and_log_scale_fn(y)
x = y
if shift is not None:
x -= shift
if log_scale is not None:
x *= math_ops.exp(-log_scale)
return x
def _inverse_log_det_jacobian(self, y):
_, log_scale = self._shift_and_log_scale_fn(y)
if log_scale is None:
return constant_op.constant(0., dtype=y.dtype, name="ildj")
return -math_ops.reduce_sum(log_scale, axis=-1)
MASK_INCLUSIVE = "inclusive"
MASK_EXCLUSIVE = "exclusive"
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _gen_slices(num_blocks, n_in, n_out, mask_type=MASK_EXCLUSIVE):
"""Generate the slices for building an autoregressive mask."""
# TODO(b/67594795): Better support of dynamic shape.
slices = []
col = 0
d_in = n_in // num_blocks
d_out = n_out // num_blocks
row = d_out if mask_type == MASK_EXCLUSIVE else 0
for _ in range(num_blocks):
row_slice = slice(row, None)
col_slice = slice(col, col + d_in)
slices.append([row_slice, col_slice])
col += d_in
row += d_out
return slices
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _gen_mask(num_blocks,
n_in,
n_out,
mask_type=MASK_EXCLUSIVE,
dtype=dtypes.float32):
"""Generate the mask for building an autoregressive dense layer."""
# TODO(b/67594795): Better support of dynamic shape.
mask = np.zeros([n_out, n_in], dtype=dtype.as_numpy_dtype())
slices = _gen_slices(num_blocks, n_in, n_out, mask_type=mask_type)
for [row_slice, col_slice] in slices:
mask[row_slice, col_slice] = 1
return mask
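# Illustrative example (small values chosen for this sketch): with
# num_blocks=3, n_in=3, n_out=6 and MASK_EXCLUSIVE, d_in=1, d_out=2 and the
# resulting [n_out, n_in] mask is
#   [[0, 0, 0],
#    [0, 0, 0],
#    [1, 0, 0],
#    [1, 0, 0],
#    [1, 1, 0],
#    [1, 1, 0]]
# i.e. output block j may depend only on input blocks strictly before j.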
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def masked_dense(inputs,
units,
num_blocks=None,
exclusive=False,
kernel_initializer=None,
reuse=None,
name=None,
*args,
**kwargs):
"""A autoregressively masked dense layer.
Analogous to `tf.compat.v1.layers.dense`.
See [Germain et al. (2015)][1] for detailed explanation.
Arguments:
inputs: Tensor input.
units: Python `int` scalar representing the dimensionality of the output
space.
num_blocks: Python `int` scalar representing the number of blocks for the
MADE masks.
exclusive: Python `bool` scalar representing whether to zero the diagonal of
the mask, used for the first layer of a MADE.
kernel_initializer: Initializer function for the weight matrix. If `None`
(default), weights are initialized using the
`tf.glorot_normal_initializer`.
reuse: Python `bool` scalar representing whether to reuse the weights of a
previous layer by the same name.
name: Python `str` used to describe ops managed by this function.
*args: `tf.compat.v1.layers.dense` arguments.
**kwargs: `tf.compat.v1.layers.dense` keyword arguments.
Returns:
Output tensor.
Raises:
NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
graph execution.
#### References
[1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
Masked Autoencoder for Distribution Estimation. In _International
Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
"""
# TODO(b/67594795): Better support of dynamic shape.
input_depth = tensor_shape.dimension_value(
inputs.shape.with_rank_at_least(1)[-1])
if input_depth is None:
raise NotImplementedError(
"Rightmost dimension must be known prior to graph execution.")
mask = _gen_mask(num_blocks, input_depth, units,
MASK_EXCLUSIVE if exclusive else MASK_INCLUSIVE).T
if kernel_initializer is None:
kernel_initializer = init_ops.glorot_normal_initializer()
def masked_initializer(shape, dtype=None, partition_info=None):
return mask * kernel_initializer(shape, dtype, partition_info)
with ops.name_scope(name, "masked_dense", [inputs, units, num_blocks]):
layer = layers.Dense(
units,
kernel_initializer=masked_initializer,
kernel_constraint=lambda x: mask * x,
name=name,
dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse,
*args,
**kwargs)
return layer.apply(inputs)
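# Illustrative usage sketch (layer sizes are arbitrary): for an input `x` whose
# rightmost dimension `d` is statically known, a MADE hidden layer can be built
# as
#   h = masked_dense(inputs=x, units=64, num_blocks=d, exclusive=True,
#                    activation=nn_ops.relu)
# with subsequent (non-first) layers using `exclusive=False` (the default).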
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def masked_autoregressive_default_template(
hidden_layers,
shift_only=False,
activation=nn_ops.relu,
log_scale_min_clip=-5.,
log_scale_max_clip=3.,
log_scale_clip_gradient=False,
name=None,
*args,
**kwargs):
"""Build the Masked Autoregressive Density Estimator (Germain et al., 2015).
This will be wrapped in a make_template to ensure the variables are only
created once. It takes the input and returns the `loc` ("mu" in [Germain et
al. (2015)][1]) and `log_scale` ("alpha" in [Germain et al. (2015)][1]) from
the MADE network.
Warning: This function uses `masked_dense` to create randomly initialized
`tf.Variables`. It is presumed that these will be fit, just as you would any
other neural architecture which uses `tf.compat.v1.layers.dense`.
#### About Hidden Layers
Each element of `hidden_layers` should be greater than the `input_depth`
(i.e., `input_depth = tf.shape(input)[-1]` where `input` is the input to the
neural network). This is necessary to ensure the autoregressivity property.
#### About Clipping
This function also optionally clips the `log_scale` (but possibly not its
gradient). This is useful because if `log_scale` is too small/large it might
underflow/overflow making it impossible for the `MaskedAutoregressiveFlow`
bijector to implement a bijection. Additionally, the `log_scale_clip_gradient`
`bool` indicates whether the gradient should also be clipped. The default does
not clip the gradient; this is useful because it still provides gradient
information (for fitting) yet solves the numerical stability problem. I.e.,
`log_scale_clip_gradient = False` means
`grad[exp(clip(x))] = grad[x] exp(clip(x))` rather than the usual
`grad[clip(x)] exp(clip(x))`.
Args:
hidden_layers: Python `list`-like of non-negative integer scalars
indicating the number of units in each hidden layer. Default: `[512, 512]`.
shift_only: Python `bool` indicating if only the `shift` term shall be
computed. Default: `False`.
activation: Activation function (callable). Explicitly setting to `None`
implies a linear activation.
log_scale_min_clip: `float`-like scalar `Tensor`, or a `Tensor` with the
same shape as `log_scale`. The minimum value to clip by. Default: -5.
log_scale_max_clip: `float`-like scalar `Tensor`, or a `Tensor` with the
same shape as `log_scale`. The maximum value to clip by. Default: 3.
log_scale_clip_gradient: Python `bool` indicating that the gradient of
`tf.clip_by_value` should be preserved. Default: `False`.
name: A name for ops managed by this function. Default:
"masked_autoregressive_default_template".
*args: `tf.compat.v1.layers.dense` arguments.
**kwargs: `tf.compat.v1.layers.dense` keyword arguments.
Returns:
shift: `Float`-like `Tensor` of shift terms (the "mu" in
[Germain et al. (2015)][1]).
log_scale: `Float`-like `Tensor` of log(scale) terms (the "alpha" in
[Germain et al. (2015)][1]).
Raises:
NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
graph execution.
#### References
[1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
Masked Autoencoder for Distribution Estimation. In _International
Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
"""
name = name or "masked_autoregressive_default_template"
with ops.name_scope(name, values=[log_scale_min_clip, log_scale_max_clip]):
def _fn(x):
"""MADE parameterized via `masked_autoregressive_default_template`."""
# TODO(b/67594795): Better support of dynamic shape.
input_depth = tensor_shape.dimension_value(
x.shape.with_rank_at_least(1)[-1])
if input_depth is None:
raise NotImplementedError(
"Rightmost dimension must be known prior to graph execution.")
input_shape = (
np.int32(x.shape.as_list())
if x.shape.is_fully_defined() else array_ops.shape(x))
for i, units in enumerate(hidden_layers):
x = masked_dense(
inputs=x,
units=units,
num_blocks=input_depth,
exclusive=True if i == 0 else False,
activation=activation,
*args,
**kwargs)
x = masked_dense(
inputs=x,
units=(1 if shift_only else 2) * input_depth,
num_blocks=input_depth,
activation=None,
*args,
**kwargs)
if shift_only:
x = array_ops.reshape(x, shape=input_shape)
return x, None
x = array_ops.reshape(
x, shape=array_ops.concat([input_shape, [2]], axis=0))
shift, log_scale = array_ops.unstack(x, num=2, axis=-1)
which_clip = (
math_ops.clip_by_value
if log_scale_clip_gradient else _clip_by_value_preserve_grad)
log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip)
return shift, log_scale
return template_ops.make_template(name, _fn)
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _clip_by_value_preserve_grad(x, clip_value_min, clip_value_max, name=None):
"""Clips input while leaving gradient unaltered."""
with ops.name_scope(name, "clip_by_value_preserve_grad",
[x, clip_value_min, clip_value_max]):
clip_x = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
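# The returned value equals `clip_x` in the forward pass, while the gradient
# is that of the identity, since the difference `clip_x - x` is wrapped in
# `stop_gradient`.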
return x + array_ops.stop_gradient(clip_x - x)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batch Norm bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"BatchNormalization",
]
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _undo_batch_normalization(x,
mean,
variance,
offset,
scale,
variance_epsilon,
name=None):
r"""Inverse of tf.nn.batch_normalization.
Args:
x: Input `Tensor` of arbitrary dimensionality.
mean: A mean `Tensor`.
variance: A variance `Tensor`.
offset: An offset `Tensor`, often denoted `beta` in equations, or None. If
present, will be added to the normalized tensor.
scale: A scale `Tensor`, often denoted `gamma` in equations, or `None`. If
present, the scale is applied to the normalized tensor.
variance_epsilon: A small `float` added to the minibatch `variance` to
prevent dividing by zero.
name: A name for this operation (optional).
Returns:
batch_unnormalized: The de-normalized, de-scaled, de-offset `Tensor`.
"""
with ops.name_scope(name, "undo_batchnorm",
[x, mean, variance, scale, offset]):
# inv = math_ops.rsqrt(variance + variance_epsilon)
# if scale is not None:
# inv *= scale
# return x * inv + (
# offset - mean * inv if offset is not None else -mean * inv)
rescale = math_ops.sqrt(variance + variance_epsilon)
if scale is not None:
rescale /= scale
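# Algebraically, if z = (u - mean) / sqrt(variance + eps) * scale + offset,
# then u = (z - offset) * sqrt(variance + eps) / scale + mean. Here `x` plays
# the role of z and `rescale = sqrt(variance + eps) / scale`, giving the
# expression below.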
batch_unnormalized = x * rescale + (
mean - offset * rescale if offset is not None else mean)
return batch_unnormalized
class BatchNormalization(bijector.Bijector):
"""Compute `Y = g(X) s.t.
X = g^-1(Y) = (Y - mean(Y)) / std(Y)`.
Applies Batch Normalization [(Ioffe and Szegedy, 2015)][1] to samples from a
data distribution. This can be used to stabilize training of normalizing
flows ([Papamakarios et al., 2017][3]; [Dinh et al., 2017][2]).
When training Deep Neural Networks (DNNs), it is common practice to
normalize or whiten features by shifting them to have zero mean and
scaling them to have unit variance.
The `inverse()` method of the `BatchNormalization` bijector, which is used in
the log-likelihood computation of data samples, implements the normalization
procedure (shift-and-scale) using the mean and standard deviation of the
current minibatch.
Conversely, the `forward()` method of the bijector de-normalizes samples (e.g.,
`X * std(Y) + mean(Y)`) using the running-average mean and standard deviation
computed at training time. De-normalization is useful for sampling.
```python
dist = tfd.TransformedDistribution(
    distribution=tfd.Normal(loc=0., scale=1.),
    bijector=tfb.BatchNormalization())
y = tfd.Normal(loc=1., scale=2.).sample(100)  # ~ N(1, 2)
x = dist.bijector.inverse(y) # ~ N(0, 1)
y = dist.sample() # ~ N(1, 2)
```
During training time, `BatchNorm.inverse` and `BatchNorm.forward` are not
guaranteed to be inverses of each other because `inverse(y)` uses statistics
of the current minibatch, while `forward(x)` uses running-average statistics
accumulated from training. In other words,
`BatchNorm.inverse(BatchNorm.forward(...))` and
`BatchNorm.forward(BatchNorm.inverse(...))` will be identical when
`training=False` but may be different when `training=True`.
#### References
[1]: Sergey Ioffe and Christian Szegedy. Batch Normalization: Accelerating
Deep Network Training by Reducing Internal Covariate Shift. In
_International Conference on Machine Learning_, 2015.
https://arxiv.org/abs/1502.03167
[2]: Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density Estimation
using Real NVP. In _International Conference on Learning
Representations_, 2017. https://arxiv.org/abs/1605.08803
[3]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
"""
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
batchnorm_layer=None,
training=True,
validate_args=False,
name="batch_normalization"):
"""Instantiates the `BatchNorm` bijector.
Args:
batchnorm_layer: `tf.compat.v1.layers.BatchNormalization` layer object. If
`None`, defaults to
`tf.compat.v1.layers.BatchNormalization(gamma_constraint=lambda x: nn.relu(x)
+ 1e-6)`. This ensures positivity of the scale variable.
training: If True, updates running-average statistics during call to
`inverse()`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: If `batchnorm_layer` is not an instance of
`tf.compat.v1.layers.BatchNormalization`, or if it is specified with
`renorm=True` or a virtual batch size.
"""
# Scale must be positive.
g_constraint = lambda x: nn.relu(x) + 1e-6
self.batchnorm = batchnorm_layer or normalization.BatchNormalization(
gamma_constraint=g_constraint)
self._validate_bn_layer(self.batchnorm)
self._training = training
if isinstance(self.batchnorm.axis, int):
forward_min_event_ndims = 1
else:
forward_min_event_ndims = len(self.batchnorm.axis)
super(BatchNormalization, self).__init__(
forward_min_event_ndims=forward_min_event_ndims,
validate_args=validate_args,
name=name)
def _validate_bn_layer(self, layer):
"""Check for valid BatchNormalization layer.
Args:
layer: Instance of `tf.compat.v1.layers.BatchNormalization`.
Raises:
ValueError: If batchnorm_layer argument is not an instance of
`tf.compat.v1.layers.BatchNormalization`, or if
`batchnorm_layer.renorm=True` or
if `batchnorm_layer.virtual_batch_size` is specified.
"""
if not isinstance(layer, normalization.BatchNormalization):
raise ValueError(
"batchnorm_layer must be an instance of BatchNormalization layer.")
if layer.renorm:
raise ValueError("BatchNorm Bijector does not support renormalization.")
if layer.virtual_batch_size:
raise ValueError(
"BatchNorm Bijector does not support virtual batch sizes.")
def _get_broadcast_fn(self, x):
# Compute shape to broadcast scale/shift parameters to.
if not x.shape.is_fully_defined():
raise ValueError("Input must have shape known at graph construction.")
input_shape = np.int32(x.shape.as_list())
ndims = len(input_shape)
reduction_axes = [i for i in range(ndims) if i not in self.batchnorm.axis]
# Broadcasting only necessary for single-axis batch norm where the axis is
# not the last dimension
broadcast_shape = [1] * ndims
broadcast_shape[self.batchnorm.axis[0]] = (
input_shape[self.batchnorm.axis[0]])
def _broadcast(v):
if (v is not None and len(v.get_shape()) != ndims and
reduction_axes != list(range(ndims - 1))):
return array_ops.reshape(v, broadcast_shape)
return v
return _broadcast
def _normalize(self, y):
return self.batchnorm.apply(y, training=self._training)
def _de_normalize(self, x):
# Uses the saved statistics.
if not self.batchnorm.built:
input_shape = x.get_shape()
self.batchnorm.build(input_shape)
broadcast_fn = self._get_broadcast_fn(x)
mean = broadcast_fn(self.batchnorm.moving_mean)
variance = broadcast_fn(self.batchnorm.moving_variance)
beta = broadcast_fn(self.batchnorm.beta) if self.batchnorm.center else None
gamma = broadcast_fn(self.batchnorm.gamma) if self.batchnorm.scale else None
return _undo_batch_normalization(x, mean, variance, beta, gamma,
self.batchnorm.epsilon)
def _forward(self, x):
return self._de_normalize(x)
def _inverse(self, y):
return self._normalize(y)
def _forward_log_det_jacobian(self, x):
# Uses saved statistics to compute volume distortion.
return -self._inverse_log_det_jacobian(x, use_saved_statistics=True)
def _inverse_log_det_jacobian(self, y, use_saved_statistics=False):
if not y.shape.is_fully_defined():
raise ValueError("Input must have shape known at graph construction.")
input_shape = np.int32(y.shape.as_list())
if not self.batchnorm.built:
# Create variables.
self.batchnorm.build(input_shape)
event_dims = self.batchnorm.axis
reduction_axes = [i for i in range(len(input_shape)) if i not in event_dims]
if use_saved_statistics or not self._training:
log_variance = math_ops.log(self.batchnorm.moving_variance +
self.batchnorm.epsilon)
else:
# At training-time, ildj is computed from the mean and log-variance across
# the current minibatch.
_, v = nn.moments(y, axes=reduction_axes, keepdims=True)
log_variance = math_ops.log(v + self.batchnorm.epsilon)
# `gamma` and `log Var(y)` reductions over event_dims.
# Log(total change in area from gamma term).
log_total_gamma = math_ops.reduce_sum(math_ops.log(self.batchnorm.gamma))
# Log(total change in area from log-variance term).
log_total_variance = math_ops.reduce_sum(log_variance)
# The ildj is scalar, as it does not depend on the values of x and is
# constant across minibatch elements.
return log_total_gamma - 0.5 * log_total_variance
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/batch_normalization.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CholeskyOuterProduct bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"CholeskyOuterProduct",
]
class CholeskyOuterProduct(bijector.Bijector):
"""Compute `g(X) = X @ X.T`; X is lower-triangular, positive-diagonal matrix.
Note: the upper-triangular part of X is ignored (whether or not it is zero).
The surjectivity of g as a map from the set of n x n positive-diagonal
lower-triangular matrices to the set of SPD matrices follows immediately from
executing the Cholesky factorization algorithm on an SPD matrix A to produce a
positive-diagonal lower-triangular matrix L such that `A = L @ L.T`.
To prove the injectivity of g, suppose that L_1 and L_2 are lower-triangular
with positive diagonals and satisfy `A = L_1 @ L_1.T = L_2 @ L_2.T`. Then
`inv(L_1) @ A @ inv(L_1).T = [inv(L_1) @ L_2] @ [inv(L_1) @ L_2].T = I`.
Setting `L_3 := inv(L_1) @ L_2`, that L_3 is a positive-diagonal
lower-triangular matrix follows from `inv(L_1)` being positive-diagonal
lower-triangular (which follows from the diagonal of a triangular matrix being
its spectrum), and that the product of two positive-diagonal lower-triangular
matrices is another positive-diagonal lower-triangular matrix.
A simple inductive argument (proceeding one column of L_3 at a time) shows
that, if `I = L_3 @ L_3.T`, with L_3 being lower-triangular with positive-
diagonal, then `L_3 = I`. Thus, `L_1 = L_2`, proving injectivity of g.
#### Examples
```python
bijector.CholeskyOuterProduct().forward(x=[[1., 0], [2, 1]])
# Result: [[1., 2], [2, 5]], i.e., x @ x.T
bijector.CholeskyOuterProduct().inverse(y=[[1., 2], [2, 5]])
# Result: [[1., 0], [2, 1]], i.e., cholesky(y).
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, validate_args=False, name="cholesky_outer_product"):
"""Instantiates the `CholeskyOuterProduct` bijector.
Args:
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
super(CholeskyOuterProduct, self).__init__(
forward_min_event_ndims=2,
validate_args=validate_args,
name=name)
def _forward(self, x):
if self.validate_args:
is_matrix = check_ops.assert_rank_at_least(x, 2)
shape = array_ops.shape(x)
is_square = check_ops.assert_equal(shape[-2], shape[-1])
x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
# For safety, explicitly zero-out the upper triangular part.
x = array_ops.matrix_band_part(x, -1, 0)
return math_ops.matmul(x, x, adjoint_b=True)
def _inverse(self, y):
return linalg_ops.cholesky(y)
def _forward_log_det_jacobian(self, x):
# Let Y be a symmetric, positive definite matrix and write:
# Y = X X.T
# where X is lower-triangular.
#
# Observe that,
# dY[i,j]/dX[a,b]
# = d/dX[a,b] { X[i,:] X[j,:] }
# = sum_{d=1}^p { I[i=a] I[d=b] X[j,d] + I[j=a] I[d=b] X[i,d] }
#
# To compute the Jacobian dX/dY we must represent X,Y as vectors. Since Y is
# symmetric and X is lower-triangular, we need vectors of dimension:
# d = p (p + 1) / 2
# where X, Y are p x p matrices, p > 0. We use a row-major mapping, i.e.,
# k = { i (i + 1) / 2 + j i>=j
# { undef i<j
# and assume zero-based indexes. When k is undef, the element is dropped.
# Example:
# j k
# 0 1 2 3 /
# 0 [ 0 . . . ]
# i 1 [ 1 2 . . ]
# 2 [ 3 4 5 . ]
# 3 [ 6 7 8 9 ]
# Write vec[.] to indicate transforming a matrix to vector via k(i,j). (With
# slight abuse: k(i,j)=undef means the element is dropped.)
#
# We now show d vec[Y] / d vec[X] is lower triangular. Assuming both are
# defined, observe that k(i,j) < k(a,b) iff (1) i<a or (2) i=a and j<b.
# In both cases dvec[Y]/dvec[X]@[k(i,j),k(a,b)] = 0 since:
# (1) j<=i<a thus i,j!=a.
# (2) i=a>j thus i,j!=a.
#
# Since the Jacobian is lower-triangular, we need only compute the product
# of diagonal elements:
# d vec[Y] / d vec[X] @[k(i,j), k(i,j)]
# = X[j,j] + I[i=j] X[i,j]
# = 2 X[j,j].
# Since there is a 2 X[j,j] term for every lower-triangular element of X we
# conclude:
# |Jac(d vec[Y]/d vec[X])| = 2^p prod_{j=0}^{p-1} X[j,j]^{p-j}.
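#
# Illustrative check: for x = [[1., 0], [2, 1]] (so p = 2 and both diagonal
# entries equal 1), the log-det-Jacobian is 2 * log(2) + 2 * log(1) +
# 1 * log(1) = 2 log(2) ~= 1.3863.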
diag = array_ops.matrix_diag_part(x)
# We now ensure diag is columnar. Eg, if `diag = [1, 2, 3]` then the output
# is `[[1], [2], [3]]` and if `diag = [[1, 2, 3], [4, 5, 6]]` then the
# output is unchanged.
diag = self._make_columnar(diag)
if self.validate_args:
is_matrix = check_ops.assert_rank_at_least(
x, 2, message="Input must be a (batch of) matrix.")
shape = array_ops.shape(x)
is_square = check_ops.assert_equal(
shape[-2], shape[-1],
message="Input must be a (batch of) square matrix.")
# Assuming lower-triangular means we only need check diag>0.
is_positive_definite = check_ops.assert_positive(
diag, message="Input must be positive definite.")
x = control_flow_ops.with_dependencies(
[is_matrix, is_square, is_positive_definite], x)
# Create a vector equal to: [p, p-1, ..., 2, 1].
if x.get_shape().ndims is None or x.get_shape().dims[-1].value is None:
p_int = array_ops.shape(x)[-1]
p_float = math_ops.cast(p_int, dtype=x.dtype)
else:
p_int = x.get_shape().dims[-1].value
p_float = np.array(p_int, dtype=x.dtype.as_numpy_dtype)
exponents = math_ops.linspace(p_float, 1., p_int)
sum_weighted_log_diag = array_ops.squeeze(
math_ops.matmul(math_ops.log(diag),
exponents[..., array_ops.newaxis]),
axis=-1)
fldj = p_float * np.log(2.) + sum_weighted_log_diag
# We finally need to undo adding an extra column in non-scalar cases
# where there is a single matrix as input.
if x.get_shape().ndims is not None:
if x.get_shape().ndims == 2:
fldj = array_ops.squeeze(fldj, axis=-1)
return fldj
shape = array_ops.shape(fldj)
maybe_squeeze_shape = array_ops.concat([
shape[:-1],
distribution_util.pick_vector(
math_ops.equal(array_ops.rank(x), 2),
np.array([], dtype=np.int32), shape[-1:])], 0)
return array_ops.reshape(fldj, maybe_squeeze_shape)
def _make_columnar(self, x):
"""Ensures non-scalar input has at least one column.
Example:
If `x = [1, 2, 3]` then the output is `[[1], [2], [3]]`.
If `x = [[1, 2, 3], [4, 5, 6]]` then the output is unchanged.
If `x = 1` then the output is unchanged.
Args:
x: `Tensor`.
Returns:
columnar_x: `Tensor` with at least two dimensions.
"""
if x.get_shape().ndims is not None:
if x.get_shape().ndims == 1:
x = x[array_ops.newaxis, :]
return x
shape = array_ops.shape(x)
maybe_expanded_shape = array_ops.concat([
shape[:-1],
distribution_util.pick_vector(
math_ops.equal(array_ops.rank(x), 1),
[1], np.array([], dtype=np.int32)),
shape[-1:],
], 0)
return array_ops.reshape(x, maybe_expanded_shape)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/cholesky_outer_product.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Permutation bijectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Permute",
]
class Permute(bijector.Bijector):
"""Permutes the rightmost dimension of a `Tensor`.
```python
import tensorflow_probability as tfp
tfb = tfp.bijectors
reverse = tfb.Permute(permutation=[2, 1, 0])
reverse.forward([-1., 0., 1.])
# ==> [1., 0., -1]
reverse.inverse([1., 0., -1])
# ==> [-1., 0., 1.]
reverse.forward_log_det_jacobian(any_value)
# ==> 0.
reverse.inverse_log_det_jacobian(any_value)
# ==> 0.
```
Warning: `tf.estimator` may repeatedly build the graph, thus
`Permute(np.random.permutation(event_size).astype("int32"))` is not a
reliable parameterization (nor would it be even if using `tf.constant`). A
safe alternative is to use `tf.compat.v1.get_variable` to achieve "init once"
behavior,
i.e.,
```python
def init_once(x, name):
return tf.compat.v1.get_variable(name, initializer=x, trainable=False)
Permute(permutation=init_once(
np.random.permutation(event_size).astype("int32"),
name="permutation"))
```
"""
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, permutation, validate_args=False, name=None):
"""Creates the `Permute` bijector.
Args:
permutation: An `int`-like vector-shaped `Tensor` representing the
permutation to apply to the rightmost dimension of the transformed
`Tensor`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str`, name given to ops managed by this object.
Raises:
TypeError: if `not permutation.dtype.is_integer`.
ValueError: if `permutation` does not contain exactly one of each of
`{0, 1, ..., d}`.
"""
with ops.name_scope(name, "permute", values=[permutation]):
permutation = ops.convert_to_tensor(permutation, name="permutation")
if not permutation.dtype.is_integer:
raise TypeError("permutation.dtype ({}) should be `int`-like.".format(
permutation.dtype.name))
p = tensor_util.constant_value(permutation)
if p is not None:
if set(p) != set(np.arange(p.size)):
raise ValueError("Permutation over `d` must contain exactly one of "
"each of `{0, 1, ..., d}`.")
elif validate_args:
p, _ = nn_ops.top_k(
-permutation, k=array_ops.shape(permutation)[-1], sorted=True)
permutation = control_flow_ops.with_dependencies([
check_ops.assert_equal(
-p,
math_ops.range(array_ops.size(p)),
message=("Permutation over `d` must contain exactly one of "
"each of `{0, 1, ..., d}`.")),
], permutation)
self._permutation = permutation
super(Permute, self).__init__(
forward_min_event_ndims=1,
is_constant_jacobian=True,
validate_args=validate_args,
name=name or "permute")
@property
def permutation(self):
return self._permutation
def _forward(self, x):
return array_ops.gather(x, self.permutation, axis=-1)
def _inverse(self, y):
return array_ops.gather(
y, array_ops.invert_permutation(self.permutation), axis=-1)
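# Illustrative check: with permutation=[1, 2, 0], _forward maps x to
# [x[1], x[2], x[0]]; invert_permutation([1, 2, 0]) = [2, 0, 1], so _inverse
# gathers [y[2], y[0], y[1]], recovering the original order.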
def _inverse_log_det_jacobian(self, y):
# is_constant_jacobian = True for this bijector, hence the
# `log_det_jacobian` need only be specified for a single input, as this will
# be tiled to match `event_ndims`.
return constant_op.constant(0., dtype=y.dtype.base_dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(0., dtype=x.dtype.base_dtype)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/permute.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SinhArcsinh bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"SinhArcsinh",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _sqrtx2p1(x):
"""Implementation of `sqrt(1 + x**2)` which is stable despite large `x`."""
return array_ops.where(
math_ops.abs(x) * np.sqrt(np.finfo(x.dtype.as_numpy_dtype).eps) <= 1.,
math_ops.sqrt(x**2. + 1.),
# For large x, calculating x**2 can overflow. This can be alleviated by
# considering:
# sqrt(1 + x**2)
# = exp(0.5 log(1 + x**2))
# = exp(0.5 log(x**2 * (1 + x**-2)))
# = exp(log(x) + 0.5 * log(1 + x**-2))
# = |x| * exp(0.5 log(1 + x**-2))
# = |x| * sqrt(1 + x**-2)
# We omit the last term in this approximation.
# When |x| > 1 / sqrt(machineepsilon), the second term will be 1,
# due to sqrt(1 + x**-2) = 1. This is also true with the gradient term,
# and higher order gradients, since the first order derivative of
# sqrt(1 + x**-2) is -2 * x**-3 / (1 + x**-2) = -2 / (x**3 + x),
# and all nth-order derivatives will be O(x**-(n + 2)). This makes any
# gradient terms that contain any derivatives of sqrt(1 + x**-2) vanish.
math_ops.abs(x))
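# Illustrative check: _sqrtx2p1(3.) takes the first branch and returns
# sqrt(10.) ~= 3.1623, while for |x| > 1 / sqrt(eps) (machine epsilon of the
# dtype) the second branch simply returns |x|.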
class SinhArcsinh(bijector.Bijector):
"""Compute `Y = g(X) = Sinh( (Arcsinh(X) + skewness) * tailweight )`.
For `skewness in (-inf, inf)` and `tailweight in (0, inf)`, this
transformation is a
diffeomorphism of the real line `(-inf, inf)`. The inverse transform is
`X = g^{-1}(Y) = Sinh( ArcSinh(Y) / tailweight - skewness )`.
The `SinhArcsinh` transformation of the Normal is described in
[Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865)
This Bijector allows a similar transformation of any distribution supported on
`(-inf, inf)`.
#### Meaning of the parameters
* If `skewness = 0` and `tailweight = 1`, this transform is the identity.
* Positive (negative) `skewness` leads to positive (negative) skew.
* positive skew means, for unimodal `X` centered at zero, the mode of `Y` is
"tilted" to the right.
* positive skew means positive values of `Y` become more likely, and
negative values become less likely.
* Larger (smaller) `tailweight` leads to fatter (thinner) tails.
* Fatter tails mean larger values of `|Y|` become more likely.
* If `X` is a unit Normal, `tailweight < 1` leads to a distribution that is
"flat" around `Y = 0`, and a very steep drop-off in the tails.
* If `X` is a unit Normal, `tailweight > 1` leads to a distribution more
peaked at the mode with heavier tails.
To see the argument about the tails, note that for `|X| >> 1` and
`|X| >> (|skewness| * tailweight)**tailweight`, we have
`Y approx 0.5 X**tailweight e**(sign(X) skewness * tailweight)`.
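#### Examples
An illustrative sketch of the parameter effects described above:
```python
# skewness=0 and tailweight=1 give the identity map.
SinhArcsinh(skewness=0., tailweight=1.).forward([-1., 0., 1.])
# ==> [-1., 0., 1.]
# A positive skewness pushes values to the right.
SinhArcsinh(skewness=1., tailweight=1.).forward(0.)
# ==> sinh(1.) ~= 1.1752
```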
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
skewness=None,
tailweight=None,
validate_args=False,
name="SinhArcsinh"):
"""Instantiates the `SinhArcsinh` bijector.
Args:
skewness: Skewness parameter. Float-type `Tensor`. Default is `0`
of type `float32`.
tailweight: Tailweight parameter. Positive `Tensor` of same `dtype` as
`skewness` and broadcastable `shape`. Default is `1` of type `float32`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[skewness, tailweight]):
tailweight = 1. if tailweight is None else tailweight
skewness = 0. if skewness is None else skewness
self._skewness = ops.convert_to_tensor(
skewness, name="skewness")
self._tailweight = ops.convert_to_tensor(
tailweight, name="tailweight", dtype=self._skewness.dtype)
check_ops.assert_same_float_dtype([self._skewness, self._tailweight])
if validate_args:
self._tailweight = control_flow_ops.with_dependencies([
check_ops.assert_positive(
self._tailweight,
message="Argument tailweight was not positive")
], self._tailweight)
super(SinhArcsinh, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
@property
def skewness(self):
"""The `skewness` in: `Y = Sinh((Arcsinh(X) + skewness) * tailweight)`."""
return self._skewness
@property
def tailweight(self):
"""The `tailweight` in: `Y = Sinh((Arcsinh(X) + skewness) * tailweight)`."""
return self._tailweight
def _forward(self, x):
return math_ops.sinh((math_ops.asinh(x) + self.skewness) * self.tailweight)
def _inverse(self, y):
return math_ops.sinh(math_ops.asinh(y) / self.tailweight - self.skewness)
def _inverse_log_det_jacobian(self, y):
# x = sinh(arcsinh(y) / tailweight - skewness)
# Using sinh' = cosh, arcsinh'(y) = 1 / sqrt(y**2 + 1),
# dx/dy
# = cosh(arcsinh(y) / tailweight - skewness)
# / (tailweight * sqrt(y**2 + 1))
# This is computed inside the log to avoid catastrophic cancellations
# from cosh((arcsinh(y) / tailweight) - skewness) and sqrt(x**2 + 1).
return (
math_ops.log(math_ops.cosh(
math_ops.asinh(y) / self.tailweight - self.skewness)
# TODO(srvasude): Consider using cosh(arcsinh(x)) in cases
# where (arcsinh(x) / tailweight) - skewness ~= arcsinh(x).
/ _sqrtx2p1(y))
- math_ops.log(self.tailweight))
def _forward_log_det_jacobian(self, x):
# y = sinh((arcsinh(x) + skewness) * tailweight)
# Using sinh' = cosh, arcsinh'(x) = 1 / sqrt(x**2 + 1),
# dy/dx
# = cosh((arcsinh(x) + skewness) * tailweight) * tailweight / sqrt(x**2 + 1)
# This is computed inside the log to avoid catastrophic cancellations
# from cosh((arcsinh(x) + skewness) * tailweight) and sqrt(x**2 + 1).
return (
math_ops.log(math_ops.cosh(
(math_ops.asinh(x) + self.skewness) * self.tailweight)
# TODO(srvasude): Consider using cosh(arcsinh(x)) in cases
# where (arcsinh(x) + skewness) * tailweight ~= arcsinh(x).
/ _sqrtx2p1(x))
+ math_ops.log(self.tailweight))
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/sinh_arcsinh.py
|
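As a quick, self-contained sanity check of the algebra in `sinh_arcsinh.py` above, the following NumPy-only sketch (an illustration with hypothetical helper names, not part of the TensorFlow source) applies `Y = Sinh((Arcsinh(X) + skewness) * tailweight)` and the stated inverse, confirming the round trip and the identity case.

```python
import numpy as np

def sinh_arcsinh_forward(x, skewness=0.0, tailweight=1.0):
    # Y = Sinh((Arcsinh(X) + skewness) * tailweight)
    return np.sinh((np.arcsinh(x) + skewness) * tailweight)

def sinh_arcsinh_inverse(y, skewness=0.0, tailweight=1.0):
    # X = Sinh(Arcsinh(Y) / tailweight - skewness)
    return np.sinh(np.arcsinh(y) / tailweight - skewness)

x = np.linspace(-5.0, 5.0, 11)
y = sinh_arcsinh_forward(x, skewness=0.5, tailweight=2.0)
x_back = sinh_arcsinh_inverse(y, skewness=0.5, tailweight=2.0)
assert np.allclose(x, x_back)                    # the round trip recovers the input
assert np.allclose(sinh_arcsinh_forward(x), x)   # skewness=0, tailweight=1 is the identity
```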
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Weibull bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Weibull",
]
class Weibull(bijector.Bijector):
"""Compute `Y = g(X) = 1 - exp((-X / scale) ** concentration), X >= 0`.
This bijector maps inputs from `[0, inf]` to [0, 1]`. The inverse of the
bijector applied to a uniform random variable `X ~ U(0, 1) gives back a
random variable with the
[Weibull distribution](https://en.wikipedia.org/wiki/Weibull_distribution):
```none
Y ~ Weibull(scale, concentration)
pdf(y; scale, concentration, y >= 0) = (scale / concentration) * (
scale / concentration) ** (concentration - 1) * exp(
-(y / scale) ** concentration)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
scale=1.,
concentration=1.,
validate_args=False,
name="weibull"):
"""Instantiates the `Weibull` bijector.
Args:
scale: Positive Float-type `Tensor` that is the same dtype and is
broadcastable with `concentration`.
        This is `l` in `Y = g(X) = 1 - exp(-(x / l) ** k)`.
concentration: Positive Float-type `Tensor` that is the same dtype and is
broadcastable with `scale`.
        This is `k` in `Y = g(X) = 1 - exp(-(x / l) ** k)`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[scale, concentration]):
self._scale = ops.convert_to_tensor(scale, name="scale")
self._concentration = ops.convert_to_tensor(
concentration, name="concentration")
check_ops.assert_same_float_dtype([self._scale, self._concentration])
if validate_args:
self._scale = control_flow_ops.with_dependencies([
check_ops.assert_positive(
self._scale,
message="Argument scale was not positive")
], self._scale)
self._concentration = control_flow_ops.with_dependencies([
check_ops.assert_positive(
self._concentration,
message="Argument concentration was not positive")
], self._concentration)
super(Weibull, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
@property
def scale(self):
"""The `l` in `Y = g(X) = 1 - exp((-x / l) ** k)`."""
return self._scale
@property
def concentration(self):
"""The `k` in `Y = g(X) = 1 - exp((-x / l) ** k)`."""
return self._concentration
def _forward(self, x):
x = self._maybe_assert_valid_x(x)
return -math_ops.expm1(-((x / self.scale) ** self.concentration))
def _inverse(self, y):
y = self._maybe_assert_valid_y(y)
return self.scale * (-math_ops.log1p(-y)) ** (1 / self.concentration)
def _inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
return (
-math_ops.log1p(-y) +
(1 / self.concentration - 1) * math_ops.log(-math_ops.log1p(-y)) +
math_ops.log(self.scale / self.concentration))
def _forward_log_det_jacobian(self, x):
x = self._maybe_assert_valid_x(x)
return (
-(x / self.scale) ** self.concentration +
(self.concentration - 1) * math_ops.log(x) +
math_ops.log(self.concentration) +
-self.concentration * math_ops.log(self.scale))
def _maybe_assert_valid_x(self, x):
if not self.validate_args:
return x
is_valid = check_ops.assert_non_negative(
x,
message="Forward transformation input must be at least 0.")
return control_flow_ops.with_dependencies([is_valid], x)
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_positive = check_ops.assert_non_negative(
y, message="Inverse transformation input must be greater than 0.")
less_than_one = check_ops.assert_less_equal(
y, constant_op.constant(1., y.dtype),
message="Inverse transformation input must be less than or equal to 1.")
return control_flow_ops.with_dependencies([is_positive, less_than_one], y)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/weibull.py
|
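The Weibull bijector above is just the Weibull CDF. Below is a hedged, NumPy-only sketch (illustrative helper names, not the TensorFlow implementation) that round-trips the transform and checks the forward log-det-Jacobian against a finite difference.

```python
import numpy as np

def weibull_forward(x, scale=1.0, concentration=1.0):
    # Y = 1 - exp(-(x / scale) ** concentration), the Weibull CDF.
    return -np.expm1(-((x / scale) ** concentration))

def weibull_inverse(y, scale=1.0, concentration=1.0):
    # X = scale * (-log(1 - y)) ** (1 / concentration)
    return scale * (-np.log1p(-y)) ** (1.0 / concentration)

def weibull_forward_log_det_jacobian(x, scale=1.0, concentration=1.0):
    # log |dY/dX| of the transform above.
    return (-(x / scale) ** concentration
            + (concentration - 1.0) * np.log(x)
            + np.log(concentration)
            - concentration * np.log(scale))

x = np.array([0.5, 1.0, 2.0])
y = weibull_forward(x, scale=2.0, concentration=1.5)
assert np.allclose(weibull_inverse(y, scale=2.0, concentration=1.5), x)

# Finite-difference check of the analytic Jacobian.
eps = 1e-6
num = (weibull_forward(x + eps, 2.0, 1.5) - weibull_forward(x - eps, 2.0, 1.5)) / (2 * eps)
assert np.allclose(np.log(num), weibull_forward_log_det_jacobian(x, 2.0, 1.5), atol=1e-5)
```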
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Softsign bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Softsign",
]
class Softsign(bijector.Bijector):
"""Bijector which computes `Y = g(X) = X / (1 + |X|)`.
The softsign `Bijector` has the following two useful properties:
* The domain is all real numbers
* `softsign(x) approx sgn(x)`, for large `|x|`.
#### Examples
```python
# Create the Y = softsign(X) transform.
softsign = Softsign()
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
x / (1 + abs(x)) == softsign.forward(x)
x / (1 - abs(x)) == softsign.inverse(x)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, validate_args=False, name="softsign"):
super(Softsign, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
return x / (1. + math_ops.abs(x))
def _inverse(self, y):
y = self._maybe_assert_valid_y(y)
return y / (1. - math_ops.abs(y))
def _forward_log_det_jacobian(self, x):
return -2. * math_ops.log1p(math_ops.abs(x))
def _inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
return -2. * math_ops.log1p(-math_ops.abs(y))
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_valid = [
check_ops.assert_greater(
y, math_ops.cast(-1., dtype=y.dtype.base_dtype),
message="Inverse transformation input must be greater than -1."),
check_ops.assert_less(
y, math_ops.cast(1., dtype=y.dtype.base_dtype),
message="Inverse transformation input must be less than 1.")
]
return control_flow_ops.with_dependencies(is_valid, y)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/softsign.py
|
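A small NumPy-only sketch of the softsign transform above (hypothetical helper names, not the TensorFlow code) showing the forward/inverse pair and why the forward log-det-Jacobian is `-2 * log1p(|x|)`:

```python
import numpy as np

def softsign_forward(x):
    # Y = X / (1 + |X|), maps the real line onto (-1, 1).
    return x / (1.0 + np.abs(x))

def softsign_inverse(y):
    # X = Y / (1 - |Y|), defined for |Y| < 1.
    return y / (1.0 - np.abs(y))

x = np.linspace(-4.0, 4.0, 9)
y = softsign_forward(x)
assert np.all(np.abs(y) < 1.0)
assert np.allclose(softsign_inverse(y), x)

# d/dx [x / (1 + |x|)] = 1 / (1 + |x|)**2, so log|dy/dx| = -2 * log1p(|x|).
eps = 1e-6
num = (softsign_forward(x + eps) - softsign_forward(x - eps)) / (2 * eps)
assert np.allclose(np.log(num), -2.0 * np.log1p(np.abs(x)), atol=1e-5)
```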
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chain bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Chain",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _use_static_shape(input_tensor, ndims):
return input_tensor.shape.is_fully_defined() and isinstance(ndims, int)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _compute_min_event_ndims(bijector_list, compute_forward=True):
"""Computes the min_event_ndims associated with the give list of bijectors.
Given a list `bijector_list` of bijectors, compute the min_event_ndims that is
associated with the composition of bijectors in that list.
  min_event_ndims is the number of rightmost dimensions on which the bijector
  performs necessary computation (i.e. the non-broadcastable part of the
  computation).
We can derive the min_event_ndims for a chain of bijectors as follows:
In the case where there are no rank changing bijectors, this will simply be
`max(b.forward_min_event_ndims for b in bijector_list)`. This is because the
bijector with the most forward_min_event_ndims requires the most dimensions,
and hence the chain also requires operating on those dimensions.
  However, in the case of rank-changing bijectors, more care is needed to
  determine the exact number of dimensions: padding dimensions causes
  subsequent bijectors to operate on the padded dimensions, and removing
  dimensions causes subsequent bijectors to operate further to the left.
Args:
bijector_list: List of bijectors to be composed by chain.
compute_forward: Boolean. If True, computes the min_event_ndims associated
with a forward call to Chain, and otherwise computes the min_event_ndims
associated with an inverse call to Chain. The latter is the same as the
min_event_ndims associated with a forward call to Invert(Chain(....)).
Returns:
min_event_ndims
"""
min_event_ndims = 0
# This is a mouthful, but what this encapsulates is that if not for rank
# changing bijectors, we'd only need to compute the largest of the min
# required ndims. Hence "max_min". Due to rank changing bijectors, we need to
# account for synthetic rank growth / synthetic rank decrease from a rank
# changing bijector.
rank_changed_adjusted_max_min_event_ndims = 0
if compute_forward:
bijector_list = reversed(bijector_list)
for b in bijector_list:
if compute_forward:
current_min_event_ndims = b.forward_min_event_ndims
current_inverse_min_event_ndims = b.inverse_min_event_ndims
else:
current_min_event_ndims = b.inverse_min_event_ndims
current_inverse_min_event_ndims = b.forward_min_event_ndims
# New dimensions were touched.
if rank_changed_adjusted_max_min_event_ndims < current_min_event_ndims:
min_event_ndims += (
current_min_event_ndims - rank_changed_adjusted_max_min_event_ndims)
rank_changed_adjusted_max_min_event_ndims = max(
current_min_event_ndims, rank_changed_adjusted_max_min_event_ndims)
# If the number of dimensions has increased via forward, then
# inverse_min_event_ndims > forward_min_event_ndims, and hence the
# dimensions we computed on, have moved left (so we have operated
# on additional dimensions).
# Conversely, if the number of dimensions has decreased via forward,
# then we have inverse_min_event_ndims < forward_min_event_ndims,
# and so we will have operated on fewer right most dimensions.
number_of_changed_dimensions = (
current_min_event_ndims - current_inverse_min_event_ndims)
rank_changed_adjusted_max_min_event_ndims -= number_of_changed_dimensions
return min_event_ndims
class Chain(bijector.Bijector):
"""Bijector which applies a sequence of bijectors.
Example Use:
```python
chain = Chain([Exp(), Softplus()], name="one_plus_exp")
```
Results in:
* Forward:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).forward(x)
= exp.forward(softplus.forward(x))
= tf.exp(tf.math.log(1. + tf.exp(x)))
= 1. + tf.exp(x)
```
* Inverse:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).inverse(y)
= softplus.inverse(exp.inverse(y))
= tf.math.log(tf.exp(tf.math.log(y)) - 1.)
= tf.math.log(y - 1.)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, bijectors=None, validate_args=False, name=None):
"""Instantiates `Chain` bijector.
Args:
bijectors: Python `list` of bijector instances. An empty list makes this
bijector equivalent to the `Identity` bijector.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str`, name given to ops managed by this object. Default:
E.g., `Chain([Exp(), Softplus()]).name == "chain_of_exp_of_softplus"`.
Raises:
ValueError: if bijectors have different dtypes.
"""
if bijectors is None:
bijectors = ()
self._bijectors = bijectors
for a_bijector in bijectors:
if not a_bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError(
"Invert is not implemented for non-injective bijector ({})".format(
a_bijector.name))
dtype = list(set([b.dtype for b in bijectors]))
if len(dtype) > 2:
raise ValueError("incompatible dtypes: %s" % dtype)
elif len(dtype) == 2:
dtype = dtype[1] if dtype[0] is None else dtype[0]
elif len(dtype) == 1:
dtype = dtype[0]
else:
dtype = None
inverse_min_event_ndims = _compute_min_event_ndims(
bijectors, compute_forward=False)
forward_min_event_ndims = _compute_min_event_ndims(
bijectors, compute_forward=True)
super(Chain, self).__init__(
graph_parents=list(itertools.chain.from_iterable(
b.graph_parents for b in bijectors)),
forward_min_event_ndims=forward_min_event_ndims,
inverse_min_event_ndims=inverse_min_event_ndims,
is_constant_jacobian=all(b.is_constant_jacobian for b in bijectors),
validate_args=validate_args,
dtype=dtype,
name=name or ("identity" if not bijectors else
"_of_".join(["chain"] + [b.name for b in bijectors])))
@property
def bijectors(self):
return self._bijectors
def _shape_helper(self, func_name, input_shape, reverse):
new_shape = input_shape
for b in reversed(self.bijectors) if reverse else self.bijectors:
func = getattr(b, func_name, None)
if func is None:
raise ValueError("unable to call %s on bijector %s (%s)" %
(func_name, b.name, func))
new_shape = func(new_shape)
return new_shape
def _forward_event_shape(self, input_shape):
return self._shape_helper("forward_event_shape", input_shape,
reverse=True)
def _forward_event_shape_tensor(self, input_shape):
return self._shape_helper(
"forward_event_shape_tensor", input_shape, reverse=True)
def _inverse_event_shape(self, output_shape):
return self._shape_helper("inverse_event_shape", output_shape,
reverse=False)
def _inverse_event_shape_tensor(self, output_shape):
return self._shape_helper("inverse_event_shape_tensor", output_shape,
reverse=False)
def _inverse(self, y, **kwargs):
for b in self.bijectors:
y = b.inverse(y, **kwargs.get(b.name, {}))
return y
def _inverse_log_det_jacobian(self, y, **kwargs):
y = ops.convert_to_tensor(y, name="y")
ildj = math_ops.cast(0., dtype=y.dtype.base_dtype)
if not self.bijectors:
return ildj
event_ndims = self._maybe_get_static_event_ndims(
self.inverse_min_event_ndims)
if _use_static_shape(y, event_ndims):
event_shape = y.shape[y.shape.ndims - event_ndims:]
else:
event_shape = array_ops.shape(y)[array_ops.rank(y) - event_ndims:]
for b in self.bijectors:
ildj += b.inverse_log_det_jacobian(
y, event_ndims=event_ndims, **kwargs.get(b.name, {}))
if _use_static_shape(y, event_ndims):
event_shape = b.inverse_event_shape(event_shape)
event_ndims = self._maybe_get_static_event_ndims(
event_shape.ndims)
else:
event_shape = b.inverse_event_shape_tensor(event_shape)
event_ndims = array_ops.size(event_shape)
event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)
if event_ndims_ is not None:
event_ndims = event_ndims_
y = b.inverse(y, **kwargs.get(b.name, {}))
return ildj
def _forward(self, x, **kwargs):
for b in reversed(self.bijectors):
x = b.forward(x, **kwargs.get(b.name, {}))
return x
def _forward_log_det_jacobian(self, x, **kwargs):
x = ops.convert_to_tensor(x, name="x")
fldj = math_ops.cast(0., dtype=x.dtype.base_dtype)
if not self.bijectors:
return fldj
event_ndims = self._maybe_get_static_event_ndims(
self.forward_min_event_ndims)
if _use_static_shape(x, event_ndims):
event_shape = x.shape[x.shape.ndims - event_ndims:]
else:
event_shape = array_ops.shape(x)[array_ops.rank(x) - event_ndims:]
for b in reversed(self.bijectors):
fldj += b.forward_log_det_jacobian(
x, event_ndims=event_ndims, **kwargs.get(b.name, {}))
if _use_static_shape(x, event_ndims):
event_shape = b.forward_event_shape(event_shape)
event_ndims = self._maybe_get_static_event_ndims(event_shape.ndims)
else:
event_shape = b.forward_event_shape_tensor(event_shape)
event_ndims = array_ops.size(event_shape)
event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)
if event_ndims_ is not None:
event_ndims = event_ndims_
x = b.forward(x, **kwargs.get(b.name, {}))
return fldj
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/chain.py
|
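The docstring example above can be reproduced without TensorFlow. This framework-free sketch (hypothetical names; real `Chain` objects carry much more machinery) only illustrates the composition order: forward applies the bijector list right-to-left, and inverse applies the corresponding inverses in list order.

```python
import numpy as np

def chain_forward(forward_fns, x):
    # Mirrors Chain._forward: apply the list right-to-left.
    for f in reversed(forward_fns):
        x = f(x)
    return x

def chain_inverse(inverse_fns, y):
    # Mirrors Chain._inverse: apply the inverses in list order.
    for f in inverse_fns:
        y = f(y)
    return y

def softplus(x):
    return np.log1p(np.exp(x))      # Softplus().forward

def softplus_inv(y):
    return np.log(np.expm1(y))      # Softplus().inverse

x = np.array([-1.0, 0.0, 1.0])
# Chain([Exp(), Softplus()]).forward(x) == exp(softplus(x)) == 1 + exp(x)
y = chain_forward([np.exp, softplus], x)
assert np.allclose(y, 1.0 + np.exp(x))
# The inverse applies Exp.inverse (log), then Softplus.inverse.
assert np.allclose(chain_inverse([np.log, softplus_inv], y), x)
```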
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kumaraswamy bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Kumaraswamy",
]
class Kumaraswamy(bijector.Bijector):
"""Compute `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a), X in [0, 1]`.
  This bijector maps inputs from `[0, 1]` to `[0, 1]`. The bijector applied to
  a uniform random variable `X ~ U(0, 1)` gives back a
random variable with the [Kumaraswamy distribution](
https://en.wikipedia.org/wiki/Kumaraswamy_distribution):
```none
Y ~ Kumaraswamy(a, b)
pdf(y; a, b, 0 <= y <= 1) = a * b * y ** (a - 1) * (1 - y**a) ** (b - 1)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
concentration1=None,
concentration0=None,
validate_args=False,
name="kumaraswamy"):
"""Instantiates the `Kumaraswamy` bijector.
Args:
concentration1: Python `float` scalar indicating the transform power,
i.e., `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)` where `a` is
`concentration1`.
concentration0: Python `float` scalar indicating the transform power,
i.e., `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)` where `b` is
`concentration0`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[concentration1, concentration0]):
concentration1 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration1, name="concentration1"),
validate_args=validate_args)
concentration0 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration0, name="concentration0"),
validate_args=validate_args)
self._concentration1 = concentration1
self._concentration0 = concentration0
super(Kumaraswamy, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
@property
def concentration1(self):
"""The `a` in: `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)`."""
return self._concentration1
@property
def concentration0(self):
"""The `b` in: `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)`."""
return self._concentration0
def _forward(self, x):
x = self._maybe_assert_valid(x)
return math_ops.exp(
math_ops.log1p(-math_ops.exp(math_ops.log1p(-x) / self.concentration0))
/ self.concentration1)
def _inverse(self, y):
y = self._maybe_assert_valid(y)
return math_ops.exp(math_ops.log1p(
-(1 - y**self.concentration1)**self.concentration0))
def _inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid(y)
return (
math_ops.log(self.concentration1) + math_ops.log(self.concentration0) +
(self.concentration1 - 1) * math_ops.log(y) +
(self.concentration0 - 1) * math_ops.log1p(-y**self.concentration1))
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of a concentration parameter."""
if not validate_args:
return concentration
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
], concentration)
def _maybe_assert_valid(self, x):
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
x,
message="sample must be non-negative"),
check_ops.assert_less_equal(
x, array_ops.ones([], self.concentration0.dtype),
message="sample must be no larger than `1`."),
], x)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/kumaraswamy.py
|
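The Kumaraswamy bijector's forward map is the Kumaraswamy quantile function and its inverse is the CDF. Here is a NumPy-only sketch (illustrative names, not the TensorFlow implementation) that round-trips the closed forms and confirms that the `log1p`/`exp` formulation used in `_forward` is the same function written more stably:

```python
import numpy as np

def kumaraswamy_forward(x, a, b):
    # Y = (1 - (1 - X)**(1 / b))**(1 / a), the Kumaraswamy quantile function.
    return (1.0 - (1.0 - x) ** (1.0 / b)) ** (1.0 / a)

def kumaraswamy_inverse(y, a, b):
    # X = 1 - (1 - Y**a)**b, the Kumaraswamy CDF.
    return 1.0 - (1.0 - y ** a) ** b

a, b = 2.0, 3.0
x = np.linspace(0.05, 0.95, 10)
y = kumaraswamy_forward(x, a, b)
assert np.allclose(kumaraswamy_inverse(y, a, b), x)

# The log1p/exp formulation used in `_forward` above is algebraically the same,
# just better behaved near the endpoints of [0, 1].
y_stable = np.exp(np.log1p(-np.exp(np.log1p(-x) / b)) / a)
assert np.allclose(y, y_stable)
```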
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reshape bijectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Reshape",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _static_ndims_from_shape(shape):
return tensor_shape.dimension_value(shape.shape.with_rank_at_least(1)[0])
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _ndims_from_shape(shape):
return array_ops.shape(shape)[0]
class Reshape(bijector.Bijector):
"""Reshapes the `event_shape` of a `Tensor`.
The semantics generally follow that of `tf.reshape()`, with
a few differences:
* The user must provide both the input and output shape, so that
the transformation can be inverted. If an input shape is not
specified, the default assumes a vector-shaped input, i.e.,
event_shape_in = (-1,).
* The `Reshape` bijector automatically broadcasts over the leftmost
dimensions of its input (`sample_shape` and `batch_shape`); only
the rightmost `event_ndims_in` dimensions are reshaped. The
number of dimensions to reshape is inferred from the provided
`event_shape_in` (`event_ndims_in = len(event_shape_in)`).
Example usage:
```python
import tensorflow_probability as tfp
tfb = tfp.bijectors
r = tfb.Reshape(event_shape_out=[1, -1])
r.forward([3., 4.]) # shape [2]
# ==> [[3., 4.]] # shape [1, 2]
r.forward([[1., 2.], [3., 4.]]) # shape [2, 2]
# ==> [[[1., 2.]],
# [[3., 4.]]] # shape [2, 1, 2]
r.inverse([[3., 4.]]) # shape [1,2]
# ==> [3., 4.] # shape [2]
r.forward_log_det_jacobian(any_value)
# ==> 0.
r.inverse_log_det_jacobian(any_value)
# ==> 0.
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, event_shape_out, event_shape_in=(-1,),
validate_args=False, name=None):
"""Creates a `Reshape` bijector.
Args:
event_shape_out: An `int`-like vector-shaped `Tensor`
representing the event shape of the transformed output.
      event_shape_in: An optional `int`-like vector-shaped `Tensor`
representing the event shape of the input. This is required in
order to define inverse operations; the default of (-1,)
assumes a vector-shaped input.
validate_args: Python `bool` indicating whether arguments should
be checked for correctness.
name: Python `str`, name given to ops managed by this object.
Raises:
TypeError: if either `event_shape_in` or `event_shape_out` has
non-integer `dtype`.
ValueError: if either of `event_shape_in` or `event_shape_out`
has non-vector shape (`rank > 1`), or if their sizes do not
match.
"""
with ops.name_scope(name, "reshape",
values=[event_shape_out, event_shape_in]):
event_shape_out = ops.convert_to_tensor(event_shape_out,
name="event_shape_out",
preferred_dtype=dtypes.int32)
event_shape_in = ops.convert_to_tensor(event_shape_in,
name="event_shape_in",
preferred_dtype=dtypes.int32)
assertions = []
assertions.extend(self._maybe_check_valid_shape(
event_shape_out, validate_args))
assertions.extend(self._maybe_check_valid_shape(
event_shape_in, validate_args))
self._assertions = assertions
self._event_shape_in = event_shape_in
self._event_shape_out = event_shape_out
super(Reshape, self).__init__(
forward_min_event_ndims=0,
is_constant_jacobian=True,
validate_args=validate_args,
name=name or "reshape")
def _maybe_check_valid_shape(self, shape, validate_args):
"""Check that a shape Tensor is int-type and otherwise sane."""
if not shape.dtype.is_integer:
raise TypeError("{} dtype ({}) should be `int`-like.".format(
shape, shape.dtype.name))
assertions = []
ndims = array_ops.rank(shape)
ndims_ = tensor_util.constant_value(ndims)
if ndims_ is not None and ndims_ > 1:
raise ValueError("`{}` rank ({}) should be <= 1.".format(
shape, ndims_))
elif validate_args:
assertions.append(check_ops.assert_less_equal(
ndims, 1, message="`{}` rank should be <= 1.".format(shape)))
shape_ = tensor_util.constant_value_as_shape(shape)
if shape_.is_fully_defined():
es = np.int32(shape_.as_list())
if sum(es == -1) > 1:
raise ValueError(
"`{}` must have at most one `-1` (given {})"
.format(shape, es))
if np.any(es < -1):
raise ValueError(
"`{}` elements must be either positive integers or `-1`"
"(given {})."
.format(shape, es))
elif validate_args:
assertions.extend([
check_ops.assert_less_equal(
math_ops.reduce_sum(
math_ops.cast(math_ops.equal(shape, -1), dtypes.int32)),
1,
message="`{}` elements must have at most one `-1`."
.format(shape)),
check_ops.assert_greater_equal(
shape, -1,
message="`{}` elements must be either positive integers or `-1`."
.format(shape)),
])
return assertions
def _reshape_helper(self, x, event_shape_in, event_shape_out):
"""Reshape only the event_shape of an input `Tensor`."""
event_ndims_in_ = _static_ndims_from_shape(event_shape_in)
event_ndims_in = _ndims_from_shape(event_shape_in)
x_ndims_, x_ndims = x.shape.ndims, array_ops.rank(x)
assertions = []
# Ensure x.event_shape is compatible with event_shape_in.
if (event_ndims_in_ is not None
and x_ndims_ is not None
and x.shape.with_rank_at_least(event_ndims_in_)[
x_ndims_-event_ndims_in_:].is_fully_defined()):
x_event_shape_, x_event_shape = [ # pylint: disable=unbalanced-tuple-unpacking
np.int32(x.shape[x_ndims_-event_ndims_in_:])]*2
else:
x_event_shape_, x_event_shape = (
None, array_ops.shape(x)[x_ndims-event_ndims_in:])
event_shape_in_ = tensor_util.constant_value(event_shape_in)
if x_event_shape_ is not None and event_shape_in_ is not None:
# Compare the shape dimensions that are fully specified in the
# input (i.e., for which event_shape_in is not -1). If x_event_shape
# matches along all of these dimensions, it is compatible with
# the desired input shape and any further mismatches (i.e.,
      # incompatibility with the desired *output* shape) will be
# caught inside of array_ops.reshape() below.
x_event_shape_specified_ = x_event_shape_[event_shape_in_ >= 0]
event_shape_in_specified_ = event_shape_in_[event_shape_in_ >= 0]
if not np.equal(x_event_shape_specified_,
event_shape_in_specified_).all():
raise ValueError(
"Input `event_shape` does not match `event_shape_in` ({} vs {}).".
format(x_event_shape_, event_shape_in_))
elif self.validate_args:
# Similarly to the static case, we compare the shape dimensions
# that are fully specified in the input. We extract these
# dimensions using boolean_mask(), which requires that the mask
# have known ndims. We can assume that shape Tensors always have
# ndims==1 (this assumption is verified inside of
# _maybe_check_valid_shape), so the reshape operation is just a
# no-op that formally encodes this fact to make boolean_mask()
# happy.
event_shape_mask = array_ops.reshape(event_shape_in >= 0, [-1])
x_event_shape_specified = array_ops.boolean_mask(x_event_shape,
event_shape_mask)
event_shape_in_specified = array_ops.boolean_mask(event_shape_in,
event_shape_mask)
assertions.append(check_ops.assert_equal(
x_event_shape_specified, event_shape_in_specified,
message="Input `event_shape` does not match `event_shape_in`."))
if assertions:
x = control_flow_ops.with_dependencies(assertions, x)
# get the parts of shape(x) that will not change
sample_and_batch_shape = array_ops.shape(x)
ndims = (x.shape.ndims if x.shape.ndims is not None
else array_ops.rank(x))
sample_and_batch_shape = sample_and_batch_shape[
:(ndims - math_ops.abs(event_ndims_in))]
if (event_ndims_in_ is not None
and x_ndims_ is not None
and event_ndims_in_ == x_ndims_):
# Hack to allow forward/inverse_event_shape to do shape
# inference by calling this helper method with a dummy Tensor of
# shape event_shape_in. In this special case,
# sample_and_batch_shape will be empty so we can preserve static
# shape information by avoiding the concat operation below
# (which would be a no-op).
new_shape = event_shape_out
else:
new_shape = array_ops.concat(
[sample_and_batch_shape, event_shape_out], axis=0)
return array_ops.reshape(x, new_shape)
def _forward(self, x):
with ops.control_dependencies(self._assertions):
return self._reshape_helper(x,
self._event_shape_in,
self._event_shape_out)
def _inverse(self, y):
with ops.control_dependencies(self._assertions):
return self._reshape_helper(y,
self._event_shape_out,
self._event_shape_in)
def _inverse_log_det_jacobian(self, y):
with ops.control_dependencies(self._assertions):
return constant_op.constant(0., dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
with ops.control_dependencies(self._assertions):
return constant_op.constant(0., dtype=x.dtype)
def _forward_event_shape(self, input_shape):
# NOTE: this method and the other *_event_shape* methods
# compute shape by explicit transformation of a dummy
# variable. This approach is not generally recommended because it
# bloats the graph and could in general trigger side effects.
#
# In this particular case of the Reshape bijector, the
# forward and inverse transforms have no side effects, and we
# believe the reduction in code complexity from delegating the
# heavy lifting to tf.reshape() is worth the added graph ops.
# However, you should think hard before implementing this approach
# in other Bijectors; it is strongly preferred to compute
# shapes explicitly whenever it's feasible to do so.
with ops.control_dependencies(self._assertions):
dummy = array_ops.zeros(dtype=dtypes.float32, shape=input_shape)
dummy_reshaped = self.forward(dummy)
return dummy_reshaped.shape
def _inverse_event_shape(self, output_shape):
with ops.control_dependencies(self._assertions):
dummy = array_ops.zeros(dtype=dtypes.float32, shape=output_shape)
dummy_reshaped = self.inverse(dummy)
return dummy_reshaped.shape
def _forward_event_shape_tensor(self, input_shape):
with ops.control_dependencies(self._assertions):
dummy = array_ops.zeros(dtype=dtypes.float32, shape=input_shape)
dummy_reshaped = self.forward(dummy)
return array_ops.shape(dummy_reshaped)
def _inverse_event_shape_tensor(self, output_shape):
with ops.control_dependencies(self._assertions):
dummy = array_ops.zeros(dtype=dtypes.float32, shape=output_shape)
dummy_reshaped = self.inverse(dummy)
return array_ops.shape(dummy_reshaped)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/reshape.py
|
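The essential behavior of `Reshape` is easy to state in NumPy: keep the leading sample/batch dimensions and reshape only the rightmost event dimensions. A hedged sketch (hypothetical helper, with none of the bijector's shape validation):

```python
import numpy as np

def reshape_event(x, event_shape_in, event_shape_out):
    # Reshape only the rightmost `len(event_shape_in)` dimensions of `x`,
    # leaving any leading sample/batch dimensions untouched.
    event_ndims_in = len(event_shape_in)
    sample_and_batch_shape = x.shape[:x.ndim - event_ndims_in]
    return x.reshape(sample_and_batch_shape + tuple(event_shape_out))

x = np.arange(2 * 3 * 6).reshape(2, 3, 6)      # batch_shape [2, 3], event_shape [6]
y = reshape_event(x, event_shape_in=(6,), event_shape_out=(2, 3))
assert y.shape == (2, 3, 2, 3)                 # event_shape [6] -> [2, 3]
x_back = reshape_event(y, event_shape_in=(2, 3), event_shape_out=(6,))
assert np.array_equal(x, x_back)
# As with the bijector, a pure reshape has unit Jacobian, so the
# forward/inverse log det Jacobian is 0.
```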
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FillTriangular bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions import util as dist_util
from tensorflow.python.util import deprecation
__all__ = [
"FillTriangular",
]
class FillTriangular(bijector.Bijector):
"""Transforms vectors to triangular.
Triangular matrix elements are filled in a clockwise spiral.
Given input with shape `batch_shape + [d]`, produces output with
shape `batch_shape + [n, n]`, where
`n = (-1 + sqrt(1 + 8 * d))/2`.
This follows by solving the quadratic equation
`d = 1 + 2 + ... + n = n * (n + 1)/2`.
#### Example
```python
b = tfb.FillTriangular(upper=False)
b.forward([1, 2, 3, 4, 5, 6])
# ==> [[4, 0, 0],
# [6, 5, 0],
# [3, 2, 1]]
b = tfb.FillTriangular(upper=True)
b.forward([1, 2, 3, 4, 5, 6])
# ==> [[1, 2, 3],
# [0, 5, 6],
# [0, 0, 4]]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
upper=False,
validate_args=False,
name="fill_triangular"):
"""Instantiates the `FillTriangular` bijector.
Args:
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._upper = upper
super(FillTriangular, self).__init__(
forward_min_event_ndims=1,
inverse_min_event_ndims=2,
validate_args=validate_args,
name=name)
def _forward(self, x):
return dist_util.fill_triangular(x, upper=self._upper)
def _inverse(self, y):
return dist_util.fill_triangular_inverse(y, upper=self._upper)
def _forward_log_det_jacobian(self, x):
return array_ops.zeros_like(x[..., 0])
def _inverse_log_det_jacobian(self, y):
return array_ops.zeros_like(y[..., 0, 0])
def _forward_event_shape(self, input_shape):
batch_shape, d = (input_shape[:-1],
tensor_shape.dimension_value(input_shape[-1]))
if d is None:
n = None
else:
n = vector_size_to_square_matrix_size(d, self.validate_args)
return batch_shape.concatenate([n, n])
def _inverse_event_shape(self, output_shape):
batch_shape, n1, n2 = (output_shape[:-2],
tensor_shape.dimension_value(output_shape[-2]),
tensor_shape.dimension_value(output_shape[-1]))
if n1 is None or n2 is None:
m = None
elif n1 != n2:
raise ValueError("Matrix must be square. (saw [{}, {}])".format(n1, n2))
else:
m = n1 * (n1 + 1) / 2
return batch_shape.concatenate([m])
def _forward_event_shape_tensor(self, input_shape_tensor):
batch_shape, d = input_shape_tensor[:-1], input_shape_tensor[-1]
n = vector_size_to_square_matrix_size(d, self.validate_args)
return array_ops.concat([batch_shape, [n, n]], axis=0)
def _inverse_event_shape_tensor(self, output_shape_tensor):
batch_shape, n = output_shape_tensor[:-2], output_shape_tensor[-1]
if self.validate_args:
is_square_matrix = check_ops.assert_equal(
n, output_shape_tensor[-2], message="Matrix must be square.")
with ops.control_dependencies([is_square_matrix]):
n = array_ops.identity(n)
d = math_ops.cast(n * (n + 1) / 2, output_shape_tensor.dtype)
return array_ops.concat([batch_shape, [d]], axis=0)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def vector_size_to_square_matrix_size(d, validate_args, name=None):
"""Convert a vector size to a matrix size."""
if isinstance(d, (float, int, np.generic, np.ndarray)):
n = (-1 + np.sqrt(1 + 8 * d)) / 2.
if float(int(n)) != n:
raise ValueError("Vector length is not a triangular number.")
return int(n)
else:
with ops.name_scope(name, "vector_size_to_square_matrix_size", [d]) as name:
n = (-1. + math_ops.sqrt(1 + 8. * math_ops.cast(d, dtypes.float32))) / 2.
if validate_args:
with ops.control_dependencies([
check_ops.assert_equal(
math_ops.cast(math_ops.cast(n, dtypes.int32), dtypes.float32),
n,
message="Vector length is not a triangular number")
]):
n = array_ops.identity(n)
return math_ops.cast(n, d.dtype)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/fill_triangular.py
|
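The shape bookkeeping in `FillTriangular` rests on the triangular-number identity `d = n * (n + 1) / 2`. A NumPy-only sketch follows (hypothetical helpers; note that the real `fill_triangular` packs elements in a clockwise spiral, so only the shapes agree with this row-major fill):

```python
import numpy as np

def vector_size_to_matrix_size(d):
    # Solve d = n * (n + 1) / 2 for n.
    n = (-1.0 + np.sqrt(1.0 + 8.0 * d)) / 2.0
    if float(int(n)) != n:
        raise ValueError("Vector length is not a triangular number.")
    return int(n)

assert vector_size_to_matrix_size(6) == 3       # 6 = 1 + 2 + 3
assert vector_size_to_matrix_size(10) == 4      # 10 = 1 + 2 + 3 + 4

def fill_lower_triangular(v):
    # Row-major lower-triangular fill, for shape intuition only.
    n = vector_size_to_matrix_size(len(v))
    m = np.zeros((n, n), dtype=np.asarray(v).dtype)
    m[np.tril_indices(n)] = v
    return m

print(fill_lower_triangular([1, 2, 3, 4, 5, 6]))
# [[1 0 0]
#  [2 3 0]
#  [4 5 6]]
```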
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ConditionalBijector base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = ["ConditionalBijector"]
class ConditionalBijector(bijector.Bijector):
"""Conditional Bijector is a Bijector that allows intrinsic conditioning."""
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def forward(self, x, name="forward", **condition_kwargs):
return self._call_forward(x, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def inverse(self, y, name="inverse", **condition_kwargs):
return self._call_inverse(y, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def inverse_log_det_jacobian(
self, y, event_ndims, name="inverse_log_det_jacobian",
**condition_kwargs):
return self._call_inverse_log_det_jacobian(
y, event_ndims, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def forward_log_det_jacobian(
self, x, event_ndims, name="forward_log_det_jacobian",
**condition_kwargs):
return self._call_forward_log_det_jacobian(
x, event_ndims, name, **condition_kwargs)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/conditional_bijector.py
|
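The `ConditionalBijector` above is purely a plumbing class: the public methods accept extra keyword arguments and hand them to the subclass implementation. A minimal, framework-free sketch of that pattern (class names are hypothetical, not the real API):

```python
class ToyConditionalBijector(object):
    """Forwards call-time conditioning kwargs to the subclass implementation."""

    def forward(self, x, **condition_kwargs):
        return self._forward(x, **condition_kwargs)

    def _forward(self, x, **condition_kwargs):
        raise NotImplementedError

class ToyShift(ToyConditionalBijector):
    """Shifts its input by a conditioning value supplied at call time."""

    def _forward(self, x, shift=0.0):
        return x + shift

b = ToyShift()
assert b.forward(1.0) == 1.0
assert b.forward(1.0, shift=2.5) == 3.5
```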
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""PowerTransform bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"PowerTransform",
]
class PowerTransform(bijector.Bijector):
"""Compute `Y = g(X) = (1 + X * c)**(1 / c), X >= -1 / c`.
The [power transform](https://en.wikipedia.org/wiki/Power_transform) maps
inputs from `[0, inf]` to `[-1/c, inf]`; this is equivalent to the `inverse`
of this bijector.
This bijector is equivalent to the `Exp` bijector when `c=0`.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
power=0.,
validate_args=False,
name="power_transform"):
"""Instantiates the `PowerTransform` bijector.
Args:
power: Python `float` scalar indicating the transform power, i.e.,
`Y = g(X) = (1 + X * c)**(1 / c)` where `c` is the `power`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: if `power < 0` or is not known statically.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[power]):
power = tensor_util.constant_value(
ops.convert_to_tensor(power, name="power"))
if power is None or power < 0:
raise ValueError("`power` must be a non-negative TF constant.")
self._power = power
super(PowerTransform, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
@property
def power(self):
"""The `c` in: `Y = g(X) = (1 + X * c)**(1 / c)`."""
return self._power
def _forward(self, x):
x = self._maybe_assert_valid_x(x)
if self.power == 0.:
return math_ops.exp(x)
# If large x accuracy is an issue, consider using:
# (1. + x * self.power)**(1. / self.power) when x >> 1.
return math_ops.exp(math_ops.log1p(x * self.power) / self.power)
def _inverse(self, y):
y = self._maybe_assert_valid_y(y)
if self.power == 0.:
return math_ops.log(y)
# If large y accuracy is an issue, consider using:
# (y**self.power - 1.) / self.power when y >> 1.
return math_ops.expm1(math_ops.log(y) * self.power) / self.power
def _inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
return (self.power - 1.) * math_ops.log(y)
def _forward_log_det_jacobian(self, x):
x = self._maybe_assert_valid_x(x)
if self.power == 0.:
return x
return (1. / self.power - 1.) * math_ops.log1p(x * self.power)
def _maybe_assert_valid_x(self, x):
if not self.validate_args or self.power == 0.:
return x
is_valid = check_ops.assert_non_negative(
1. + self.power * x,
message="Forward transformation input must be at least {}.".format(
-1. / self.power))
return control_flow_ops.with_dependencies([is_valid], x)
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_valid = check_ops.assert_positive(
y, message="Inverse transformation input must be greater than 0.")
return control_flow_ops.with_dependencies([is_valid], y)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/power_transform.py
|
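A NumPy-only sketch of the power transform above (hypothetical helper names): the forward map, its inverse, and the `c -> 0` limit where the transform reduces to `exp`, as the docstring notes.

```python
import numpy as np

def power_transform_forward(x, power):
    # Y = (1 + X * c)**(1 / c) for c > 0; the c -> 0 limit is exp(X).
    if power == 0.0:
        return np.exp(x)
    return np.exp(np.log1p(x * power) / power)

def power_transform_inverse(y, power):
    # X = (Y**c - 1) / c for c > 0; the c -> 0 limit is log(Y).
    if power == 0.0:
        return np.log(y)
    return np.expm1(np.log(y) * power) / power

x = np.array([-0.5, 0.0, 1.0, 3.0])   # all satisfy X >= -1 / c for the c used below
for c in (0.0, 0.5, 1.0):
    y = power_transform_forward(x, c)
    assert np.allclose(power_transform_inverse(y, c), x)

# As c -> 0 the transform approaches exp(x).
assert np.allclose(power_transform_forward(x, 1e-8), np.exp(x), rtol=1e-5)
```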
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"Affine",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _as_tensor(x, name):
"""Convenience to convert to `Tensor` or leave as `None`."""
return None if x is None else ops.convert_to_tensor(x, name=name)
class Affine(bijector.Bijector):
"""Compute `Y = g(X; shift, scale) = scale @ X + shift`.
Here `scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V.T`.
In TF parlance, the `scale` term is logically equivalent to:
```python
scale = (
scale_identity_multiplier * tf.linalg.tensor_diag(tf.ones(d)) +
tf.linalg.tensor_diag(scale_diag) +
scale_tril +
scale_perturb_factor @ diag(scale_perturb_diag) @
tf.transpose([scale_perturb_factor])
)
```
The `scale` term is applied without necessarily materializing constituent
matrices, i.e., the matmul is [matrix-free](
https://en.wikipedia.org/wiki/Matrix-free_methods) when possible.
#### Examples
```python
# Y = X
b = Affine()
# Y = X + shift
b = Affine(shift=[1., 2, 3])
# Y = 2 * I @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_identity_multiplier=2.)
# Y = tf.linalg.tensor_diag(d1) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_diag=[-1., 2, 1]) # Implicitly 3x3.
# Y = (I + v * v.T) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_perturb_factor=[[1., 0],
[0, 1],
[1, 1]])
# Y = (diag(d1) + v * diag(d2) * v.T) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_diag=[1., 3, 3], # Implicitly 3x3.
scale_perturb_diag=[2., 1], # Implicitly 2x2.
scale_perturb_factor=[[1., 0],
[0, 1],
[1, 1]])
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
shift=None,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
validate_args=False,
name="affine"):
"""Instantiates the `Affine` bijector.
This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
giving the forward operation:
```none
Y = g(X) = scale @ X + shift
```
where the `scale` term is logically equivalent to:
```python
scale = (
scale_identity_multiplier * tf.linalg.tensor_diag(tf.ones(d)) +
tf.linalg.tensor_diag(scale_diag) +
scale_tril +
scale_perturb_factor @ diag(scale_perturb_diag) @
tf.transpose([scale_perturb_factor])
)
```
If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
specified then `scale += IdentityMatrix`. Otherwise specifying a
`scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
`scale_diag != None` means `scale += tf.linalg.tensor_diag(scale_diag)`.
Args:
shift: Floating-point `Tensor`. If this is set to `None`, no shift is
applied.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower triangular
        matrix. `scale_tril` has shape [N1, N2, ... k, k], which represents a
        k x k lower triangular matrix.
When `None` no `scale_tril` term is added to `scale`.
The upper triangular elements above the diagonal are ignored.
scale_perturb_factor: Floating-point `Tensor` representing factor matrix
with last two dimensions of shape `(k, r)`. When `None`, no rank-r
update is added to `scale`.
scale_perturb_diag: Floating-point `Tensor` representing the diagonal
matrix. `scale_perturb_diag` has shape [N1, N2, ... r], which
represents an `r x r` diagonal matrix. When `None` low rank updates will
take the form `scale_perturb_factor * scale_perturb_factor.T`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: if `perturb_diag` is specified but not `perturb_factor`.
TypeError: if `shift` has different `dtype` from `scale` arguments.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
# Ambiguous definition of low rank update.
if scale_perturb_diag is not None and scale_perturb_factor is None:
raise ValueError("When scale_perturb_diag is specified, "
"scale_perturb_factor must be specified.")
# Special case, only handling a scaled identity matrix. We don't know its
# dimensions, so this is special cased.
# We don't check identity_multiplier, since below we set it to 1. if all
# other scale args are None.
self._is_only_identity_multiplier = (scale_tril is None and
scale_diag is None and
scale_perturb_factor is None)
with self._name_scope("init", values=[
shift, scale_identity_multiplier, scale_diag, scale_tril,
scale_perturb_diag, scale_perturb_factor]):
# In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`.
dtype = dtypes.float32
if shift is not None:
shift = ops.convert_to_tensor(shift, name="shift")
dtype = shift.dtype.base_dtype
self._shift = shift
# When no args are specified, pretend the scale matrix is the identity
# matrix.
if (self._is_only_identity_multiplier and
scale_identity_multiplier is None):
scale_identity_multiplier = ops.convert_to_tensor(1., dtype=dtype)
# self._create_scale_operator returns a LinearOperator in all cases
# except if self._is_only_identity_multiplier; in which case it
# returns a scalar Tensor.
scale = self._create_scale_operator(
identity_multiplier=scale_identity_multiplier,
diag=scale_diag,
tril=scale_tril,
perturb_diag=scale_perturb_diag,
perturb_factor=scale_perturb_factor,
shift=shift,
validate_args=validate_args)
if scale.dtype is not None:
dtype = scale.dtype.base_dtype
if scale is not None and not self._is_only_identity_multiplier:
if (shift is not None and
shift.dtype.base_dtype != scale.dtype.base_dtype):
raise TypeError(
"shift.dtype({}) is incompatible with scale.dtype({}).".format(
shift.dtype, scale.dtype))
if scale.tensor_rank is not None:
batch_ndims = scale.tensor_rank - 2
else:
batch_ndims = scale.tensor_rank_tensor() - 2
else:
# We won't need shape inference when scale is None or when scale is a
# scalar.
batch_ndims = 0
self._scale = scale
self._shaper = _DistributionShape(
batch_ndims=batch_ndims,
event_ndims=1,
validate_args=validate_args)
super(Affine, self).__init__(
forward_min_event_ndims=1,
graph_parents=(
[self._scale] if tensor_util.is_tensor(self._scale)
else self._scale.graph_parents +
[self._shift] if self._shift is not None else []),
is_constant_jacobian=True,
dtype=dtype,
validate_args=validate_args,
name=name)
def _create_scale_operator(self, identity_multiplier, diag, tril,
perturb_diag, perturb_factor, shift,
validate_args):
"""Construct `scale` from various components.
Args:
identity_multiplier: floating point rank 0 `Tensor` representing a scaling
done to the identity matrix.
diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
      tril: Floating-point `Tensor` representing the lower triangular matrix.
        `scale_tril` has shape [N1, N2, ... k, k], which represents a k x k
        lower triangular matrix.
perturb_diag: Floating-point `Tensor` representing the diagonal matrix of
the low rank update.
perturb_factor: Floating-point `Tensor` representing factor matrix.
      shift: Floating-point `Tensor` representing `shift` in `scale @ X + shift`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
Returns:
scale. In the case of scaling by a constant, scale is a
floating point `Tensor`. Otherwise, scale is a `LinearOperator`.
Raises:
ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
"""
identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier")
diag = _as_tensor(diag, "diag")
tril = _as_tensor(tril, "tril")
perturb_diag = _as_tensor(perturb_diag, "perturb_diag")
perturb_factor = _as_tensor(perturb_factor, "perturb_factor")
# If possible, use the low rank update to infer the shape of
# the identity matrix, when scale represents a scaled identity matrix
# with a low rank update.
shape_hint = None
if perturb_factor is not None:
shape_hint = distribution_util.dimension_size(perturb_factor, axis=-2)
if self._is_only_identity_multiplier:
if validate_args:
return control_flow_ops.with_dependencies(
[check_ops.assert_none_equal(
identity_multiplier,
array_ops.zeros([], identity_multiplier.dtype),
["identity_multiplier should be non-zero."])],
identity_multiplier)
return identity_multiplier
scale = distribution_util.make_tril_scale(
loc=shift,
scale_tril=tril,
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=validate_args,
assert_positive=False,
shape_hint=shape_hint)
if perturb_factor is not None:
return linalg.LinearOperatorLowRankUpdate(
scale,
u=perturb_factor,
diag_update=perturb_diag,
is_diag_update_positive=perturb_diag is None,
is_non_singular=True, # Implied by is_positive_definite=True.
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
return scale
@property
def shift(self):
"""The `shift` `Tensor` in `Y = scale @ X + shift`."""
return self._shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + shift`."""
return self._scale
def _forward(self, x):
y = x
if self._is_only_identity_multiplier:
y *= self._scale
if self.shift is not None:
return y + self.shift
return y
y, sample_shape = self._shaper.make_batch_of_event_sample_matrices(
y, expand_batch_dim=False)
with ops.control_dependencies(self._maybe_check_scale() if
self.validate_args else []):
y = self.scale.matmul(y)
y = self._shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape, expand_batch_dim=False)
if self.shift is not None:
y += self.shift
return y
def _inverse(self, y):
x = y
if self.shift is not None:
x -= self.shift
if self._is_only_identity_multiplier:
return x / self._scale
x, sample_shape = self._shaper.make_batch_of_event_sample_matrices(
x, expand_batch_dim=False)
# Solve fails if the op is singular so we may safely skip this assertion.
x = self.scale.solve(x)
x = self._shaper.undo_make_batch_of_event_sample_matrices(
x, sample_shape, expand_batch_dim=False)
return x
def _forward_log_det_jacobian(self, x):
# is_constant_jacobian = True for this bijector, hence the
# `log_det_jacobian` need only be specified for a single input, as this will
# be tiled to match `event_ndims`.
if self._is_only_identity_multiplier:
# We don't pad in this case and instead let the fldj be applied
# via broadcast.
event_size = array_ops.shape(x)[-1]
event_size = math_ops.cast(event_size, dtype=self._scale.dtype)
return math_ops.log(math_ops.abs(self._scale)) * event_size
return self.scale.log_abs_determinant()
def _maybe_check_scale(self):
try:
return [self.scale.assert_non_singular()]
except NotImplementedError:
pass
return []
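
# --- Illustrative sketch (not part of the original module) -------------------
# A minimal NumPy sketch of the `scale` composition implied by the constructor
# above, assuming the usual Affine parameterization
#   scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V^T
# with forward(x) = scale @ x + shift. The helper name and argument names
# (c, D1, L, V, D2) are hypothetical; they mirror scale_identity_multiplier,
# scale_diag, scale_tril, scale_perturb_factor and scale_perturb_diag.
import numpy as np


def affine_forward_sketch(x, shift=None, c=None, D1=None, L=None, V=None,
                          D2=None):
  k = x.shape[-1]
  scale = np.zeros((k, k))
  if c is not None:
    scale += c * np.eye(k)                    # scaled identity term
  if D1 is not None:
    scale += np.diag(D1)                      # diagonal term
  if L is not None:
    scale += np.tril(L)                       # lower triangular term
  if V is not None:                           # low rank update V diag(D2) V^T
    D2m = np.diag(D2) if D2 is not None else np.eye(V.shape[-1])
    scale += V @ D2m @ V.T
  y = scale @ x
  return y if shift is None else y + shift


# Example with a pure diagonal scale, matching scale_diag=[1., 2., 3.]:
# affine_forward_sketch(np.array([1., 2., 3.]), shift=np.array([-1., 0., 1.]),
#                       D1=np.array([1., 2., 3.]))  # -> [0., 4., 10.]
# ------------------------------------------------------------------------------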
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/affine.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector Ops.
Use [tfp.bijectors](/probability/api_docs/python/tfp/bijectors) instead.
@@AbsoluteValue
@@Affine
@@AffineLinearOperator
@@AffineScalar
@@Bijector
@@BatchNormalization
@@Chain
@@CholeskyOuterProduct
@@ConditionalBijector
@@Exp
@@FillTriangular
@@Gumbel
@@Identity
@@Inline
@@Invert
@@Kumaraswamy
@@MaskedAutoregressiveFlow
@@MatrixInverseTriL
@@Ordered
@@Permute
@@PowerTransform
@@RealNVP
@@Reshape
@@ScaleTriL
@@Sigmoid
@@SinhArcsinh
@@SoftmaxCentered
@@Softplus
@@Softsign
@@Square
@@TransformDiagonal
@@Weibull
@@masked_autoregressive_default_template
@@masked_dense
@@real_nvp_default_template
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.contrib.distributions.python.ops.bijectors.absolute_value import *
from tensorflow.contrib.distributions.python.ops.bijectors.affine import *
from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import *
from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import *
from tensorflow.contrib.distributions.python.ops.bijectors.batch_normalization import *
from tensorflow.contrib.distributions.python.ops.bijectors.chain import *
from tensorflow.contrib.distributions.python.ops.bijectors.cholesky_outer_product import *
from tensorflow.contrib.distributions.python.ops.bijectors.conditional_bijector import *
from tensorflow.contrib.distributions.python.ops.bijectors.exp import *
from tensorflow.contrib.distributions.python.ops.bijectors.fill_triangular import *
from tensorflow.contrib.distributions.python.ops.bijectors.gumbel import *
from tensorflow.contrib.distributions.python.ops.bijectors.inline import *
from tensorflow.contrib.distributions.python.ops.bijectors.invert import *
from tensorflow.contrib.distributions.python.ops.bijectors.kumaraswamy import *
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import *
from tensorflow.contrib.distributions.python.ops.bijectors.matrix_inverse_tril import *
from tensorflow.contrib.distributions.python.ops.bijectors.ordered import *
from tensorflow.contrib.distributions.python.ops.bijectors.permute import *
from tensorflow.contrib.distributions.python.ops.bijectors.power_transform import *
from tensorflow.contrib.distributions.python.ops.bijectors.real_nvp import *
from tensorflow.contrib.distributions.python.ops.bijectors.reshape import *
from tensorflow.contrib.distributions.python.ops.bijectors.scale_tril import *
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import *
from tensorflow.contrib.distributions.python.ops.bijectors.sinh_arcsinh import *
from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import *
from tensorflow.contrib.distributions.python.ops.bijectors.softplus import *
from tensorflow.contrib.distributions.python.ops.bijectors.softsign import *
from tensorflow.contrib.distributions.python.ops.bijectors.square import *
from tensorflow.contrib.distributions.python.ops.bijectors.transform_diagonal import *
from tensorflow.python.ops.distributions.bijector import *
from tensorflow.python.ops.distributions.identity_bijector import Identity
# pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ScaleTriL bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops.bijectors import affine_scalar
from tensorflow.contrib.distributions.python.ops.bijectors import chain
from tensorflow.contrib.distributions.python.ops.bijectors import fill_triangular
from tensorflow.contrib.distributions.python.ops.bijectors import softplus
from tensorflow.contrib.distributions.python.ops.bijectors import transform_diagonal
from tensorflow.python.util import deprecation
__all__ = [
"ScaleTriL",
]
class ScaleTriL(chain.Chain):
"""Transforms unconstrained vectors to TriL matrices with positive diagonal.
This is implemented as a simple `tfb.Chain` of `tfb.FillTriangular`
followed by `tfb.TransformDiagonal`, and provided mostly as a
convenience. The default setup is somewhat opinionated, using a
Softplus transformation followed by a small shift (`1e-5`) which
attempts to avoid numerical issues from zeros on the diagonal.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
b = tfb.ScaleTriL(
diag_bijector=tfb.Exp(),
diag_shift=None)
b.forward(x=[0., 0., 0.])
# Result: [[1., 0.],
# [0., 1.]]
b.inverse(y=[[1., 0],
[.5, 2]])
# Result: [log(2), .5, log(1)]
# Define a distribution over PSD matrices of shape `[3, 3]`,
# with `1 + 2 + 3 = 6` degrees of freedom.
dist = tfd.TransformedDistribution(
tfd.Normal(tf.zeros(6), tf.ones(6)),
tfb.Chain([tfb.CholeskyOuterProduct(), tfb.ScaleTriL()]))
# Using an identity transformation, ScaleTriL is equivalent to
# tfb.FillTriangular.
b = tfb.ScaleTriL(
diag_bijector=tfb.Identity(),
diag_shift=None)
# For greater control over initialization, one can manually encode
# pre- and post- shifts inside of `diag_bijector`.
b = tfb.ScaleTriL(
diag_bijector=tfb.Chain([
tfb.AffineScalar(shift=1e-3),
tfb.Softplus(),
tfb.AffineScalar(shift=0.5413)]), # softplus_inverse(1.)
# = log(expm1(1.)) = 0.5413
diag_shift=None)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
diag_bijector=None,
diag_shift=1e-5,
validate_args=False,
name="scale_tril"):
"""Instantiates the `ScaleTriL` bijector.
Args:
diag_bijector: `Bijector` instance, used to transform the output diagonal
to be positive.
Default value: `None` (i.e., `tfb.Softplus()`).
diag_shift: Float value broadcastable and added to all diagonal entries
after applying the `diag_bijector`. Setting a positive
value forces the output diagonal entries to be positive, but
prevents inverting the transformation for matrices with
diagonal entries less than this value.
        Default value: `1e-5` (i.e., a small positive shift is applied).
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
Default value: `False` (i.e., arguments are not validated).
name: Python `str` name given to ops managed by this object.
Default value: `scale_tril`.
"""
if diag_bijector is None:
diag_bijector = softplus.Softplus(validate_args=validate_args)
if diag_shift is not None:
diag_bijector = chain.Chain([affine_scalar.AffineScalar(shift=diag_shift),
diag_bijector])
super(ScaleTriL, self).__init__(
[transform_diagonal.TransformDiagonal(diag_bijector=diag_bijector),
fill_triangular.FillTriangular()],
validate_args=validate_args,
name=name)
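
# --- Illustrative sketch (not part of the original module) -------------------
# A minimal NumPy sketch of the default ScaleTriL forward pass for the 2x2
# case, assuming the vector-to-triangular packing implied by the docstring
# example above ([a, b, c] -> [[c, 0], [b, a]]). Softplus plus a 1e-5 shift
# stands in for the default `diag_bijector` / `diag_shift` pair; the helper
# name is hypothetical.
import numpy as np


def scale_tril_forward_2x2_sketch(vec, diag_shift=1e-5):
  a, b, c = vec
  tril = np.array([[c, 0.], [b, a]])          # FillTriangular
  diag = np.log1p(np.exp(np.diag(tril)))      # Softplus on the diagonal
  diag = diag + diag_shift                    # small positive shift
  out = tril.copy()
  np.fill_diagonal(out, diag)                 # TransformDiagonal
  return out


# scale_tril_forward_2x2_sketch([0., 0., 0.])
# -> approximately [[0.6931, 0.], [0., 0.6931]]  (softplus(0) = log 2)
# ------------------------------------------------------------------------------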
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/scale_tril.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gumbel bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Gumbel",
]
class Gumbel(bijector.Bijector):
"""Compute `Y = g(X) = exp(-exp(-(X - loc) / scale))`.
  This bijector maps inputs from `[-inf, inf]` to `[0, 1]`. The inverse of the
  bijector applied to a uniform random variable `X ~ U(0, 1)` gives back a
  random variable with the
[Gumbel distribution](https://en.wikipedia.org/wiki/Gumbel_distribution):
```none
Y ~ Gumbel(loc, scale)
pdf(y; loc, scale) = exp(
-( (y - loc) / scale + exp(- (y - loc) / scale) ) ) / scale
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=0.,
scale=1.,
validate_args=False,
name="gumbel"):
"""Instantiates the `Gumbel` bijector.
Args:
loc: Float-like `Tensor` that is the same dtype and is
broadcastable with `scale`.
This is `loc` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`.
scale: Positive Float-like `Tensor` that is the same dtype and is
broadcastable with `loc`.
This is `scale` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[loc, scale]):
self._loc = ops.convert_to_tensor(loc, name="loc")
self._scale = ops.convert_to_tensor(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
if validate_args:
self._scale = control_flow_ops.with_dependencies([
check_ops.assert_positive(
self._scale, message="Argument scale was not positive")
], self._scale)
super(Gumbel, self).__init__(
validate_args=validate_args,
forward_min_event_ndims=0,
name=name)
@property
def loc(self):
"""The `loc` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`."""
return self._loc
@property
def scale(self):
"""This is `scale` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`."""
return self._scale
def _forward(self, x):
z = (x - self.loc) / self.scale
return math_ops.exp(-math_ops.exp(-z))
def _inverse(self, y):
y = self._maybe_assert_valid_y(y)
return self.loc - self.scale * math_ops.log(-math_ops.log(y))
def _inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
return math_ops.log(self.scale / (-math_ops.log(y) * y))
def _forward_log_det_jacobian(self, x):
z = (x - self.loc) / self.scale
return -z - math_ops.exp(-z) - math_ops.log(self.scale)
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_positive = check_ops.assert_non_negative(
y, message="Inverse transformation input must be greater than 0.")
less_than_one = check_ops.assert_less_equal(
y,
constant_op.constant(1., y.dtype),
message="Inverse transformation input must be less than or equal to 1.")
return control_flow_ops.with_dependencies([is_positive, less_than_one], y)
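
# --- Illustrative sketch (not part of the original module) -------------------
# A minimal NumPy check of the Gumbel CDF bijector above: forward maps reals
# into (0, 1), inverse recovers x, and exp(forward log-det-Jacobian) equals the
# Gumbel density. Purely illustrative; the helper names are hypothetical and no
# TensorFlow is required.
import numpy as np


def gumbel_forward(x, loc=0., scale=1.):
  z = (x - loc) / scale
  return np.exp(-np.exp(-z))


def gumbel_inverse(y, loc=0., scale=1.):
  return loc - scale * np.log(-np.log(y))


def gumbel_fldj(x, loc=0., scale=1.):
  z = (x - loc) / scale
  return -z - np.exp(-z) - np.log(scale)


# Round trip: gumbel_inverse(gumbel_forward(1.3)) ~= 1.3, and
# np.exp(gumbel_fldj(1.3)) equals the Gumbel(0, 1) pdf evaluated at 1.3.
# ------------------------------------------------------------------------------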
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/gumbel.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SoftmaxCentered bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"SoftmaxCentered",
]
class SoftmaxCentered(bijector.Bijector):
"""Bijector which computes `Y = g(X) = exp([X 0]) / sum(exp([X 0]))`.
To implement [softmax](https://en.wikipedia.org/wiki/Softmax_function) as a
bijection, the forward transformation appends a value to the input and the
inverse removes this coordinate. The appended coordinate represents a pivot,
e.g., `softmax(x) = exp(x-c) / sum(exp(x-c))` where `c` is the implicit last
coordinate.
Example Use:
```python
bijector.SoftmaxCentered().forward(tf.math.log([2, 3, 4]))
# Result: [0.2, 0.3, 0.4, 0.1]
# Extra result: 0.1
bijector.SoftmaxCentered().inverse([0.2, 0.3, 0.4, 0.1])
# Result: tf.math.log([2, 3, 4])
# Extra coordinate removed.
```
At first blush it may seem like the [Invariance of domain](
https://en.wikipedia.org/wiki/Invariance_of_domain) theorem implies this
implementation is not a bijection. However, the appended dimension
makes the (forward) image non-open and the theorem does not directly apply.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
validate_args=False,
name="softmax_centered"):
self._graph_parents = []
self._name = name
super(SoftmaxCentered, self).__init__(
forward_min_event_ndims=1,
validate_args=validate_args,
name=name)
def _forward_event_shape(self, input_shape):
if input_shape.ndims is None or input_shape[-1] is None:
return input_shape
return tensor_shape.TensorShape([input_shape[-1] + 1])
def _forward_event_shape_tensor(self, input_shape):
return (input_shape[-1] + 1)[..., array_ops.newaxis]
def _inverse_event_shape(self, output_shape):
if output_shape.ndims is None or output_shape[-1] is None:
return output_shape
if output_shape[-1] <= 1:
raise ValueError("output_shape[-1] = %d <= 1" % output_shape[-1])
return tensor_shape.TensorShape([output_shape[-1] - 1])
def _inverse_event_shape_tensor(self, output_shape):
if self.validate_args:
      # Shape dimensions cannot be negative, so we only need to rule out
      # values <= 1.
is_greater_one = check_ops.assert_greater(
output_shape[-1], 1, message="Need last dimension greater than 1.")
output_shape = control_flow_ops.with_dependencies(
[is_greater_one], output_shape)
return (output_shape[-1] - 1)[..., array_ops.newaxis]
def _forward(self, x):
# Pad the last dim with a zeros vector. We need this because it lets us
# infer the scale in the inverse function.
y = distribution_util.pad(x, axis=-1, back=True)
# Set shape hints.
if x.shape.ndims is not None:
shape = x.shape[:-1].concatenate(x.shape.dims[-1] + 1)
y.shape.assert_is_compatible_with(shape)
y.set_shape(shape)
return nn_ops.softmax(y)
def _inverse(self, y):
# To derive the inverse mapping note that:
# y[i] = exp(x[i]) / normalization
# and
# y[end] = 1 / normalization.
# Thus:
# x[i] = log(exp(x[i])) - log(y[end]) - log(normalization)
# = log(exp(x[i])/normalization) - log(y[end])
# = log(y[i]) - log(y[end])
# Do this first to make sure CSE catches that it'll happen again in
# _inverse_log_det_jacobian.
x = math_ops.log(y)
log_normalization = (-x[..., -1])[..., array_ops.newaxis]
x = x[..., :-1] + log_normalization
# Set shape hints.
if y.shape.ndims is not None:
shape = y.shape[:-1].concatenate(y.shape.dims[-1] - 1)
x.shape.assert_is_compatible_with(shape)
x.set_shape(shape)
return x
def _inverse_log_det_jacobian(self, y):
# WLOG, consider the vector case:
# x = log(y[:-1]) - log(y[-1])
# where,
# y[-1] = 1 - sum(y[:-1]).
# We have:
# det{ dX/dY } = det{ diag(1 ./ y[:-1]) + 1 / y[-1] }
# = det{ inv{ diag(y[:-1]) - y[:-1]' y[:-1] } } (1)
# = 1 / det{ diag(y[:-1]) - y[:-1]' y[:-1] }
# = 1 / { (1 + y[:-1]' inv(diag(y[:-1])) y[:-1]) *
# det(diag(y[:-1])) } (2)
# = 1 / { y[-1] prod(y[:-1]) }
# = 1 / prod(y)
# (1) - https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula
# or by noting that det{ dX/dY } = 1 / det{ dY/dX } from Bijector
# docstring "Tip".
# (2) - https://en.wikipedia.org/wiki/Matrix_determinant_lemma
return -math_ops.reduce_sum(math_ops.log(y), axis=-1)
def _forward_log_det_jacobian(self, x):
# This code is similar to nn_ops.log_softmax but different because we have
# an implicit zero column to handle. I.e., instead of:
# reduce_sum(logits - reduce_sum(exp(logits), dim))
# we must do:
# log_normalization = 1 + reduce_sum(exp(logits))
# -log_normalization + reduce_sum(logits - log_normalization)
log_normalization = nn_ops.softplus(
math_ops.reduce_logsumexp(x, axis=-1, keepdims=True))
return array_ops.squeeze(
(-log_normalization + math_ops.reduce_sum(
x - log_normalization, axis=-1, keepdims=True)), axis=-1)
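
# --- Illustrative sketch (not part of the original module) -------------------
# A minimal NumPy sketch of the SoftmaxCentered maps above: forward appends an
# implicit zero logit and applies softmax; inverse recovers the logits relative
# to the last coordinate; the inverse log-det-Jacobian is -sum(log(y)). Helper
# names are hypothetical.
import numpy as np


def softmax_centered_forward(x):
  padded = np.concatenate([x, [0.]])
  e = np.exp(padded - padded.max())           # numerically stabilized softmax
  return e / e.sum()


def softmax_centered_inverse(y):
  return np.log(y[:-1]) - np.log(y[-1])


def softmax_centered_ildj(y):
  return -np.sum(np.log(y))


# softmax_centered_forward(np.log([2., 3., 4.]))        # -> [0.2, 0.3, 0.4, 0.1]
# softmax_centered_inverse(np.array([.2, .3, .4, .1]))  # -> log([2., 3., 4.])
# ------------------------------------------------------------------------------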
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/softmax_centered.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Real NVP bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import core as layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import template as template_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = ["RealNVP", "real_nvp_default_template"]
class RealNVP(bijector.Bijector):
"""RealNVP "affine coupling layer" for vector-valued events.
Real NVP models a normalizing flow on a `D`-dimensional distribution via a
single `D-d`-dimensional conditional distribution [(Dinh et al., 2017)][1]:
  `y[0:d] = x[0:d]`
  `y[d:D] = x[d:D] * math_ops.exp(log_scale_fn(x[0:d])) + shift_fn(x[0:d])`
The last `D-d` units are scaled and shifted based on the first `d` units only,
while the first `d` units are 'masked' and left unchanged. Real NVP's
`shift_and_log_scale_fn` computes vector-valued quantities. For
scale-and-shift transforms that do not depend on any masked units, i.e.
`d=0`, use the `tfb.Affine` bijector with learned parameters instead.
Masking is currently only supported for base distributions with
`event_ndims=1`. For more sophisticated masking schemes like checkerboard or
  channel-wise masking [(Dinh et al., 2017)][1], use the `tfb.Permute`
bijector to re-order desired masked units into the first `d` units. For base
distributions with `event_ndims > 1`, use the `tfb.Reshape` bijector to
flatten the event shape.
Recall that the MAF bijector [(Papamakarios et al., 2016)][4] implements a
normalizing flow via an autoregressive transformation. MAF and IAF have
opposite computational tradeoffs - MAF can train all units in parallel but
must sample units sequentially, while IAF must train units sequentially but
can sample in parallel. In contrast, Real NVP can compute both forward and
  inverse computations in parallel. However, the lack of an autoregressive
  transformation makes it less expressive on a per-bijector basis.
A "valid" `shift_and_log_scale_fn` must compute each `shift` (aka `loc` or
"mu" in [Papamakarios et al. (2016)][4]) and `log(scale)` (aka "alpha" in
[Papamakarios et al. (2016)][4]) such that each are broadcastable with the
arguments to `forward` and `inverse`, i.e., such that the calculations in
`forward`, `inverse` [below] are possible. For convenience,
  `real_nvp_default_template` is offered as a possible `shift_and_log_scale_fn`
function.
NICE [(Dinh et al., 2014)][2] is a special case of the Real NVP bijector
which discards the scale transformation, resulting in a constant-time
inverse-log-determinant-Jacobian. To use a NICE bijector instead of Real
NVP, `shift_and_log_scale_fn` should return `(shift, None)`, and
`is_constant_jacobian` should be set to `True` in the `RealNVP` constructor.
Calling `real_nvp_default_template` with `shift_only=True` returns one such
NICE-compatible `shift_and_log_scale_fn`.
Caching: the scalar input depth `D` of the base distribution is not known at
construction time. The first call to any of `forward(x)`, `inverse(x)`,
`inverse_log_det_jacobian(x)`, or `forward_log_det_jacobian(x)` memoizes
`D`, which is re-used in subsequent calls. This shape must be known prior to
graph execution (which is the case if using tf.layers).
#### Example Use
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
# A common choice for a normalizing flow is to use a Gaussian for the base
# distribution. (However, any continuous distribution would work.) E.g.,
num_dims = 3
num_samples = 1
nvp = tfd.TransformedDistribution(
distribution=tfd.MultivariateNormalDiag(loc=np.zeros(num_dims)),
bijector=tfb.RealNVP(
num_masked=2,
shift_and_log_scale_fn=tfb.real_nvp_default_template(
hidden_layers=[512, 512])))
x = nvp.sample(num_samples)
nvp.log_prob(x)
nvp.log_prob(np.zeros([num_samples, num_dims]))
```
For more examples, see [Jang (2018)][3].
#### References
[1]: Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density Estimation
using Real NVP. In _International Conference on Learning
Representations_, 2017. https://arxiv.org/abs/1605.08803
[2]: Laurent Dinh, David Krueger, and Yoshua Bengio. NICE: Non-linear
Independent Components Estimation. _arXiv preprint arXiv:1410.8516_,
2014. https://arxiv.org/abs/1410.8516
[3]: Eric Jang. Normalizing Flows Tutorial, Part 2: Modern Normalizing Flows.
_Technical Report_, 2018. http://blog.evjang.com/2018/01/nf2.html
[4]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
"""
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
num_masked,
shift_and_log_scale_fn,
is_constant_jacobian=False,
validate_args=False,
name=None):
"""Creates the Real NVP or NICE bijector.
Args:
num_masked: Python `int` indicating that the first `d` units of the event
should be masked. Must be in the closed interval `[1, D-1]`, where `D`
is the event size of the base distribution.
shift_and_log_scale_fn: Python `callable` which computes `shift` and
`log_scale` from both the forward domain (`x`) and the inverse domain
        (`y`). Calculation must respect the coupling structure described in the
        class docstring. Suggested default:
        `real_nvp_default_template(hidden_layers=...)`. Typically
the function contains `tf.Variables` and is wrapped using
`tf.compat.v1.make_template`. Returning `None` for either (both)
`shift`, `log_scale` is equivalent to (but more efficient than)
returning zero.
is_constant_jacobian: Python `bool`. Default: `False`. When `True` the
implementation assumes `log_scale` does not depend on the forward domain
(`x`) or inverse domain (`y`) values. (No validation is made;
`is_constant_jacobian=False` is always safe but possibly computationally
inefficient.)
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str`, name given to ops managed by this object.
Raises:
ValueError: If num_masked < 1.
"""
name = name or "real_nvp"
if num_masked <= 0:
raise ValueError("num_masked must be a positive integer.")
self._num_masked = num_masked
# At construction time, we don't know input_depth.
self._input_depth = None
self._shift_and_log_scale_fn = shift_and_log_scale_fn
super(RealNVP, self).__init__(
forward_min_event_ndims=1,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
def _cache_input_depth(self, x):
if self._input_depth is None:
self._input_depth = tensor_shape.dimension_value(
x.shape.with_rank_at_least(1)[-1])
if self._input_depth is None:
raise NotImplementedError(
"Rightmost dimension must be known prior to graph execution.")
if self._num_masked >= self._input_depth:
raise ValueError(
"Number of masked units must be smaller than the event size.")
def _forward(self, x):
self._cache_input_depth(x)
# Performs scale and shift.
x0, x1 = x[:, :self._num_masked], x[:, self._num_masked:]
shift, log_scale = self._shift_and_log_scale_fn(
x0, self._input_depth - self._num_masked)
y1 = x1
if log_scale is not None:
y1 *= math_ops.exp(log_scale)
if shift is not None:
y1 += shift
y = array_ops.concat([x0, y1], axis=-1)
return y
def _inverse(self, y):
self._cache_input_depth(y)
# Performs un-shift and un-scale.
y0, y1 = y[:, :self._num_masked], y[:, self._num_masked:]
shift, log_scale = self._shift_and_log_scale_fn(
y0, self._input_depth - self._num_masked)
x1 = y1
if shift is not None:
x1 -= shift
if log_scale is not None:
x1 *= math_ops.exp(-log_scale)
x = array_ops.concat([y0, x1], axis=-1)
return x
def _inverse_log_det_jacobian(self, y):
self._cache_input_depth(y)
y0 = y[:, :self._num_masked]
_, log_scale = self._shift_and_log_scale_fn(
y0, self._input_depth - self._num_masked)
if log_scale is None:
return constant_op.constant(0., dtype=y.dtype, name="ildj")
return -math_ops.reduce_sum(log_scale, axis=-1)
def _forward_log_det_jacobian(self, x):
self._cache_input_depth(x)
x0 = x[:, :self._num_masked]
_, log_scale = self._shift_and_log_scale_fn(
x0, self._input_depth - self._num_masked)
if log_scale is None:
return constant_op.constant(0., dtype=x.dtype, name="fldj")
return math_ops.reduce_sum(log_scale, axis=-1)
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def real_nvp_default_template(
hidden_layers,
shift_only=False,
activation=nn_ops.relu,
name=None,
*args,
**kwargs):
"""Build a scale-and-shift function using a multi-layer neural network.
This will be wrapped in a make_template to ensure the variables are only
created once. It takes the `d`-dimensional input x[0:d] and returns the `D-d`
dimensional outputs `loc` ("mu") and `log_scale` ("alpha").
Arguments:
    hidden_layers: Python `list`-like of non-negative integer scalars
      indicating the number of units in each hidden layer. Default: `[512, 512]`.
shift_only: Python `bool` indicating if only the `shift` term shall be
computed (i.e. NICE bijector). Default: `False`.
activation: Activation function (callable). Explicitly setting to `None`
implies a linear activation.
name: A name for ops managed by this function. Default:
"real_nvp_default_template".
*args: `tf.compat.v1.layers.dense` arguments.
**kwargs: `tf.compat.v1.layers.dense` keyword arguments.
Returns:
shift: `Float`-like `Tensor` of shift terms ("mu" in
[Papamakarios et al. (2016)][1]).
log_scale: `Float`-like `Tensor` of log(scale) terms ("alpha" in
[Papamakarios et al. (2016)][1]).
Raises:
NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
graph execution.
#### References
[1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
"""
with ops.name_scope(name, "real_nvp_default_template"):
def _fn(x, output_units):
"""Fully connected MLP parameterized via `real_nvp_template`."""
for units in hidden_layers:
x = layers.dense(
inputs=x, units=units, activation=activation, *args, **kwargs)
x = layers.dense(
inputs=x,
units=(1 if shift_only else 2) * output_units,
activation=None,
*args,
**kwargs)
if shift_only:
return x, None
shift, log_scale = array_ops.split(x, 2, axis=-1)
return shift, log_scale
return template_ops.make_template("real_nvp_default_template", _fn)
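
# --- Illustrative sketch (not part of the original module) -------------------
# A minimal NumPy sketch of the affine coupling transform implemented by the
# RealNVP class above: the first `d` units pass through unchanged and
# parameterize an elementwise scale-and-shift of the remaining `D - d` units.
# `shift_and_log_scale_fn` is any callable mapping x[0:d] to a pair of
# (D - d)-vectors; the toy `fn` below and the helper names are hypothetical.
import numpy as np


def coupling_forward(x, d, shift_and_log_scale_fn):
  x0, x1 = x[:d], x[d:]
  shift, log_scale = shift_and_log_scale_fn(x0)
  return np.concatenate([x0, x1 * np.exp(log_scale) + shift])


def coupling_inverse(y, d, shift_and_log_scale_fn):
  y0, y1 = y[:d], y[d:]
  shift, log_scale = shift_and_log_scale_fn(y0)
  return np.concatenate([y0, (y1 - shift) * np.exp(-log_scale)])


# Toy parameterization: shift = sum(x0), log_scale = 0 (i.e., NICE-style).
fn = lambda x0: (np.full(1, x0.sum()), np.zeros(1))
x = np.array([1., 2., 3.])
y = coupling_forward(x, d=2, shift_and_log_scale_fn=fn)   # [1., 2., 6.]
assert np.allclose(coupling_inverse(y, d=2, shift_and_log_scale_fn=fn), x)
# ------------------------------------------------------------------------------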
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AffineLinearOperator bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util import deprecation
__all__ = [
"AffineLinearOperator",
]
class AffineLinearOperator(bijector.Bijector):
"""Compute `Y = g(X; shift, scale) = scale @ X + shift`.
`shift` is a numeric `Tensor` and `scale` is a `LinearOperator`.
If `X` is a scalar then the forward transformation is: `scale * X + shift`
where `*` denotes the scalar product.
Note: we don't always simply transpose `X` (but write it this way for
brevity). Actually the input `X` undergoes the following transformation
before being premultiplied by `scale`:
1. If there are no sample dims, we call `X = tf.expand_dims(X, 0)`, i.e.,
`new_sample_shape = [1]`. Otherwise do nothing.
2. The sample shape is flattened to have one dimension, i.e.,
`new_sample_shape = [n]` where `n = tf.reduce_prod(old_sample_shape)`.
3. The sample dim is cyclically rotated left by 1, i.e.,
`new_shape = [B1,...,Bb, k, n]` where `n` is as above, `k` is the
event_shape, and `B1,...,Bb` are the batch shapes for each of `b` batch
dimensions.
(For more details see `shape.make_batch_of_event_sample_matrices`.)
The result of the above transformation is that `X` can be regarded as a batch
of matrices where each column is a draw from the distribution. After
premultiplying by `scale`, we take the inverse of this procedure. The input
`Y` also undergoes the same transformation before/after premultiplying by
`inv(scale)`.
Example Use:
```python
linalg = tf.linalg
x = [1., 2, 3]
shift = [-1., 0., 1]
diag = [1., 2, 3]
scale = linalg.LinearOperatorDiag(diag)
affine = AffineLinearOperator(shift, scale)
# In this case, `forward` is equivalent to:
# y = scale @ x + shift
y = affine.forward(x) # [0., 4, 10]
shift = [2., 3, 1]
tril = [[1., 0, 0],
[2, 1, 0],
[3, 2, 1]]
scale = linalg.LinearOperatorLowerTriangular(tril)
affine = AffineLinearOperator(shift, scale)
# In this case, `forward` is equivalent to:
# np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift
y = affine.forward(x) # [3., 7, 11]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
shift=None,
scale=None,
validate_args=False,
name="affine_linear_operator"):
"""Instantiates the `AffineLinearOperator` bijector.
Args:
shift: Floating-point `Tensor`.
scale: Subclass of `LinearOperator`. Represents the (batch) positive
definite matrix `M` in `R^{k x k}`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
TypeError: if `scale` is not a `LinearOperator`.
TypeError: if `shift.dtype` does not match `scale.dtype`.
ValueError: if not `scale.is_non_singular`.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
graph_parents = []
with self._name_scope("init", values=[shift]):
      # In the absence of `shift` and `scale`, we'll assume `dtype` is `float32`.
dtype = dtypes.float32
if shift is not None:
shift = ops.convert_to_tensor(shift, name="shift")
graph_parents += [shift]
dtype = shift.dtype.base_dtype
self._shift = shift
if scale is not None:
if (shift is not None and
shift.dtype.base_dtype != scale.dtype.base_dtype):
raise TypeError(
"shift.dtype({}) is incompatible with scale.dtype({}).".format(
shift.dtype, scale.dtype))
if not isinstance(scale, linear_operator.LinearOperator):
raise TypeError("scale is not an instance of tf.LinearOperator")
if validate_args and not scale.is_non_singular:
raise ValueError("Scale matrix must be non-singular.")
graph_parents += scale.graph_parents
if scale.tensor_rank is not None:
batch_ndims = scale.tensor_rank - 2
else:
batch_ndims = scale.tensor_rank_tensor() - 2
graph_parents += [batch_ndims]
if scale.dtype is not None:
dtype = scale.dtype.base_dtype
else:
batch_ndims = 0 # We won't need shape inference when scale is None.
self._scale = scale
self._shaper = _DistributionShape(
batch_ndims=batch_ndims,
event_ndims=1,
validate_args=validate_args)
super(AffineLinearOperator, self).__init__(
forward_min_event_ndims=1,
graph_parents=graph_parents,
is_constant_jacobian=True,
dtype=dtype,
validate_args=validate_args,
name=name)
@property
def shift(self):
"""The `shift` `Tensor` in `Y = scale @ X + shift`."""
return self._shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + shift`."""
return self._scale
def _forward(self, x):
y = x
if self.scale is not None:
y, sample_shape = self._shaper.make_batch_of_event_sample_matrices(
y, expand_batch_dim=False)
with ops.control_dependencies(self._maybe_collect_assertions() if
self.validate_args else []):
y = self.scale.matmul(y)
y = self._shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape, expand_batch_dim=False)
if self.shift is not None:
y += self.shift
return y
def _inverse(self, y):
x = y
if self.shift is not None:
x -= self.shift
if self.scale is not None:
x, sample_shape = self._shaper.make_batch_of_event_sample_matrices(
x, expand_batch_dim=False)
# Solve fails if the op is singular so we may safely skip this assertion.
x = self.scale.solve(x)
x = self._shaper.undo_make_batch_of_event_sample_matrices(
x, sample_shape, expand_batch_dim=False)
return x
def _forward_log_det_jacobian(self, x):
# is_constant_jacobian = True for this bijector, hence the
# `log_det_jacobian` need only be specified for a single input, as this will
# be tiled to match `event_ndims`.
if self.scale is None:
return constant_op.constant(0., dtype=x.dtype.base_dtype)
with ops.control_dependencies(self._maybe_collect_assertions() if
self.validate_args else []):
return self.scale.log_abs_determinant()
def _maybe_collect_assertions(self):
try:
return [self.scale.assert_non_singular()]
except NotImplementedError:
pass
return []
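
# --- Illustrative sketch (not part of the original module) -------------------
# A minimal NumPy check of the lower-triangular example in the docstring above,
# assuming forward is scale @ x + shift and the forward log-det-Jacobian is
# log|det(scale)|, which for a triangular matrix is the sum of log|diag|.
import numpy as np

x = np.array([1., 2., 3.])
shift = np.array([2., 3., 1.])
tril = np.array([[1., 0., 0.],
                 [2., 1., 0.],
                 [3., 2., 1.]])
y = tril @ x + shift                                  # [3., 7., 11.]
fldj = np.sum(np.log(np.abs(np.diag(tril))))          # log|det| = 0 here
assert np.allclose(np.linalg.solve(tril, y - shift), x)
# ------------------------------------------------------------------------------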
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/affine_linear_operator.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TransformDiagonal bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"TransformDiagonal",
]
class TransformDiagonal(bijector.Bijector):
"""Applies a Bijector to the diagonal of a matrix.
#### Example
```python
b = tfb.TransformDiagonal(diag_bijector=tfb.Exp())
b.forward([[1., 0.],
[0., 1.]])
# ==> [[2.718, 0.],
  #      [0., 2.718]]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
diag_bijector,
validate_args=False,
name="transform_diagonal"):
"""Instantiates the `TransformDiagonal` bijector.
Args:
diag_bijector: `Bijector` instance used to transform the diagonal.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._diag_bijector = diag_bijector
super(TransformDiagonal, self).__init__(
forward_min_event_ndims=2,
inverse_min_event_ndims=2,
validate_args=validate_args,
name=name)
def _forward(self, x):
diag = self._diag_bijector.forward(array_ops.matrix_diag_part(x))
return array_ops.matrix_set_diag(x, diag)
def _inverse(self, y):
diag = self._diag_bijector.inverse(array_ops.matrix_diag_part(y))
return array_ops.matrix_set_diag(y, diag)
def _forward_log_det_jacobian(self, x):
# We formulate the Jacobian with respect to the flattened matrices
# `vec(x)` and `vec(y)`. Suppose for notational convenience that
# the first `n` entries of `vec(x)` are the diagonal of `x`, and
# the remaining `n**2-n` entries are the off-diagonals in
# arbitrary order. Then the Jacobian is a block-diagonal matrix,
# with the Jacobian of the diagonal bijector in the first block,
# and the identity Jacobian for the remaining entries (since this
# bijector acts as the identity on non-diagonal entries):
#
# J_vec(x) (vec(y)) =
# -------------------------------
# | J_diag(x) (diag(y)) 0 | n entries
# | |
# | 0 I | n**2-n entries
# -------------------------------
# n n**2-n
#
# Since the log-det of the second (identity) block is zero, the
# overall log-det-jacobian is just the log-det of first block,
# from the diagonal bijector.
#
# Note that for elementwise operations (exp, softplus, etc) the
# first block of the Jacobian will itself be a diagonal matrix,
# but our implementation does not require this to be true.
return self._diag_bijector.forward_log_det_jacobian(
array_ops.matrix_diag_part(x), event_ndims=1)
def _inverse_log_det_jacobian(self, y):
return self._diag_bijector.inverse_log_det_jacobian(
array_ops.matrix_diag_part(y), event_ndims=1)
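
# --- Illustrative sketch (not part of the original module) -------------------
# A minimal NumPy sketch of the block-diagonal Jacobian argument above: for a
# TransformDiagonal wrapping Exp, only the diagonal entries change, so the
# forward log-det-Jacobian reduces to the diagonal bijector's log-det applied
# to diag(x), i.e. sum of the diagonal of x for Exp. Helper names are
# hypothetical.
import numpy as np


def transform_diagonal_exp_forward(x):
  y = x.copy()
  np.fill_diagonal(y, np.exp(np.diag(x)))
  return y


def transform_diagonal_exp_fldj(x):
  return np.sum(np.diag(x))     # Exp's fldj is the identity on its input


x = np.array([[1., 0.], [0., 1.]])
transform_diagonal_exp_forward(x)   # ~[[2.718, 0.], [0., 2.718]]
transform_diagonal_exp_fldj(x)      # 2.0
# ------------------------------------------------------------------------------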
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/transform_diagonal.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ordered bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Ordered",
]
class Ordered(bijector.Bijector):
"""Bijector which maps a tensor x_k that has increasing elements in the last
dimension to an unconstrained tensor y_k.
  Both the domain and the codomain of the mapping are [-inf, inf]; however,
  the input of the forward mapping must be strictly increasing.
The inverse of the bijector applied to a normal random vector `y ~ N(0, 1)`
gives back a sorted random vector with the same distribution `x ~ N(0, 1)`
where `x = sort(y)`
On the last dimension of the tensor, Ordered bijector performs:
`y[0] = x[0]`
`y[1:] = math_ops.log(x[1:] - x[:-1])`
#### Example Use:
```python
bijector.Ordered().forward([2, 3, 4])
# Result: [2., 0., 0.]
bijector.Ordered().inverse([0.06428002, -1.07774478, -0.71530371])
# Result: [0.06428002, 0.40464228, 0.8936858]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, validate_args=False, name="ordered"):
super(Ordered, self).__init__(
forward_min_event_ndims=1,
validate_args=validate_args,
name=name)
def _forward_event_shape(self, input_shape):
if input_shape.ndims is None or input_shape[-1] is None:
return input_shape
return tensor_shape.TensorShape([input_shape[-1]])
def _forward_event_shape_tensor(self, input_shape):
return (input_shape[-1])[..., array_ops.newaxis]
def _inverse_event_shape(self, output_shape):
if output_shape.ndims is None or output_shape[-1] is None:
return output_shape
if output_shape[-1] <= 1:
raise ValueError("output_shape[-1] = %d <= 1" % output_shape[-1])
return tensor_shape.TensorShape([output_shape[-1]])
def _inverse_event_shape_tensor(self, output_shape):
if self.validate_args:
is_greater_one = check_ops.assert_greater(
output_shape[-1], 1, message="Need last dimension greater than 1.")
output_shape = control_flow_ops.with_dependencies(
[is_greater_one], output_shape)
return (output_shape[-1])[..., array_ops.newaxis]
def _forward(self, x):
x = self._maybe_assert_valid_x(x)
y0 = x[..., 0, array_ops.newaxis]
yk = math_ops.log(x[..., 1:] - x[..., :-1])
y = array_ops.concat([y0, yk], axis=-1)
return y
def _inverse(self, y):
x0 = y[..., 0, array_ops.newaxis]
xk = math_ops.exp(y[..., 1:])
x = array_ops.concat([x0, xk], axis=-1)
return math_ops.cumsum(x, axis=-1)
def _inverse_log_det_jacobian(self, y):
# The Jacobian of the inverse mapping is lower
# triangular, with the diagonal elements being:
# J[i,i] = 1 if i=1, and
# exp(y_i) if 1<i<=K
# which gives the absolute Jacobian determinant:
    # |det(Jac)| = prod_{i=2}^{K} exp(y[i]).
# (1) - Stan Modeling Language User's Guide and Reference Manual
# Version 2.17.0 session 35.2
return math_ops.reduce_sum(y[..., 1:], axis=-1)
def _forward_log_det_jacobian(self, x):
x = self._maybe_assert_valid_x(x)
return -math_ops.reduce_sum(
math_ops.log(x[..., 1:] - x[..., :-1]),
axis=-1)
def _maybe_assert_valid_x(self, x):
if not self.validate_args:
return x
is_valid = check_ops.assert_positive(
x[..., 1:] - x[..., :-1],
message="Forward transformation input must be strictly increasing.")
return control_flow_ops.with_dependencies([is_valid], x)
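
# --- Illustrative sketch (not part of the original module) -------------------
# A minimal NumPy sketch of the Ordered maps above: forward keeps the first
# element and takes log-differences of the rest; inverse exponentiates and
# cumulatively sums, recovering a strictly increasing vector. Helper names are
# hypothetical.
import numpy as np


def ordered_forward(x):
  return np.concatenate([x[:1], np.log(np.diff(x))])


def ordered_inverse(y):
  return np.cumsum(np.concatenate([y[:1], np.exp(y[1:])]))


ordered_forward(np.array([2., 3., 4.]))      # [2., 0., 0.]
ordered_inverse(np.array([2., 0., 0.]))      # [2., 3., 4.]
# ------------------------------------------------------------------------------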
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/ordered.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Invert bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Invert",
]
class Invert(bijector.Bijector):
"""Bijector which inverts another Bijector.
Example Use: [ExpGammaDistribution (see Background & Context)](
https://reference.wolfram.com/language/ref/ExpGammaDistribution.html)
models `Y=log(X)` where `X ~ Gamma`.
```python
exp_gamma_distribution = TransformedDistribution(
distribution=Gamma(concentration=1., rate=2.),
bijector=bijector.Invert(bijector.Exp())
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, bijector, validate_args=False, name=None):
"""Creates a `Bijector` which swaps the meaning of `inverse` and `forward`.
Note: An inverted bijector's `inverse_log_det_jacobian` is often more
efficient if the base bijector implements `_forward_log_det_jacobian`. If
`_forward_log_det_jacobian` is not implemented then the following code is
used:
```python
y = self.inverse(x, **kwargs)
return -self.inverse_log_det_jacobian(y, **kwargs)
```
Args:
bijector: Bijector instance.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str`, name given to ops managed by this object.
"""
if not bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError(
"Invert is not implemented for non-injective bijectors.")
self._bijector = bijector
super(Invert, self).__init__(
graph_parents=bijector.graph_parents,
forward_min_event_ndims=bijector.inverse_min_event_ndims,
inverse_min_event_ndims=bijector.forward_min_event_ndims,
is_constant_jacobian=bijector.is_constant_jacobian,
validate_args=validate_args,
dtype=bijector.dtype,
name=name or "_".join(["invert", bijector.name]))
def _forward_event_shape(self, input_shape):
return self.bijector._inverse_event_shape(input_shape) # pylint: disable=protected-access
def _forward_event_shape_tensor(self, input_shape):
return self.bijector._inverse_event_shape_tensor(input_shape) # pylint: disable=protected-access
def _inverse_event_shape(self, output_shape):
return self.bijector._forward_event_shape(output_shape) # pylint: disable=protected-access
def _inverse_event_shape_tensor(self, output_shape):
return self.bijector._forward_event_shape_tensor(output_shape) # pylint: disable=protected-access
@property
def bijector(self):
return self._bijector
def _forward(self, x, **kwargs):
return self.bijector._inverse(x, **kwargs) # pylint: disable=protected-access
def _inverse(self, y, **kwargs):
return self.bijector._forward(y, **kwargs) # pylint: disable=protected-access
def _inverse_log_det_jacobian(self, y, **kwargs):
return self.bijector._forward_log_det_jacobian(y, **kwargs) # pylint: disable=protected-access
def _forward_log_det_jacobian(self, x, **kwargs):
return self.bijector._inverse_log_det_jacobian(x, **kwargs) # pylint: disable=protected-access
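
# --- Illustrative sketch (not part of the original module) -------------------
# A minimal sketch of the delegation above, using tiny stand-in classes rather
# than the TensorFlow bijectors: inverting an Exp-like bijector yields a
# Log-like bijector whose forward is log(y) and whose forward log-det-Jacobian
# is the negated forward log-det-Jacobian of Exp evaluated at log(y). The
# _Exp and _Invert names are hypothetical stand-ins.
import numpy as np


class _Exp(object):
  forward = staticmethod(np.exp)
  inverse = staticmethod(np.log)
  forward_log_det_jacobian = staticmethod(lambda x: x)


class _Invert(object):

  def __init__(self, bijector):
    self._b = bijector

  def forward(self, x):
    return self._b.inverse(x)

  def inverse(self, y):
    return self._b.forward(y)

  def forward_log_det_jacobian(self, x):
    return -self._b.forward_log_det_jacobian(self._b.inverse(x))


log_like = _Invert(_Exp())
log_like.forward(np.e)                      # 1.0
log_like.forward_log_det_jacobian(np.e)     # -1.0 == log(1/e)
# ------------------------------------------------------------------------------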
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/invert.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Square bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Square",
]
class Square(bijector.Bijector):
"""Compute `g(X) = X^2`; X is a positive real number.
g is a bijection between the non-negative real numbers (R_+) and the
non-negative real numbers.
#### Examples
```python
bijector.Square().forward(x=[[1., 0], [2, 1]])
# Result: [[1., 0], [4, 1]], i.e., x^2
bijector.Square().inverse(y=[[1., 4], [9, 1]])
# Result: [[1., 2], [3, 1]], i.e., sqrt(y).
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, validate_args=False, name="square"):
"""Instantiates the `Square` bijector.
Args:
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._name = name
super(Square, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
x = self._maybe_assert_valid(x)
return math_ops.square(x)
def _inverse(self, y):
y = self._maybe_assert_valid(y)
return math_ops.sqrt(y)
def _forward_log_det_jacobian(self, x):
x = self._maybe_assert_valid(x)
return np.log(2.) + math_ops.log(x)
def _maybe_assert_valid(self, t):
if not self.validate_args:
return t
is_valid = check_ops.assert_non_negative(
t, message="All elements must be non-negative.")
return control_flow_ops.with_dependencies([is_valid], t)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/square.py
|
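A short, hedged sketch of the `Square` bijector defined above (assuming TF 1.x with `tf.contrib` available), cross-checking the log-determinant against the `np.log(2.) + log(x)` expression in `_forward_log_det_jacobian`.

```python
import numpy as np
import tensorflow as tf

tfb = tf.contrib.distributions.bijectors

square = tfb.Square()
x = np.array([1., 2., 3.], dtype=np.float32)
with tf.Session() as sess:
  print(sess.run(square.forward(x)))                  # [1., 4., 9.]
  print(sess.run(square.inverse(x ** 2)))             # [1., 2., 3.]
  fldj = sess.run(square.forward_log_det_jacobian(x, event_ndims=0))
  print(fldj)                                         # log|dy/dx| = log(2x)
  print(np.log(2. * x))                               # same values via NumPy
```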
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Softplus bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"Softplus",
]
class Softplus(bijector.Bijector):
"""Bijector which computes `Y = g(X) = Log[1 + exp(X)]`.
The softplus `Bijector` has the following two useful properties:
  * The range is the positive real numbers
* `softplus(x) approx x`, for large `x`, so it does not overflow as easily as
the `Exp` `Bijector`.
The optional nonzero `hinge_softness` parameter changes the transition at
zero. With `hinge_softness = c`, the bijector is:
```f_c(x) := c * g(x / c) = c * Log[1 + exp(x / c)].```
For large `x >> 1`, `c * Log[1 + exp(x / c)] approx c * Log[exp(x / c)] = x`,
so the behavior for large `x` is the same as the standard softplus.
  As `c` approaches 0 from the right, `f_c(x)` becomes less and less soft,
approaching `max(0, x)`.
* `c = 1` is the default.
  * `c > 0` but small means `f_c(x) approx ReLU(x) = max(0, x)`.
* `c < 0` flips sign and reflects around the `y-axis`: `f_{-c}(x) = -f_c(-x)`.
* `c = 0` results in a non-bijective transformation and triggers an exception.
Example Use:
```python
  # Create the Y=g(X)=softplus(X) transform; softplus acts element-wise, so it
  # can be applied to Tensors of any shape.
softplus = Softplus()
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
log(1 + exp(x)) == softplus.forward(x)
log(exp(x) - 1) == softplus.inverse(x)
```
Note: log(.) and exp(.) are applied element-wise but the Jacobian is a
reduction over the event space.
"""
@distribution_util.AppendDocstring(
kwargs_dict={
"hinge_softness": (
"Nonzero floating point `Tensor`. Controls the softness of what "
"would otherwise be a kink at the origin. Default is 1.0")})
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
hinge_softness=None,
validate_args=False,
name="softplus"):
with ops.name_scope(name, values=[hinge_softness]):
if hinge_softness is not None:
self._hinge_softness = ops.convert_to_tensor(
hinge_softness, name="hinge_softness")
else:
self._hinge_softness = None
      # Only validate when a hinge_softness was actually supplied; the default
      # (hinge_softness = 1) needs no check.
      if validate_args and self._hinge_softness is not None:
nonzero_check = check_ops.assert_none_equal(
ops.convert_to_tensor(
0, dtype=self.hinge_softness.dtype),
self.hinge_softness,
message="hinge_softness must be non-zero")
self._hinge_softness = control_flow_ops.with_dependencies(
[nonzero_check], self.hinge_softness)
super(Softplus, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
if self.hinge_softness is None:
return nn_ops.softplus(x)
hinge_softness = math_ops.cast(self.hinge_softness, x.dtype)
return hinge_softness * nn_ops.softplus(x / hinge_softness)
def _inverse(self, y):
if self.hinge_softness is None:
return distribution_util.softplus_inverse(y)
hinge_softness = math_ops.cast(self.hinge_softness, y.dtype)
return hinge_softness * distribution_util.softplus_inverse(
y / hinge_softness)
def _inverse_log_det_jacobian(self, y):
# Could also do:
# ildj = math_ops.reduce_sum(y - distribution_util.softplus_inverse(y),
# axis=event_dims)
# but the following is more numerically stable. Ie,
# Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
# ==> dX/dY = exp{Y} / (exp{Y} - 1)
# = 1 / (1 - exp{-Y}),
# which is the most stable for large Y > 0. For small Y, we use
# 1 - exp{-Y} approx Y.
if self.hinge_softness is not None:
y /= math_ops.cast(self.hinge_softness, y.dtype)
return -math_ops.log(-math_ops.expm1(-y))
def _forward_log_det_jacobian(self, x):
if self.hinge_softness is not None:
x /= math_ops.cast(self.hinge_softness, x.dtype)
return -nn_ops.softplus(-x)
@property
def hinge_softness(self):
return self._hinge_softness
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/softplus.py
|
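A hedged usage sketch for `Softplus`, including the `hinge_softness` knob described in the docstring; assumes a TF 1.x environment with `tf.contrib`.

```python
import numpy as np
import tensorflow as tf

tfb = tf.contrib.distributions.bijectors

soft = tfb.Softplus()                      # c = 1, the default
sharp = tfb.Softplus(hinge_softness=0.1)   # small c: close to max(0, x)
x = np.array([-2., 0., 2.], dtype=np.float32)
with tf.Session() as sess:
  print(sess.run(soft.forward(x)))                   # log(1 + exp(x))
  print(sess.run(sharp.forward(x)))                  # ~[0., 0.07, 2.0]
  # The inverse recovers x (up to floating-point error).
  print(sess.run(soft.inverse(soft.forward(x))))
```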
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sigmoid bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Sigmoid",
]
class Sigmoid(bijector.Bijector):
"""Bijector which computes `Y = g(X) = 1 / (1 + exp(-X))`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, validate_args=False, name="sigmoid"):
super(Sigmoid, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
return math_ops.sigmoid(x)
def _inverse(self, y):
return math_ops.log(y) - math_ops.log1p(-y)
def _inverse_log_det_jacobian(self, y):
return -math_ops.log(y) - math_ops.log1p(-y)
def _forward_log_det_jacobian(self, x):
return -nn_ops.softplus(-x) - nn_ops.softplus(x)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/sigmoid.py
|
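A quick numerical check of the `Sigmoid` formulas above; the identity `forward_log_det_jacobian(x) = log(sigmoid(x)) + log(1 - sigmoid(x))` is verified directly in NumPy. Assumes TF 1.x with `tf.contrib`.

```python
import numpy as np
import tensorflow as tf

tfb = tf.contrib.distributions.bijectors

sig = tfb.Sigmoid()
x = np.array([-3., 0., 3.], dtype=np.float32)
with tf.Session() as sess:
  y = sess.run(sig.forward(x))             # 1 / (1 + exp(-x))
  print(y)
  print(sess.run(sig.inverse(y)))          # logit(y) recovers x
  print(sess.run(sig.forward_log_det_jacobian(x, event_ndims=0)))
  print(np.log(y * (1. - y)))              # matches the line above
```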
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exp bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops.bijectors import power_transform
from tensorflow.python.util import deprecation
__all__ = [
"Exp",
]
class Exp(power_transform.PowerTransform):
"""Compute `Y = g(X) = exp(X)`.
Example Use:
```python
  # Create the Y=g(X)=exp(X) transform; exp acts element-wise, so it can be
  # applied to Tensors of any shape.
exp = Exp()
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
exp(x) == exp.forward(x)
log(x) == exp.inverse(x)
```
Note: the exp(.) is applied element-wise but the Jacobian is a reduction
over the event space.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
validate_args=False,
name="exp"):
"""Instantiates the `Exp` bijector.
Args:
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
# forward_min_event_ndims = 0.
# No forward_min_event_ndims specified as this is done in PowerTransform.
super(Exp, self).__init__(
validate_args=validate_args,
name=name)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/exp.py
|
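A small sketch of `Exp` round-tripping through its inverse; assumes TF 1.x with `tf.contrib`. Since `Exp` is `PowerTransform` with its default power, the forward log-det-Jacobian reduces to `x` itself.

```python
import numpy as np
import tensorflow as tf

tfb = tf.contrib.distributions.bijectors

exp = tfb.Exp()
x = np.array([0., 1., 2.], dtype=np.float32)
with tf.Session() as sess:
  y = sess.run(exp.forward(x))                        # [1., e, e**2]
  print(y)
  print(sess.run(exp.inverse(y)))                     # recovers x
  print(sess.run(exp.forward_log_det_jacobian(x, event_ndims=0)))  # == x
```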
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"AffineScalar",
]
class AffineScalar(bijector.Bijector):
"""Compute `Y = g(X; shift, scale) = scale * X + shift`.
Examples:
```python
# Y = X
b = AffineScalar()
# Y = X + shift
b = AffineScalar(shift=[1., 2, 3])
# Y = 2 * X + shift
b = AffineScalar(
shift=[1., 2, 3],
scale=2.)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
shift=None,
scale=None,
validate_args=False,
name="affine_scalar"):
"""Instantiates the `AffineScalar` bijector.
This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
giving the forward operation:
```none
Y = g(X) = scale * X + shift
```
if `scale` is not specified, then the bijector has the semantics of
`scale = 1.`. Similarly, if `shift` is not specified, then the bijector
has the semantics of `shift = 0.`.
Args:
shift: Floating-point `Tensor`. If this is set to `None`, no shift is
applied.
scale: Floating-point `Tensor`. If this is set to `None`, no scale is
applied.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[scale, shift]):
self._shift = shift
self._scale = scale
if self._shift is not None:
self._shift = ops.convert_to_tensor(shift, name="shift")
if self._scale is not None:
self._scale = ops.convert_to_tensor(self._scale, name="scale")
if validate_args:
self._scale = control_flow_ops.with_dependencies(
[check_ops.assert_none_equal(
self._scale,
array_ops.zeros([], dtype=self._scale.dtype))],
self._scale)
super(AffineScalar, self).__init__(
forward_min_event_ndims=0,
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
@property
def shift(self):
"""The `shift` `Tensor` in `Y = scale @ X + shift`."""
return self._shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + shift`."""
return self._scale
def _forward(self, x):
y = array_ops.identity(x)
if self.scale is not None:
y *= self.scale
if self.shift is not None:
y += self.shift
return y
def _inverse(self, y):
x = array_ops.identity(y)
if self.shift is not None:
x -= self.shift
if self.scale is not None:
x /= self.scale
return x
def _forward_log_det_jacobian(self, x):
# is_constant_jacobian = True for this bijector, hence the
# `log_det_jacobian` need only be specified for a single input, as this will
# be tiled to match `event_ndims`.
if self.scale is None:
return constant_op.constant(0., dtype=x.dtype.base_dtype)
return math_ops.log(math_ops.abs(self.scale))
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/affine_scalar.py
|
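A hedged sketch of `AffineScalar` as defined above (TF 1.x, `tf.contrib`); note the constant Jacobian, which is `log|scale|` independent of the input.

```python
import numpy as np
import tensorflow as tf

tfb = tf.contrib.distributions.bijectors

b = tfb.AffineScalar(shift=1., scale=2.)   # Y = 2 * X + 1
x = np.array([0., 1., 2.], dtype=np.float32)
with tf.Session() as sess:
  y = sess.run(b.forward(x))                          # [1., 3., 5.]
  print(y)
  print(sess.run(b.inverse(y)))                       # recovers x
  print(sess.run(b.forward_log_det_jacobian(x, event_ndims=0)))  # log(2)
```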
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inline bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Inline",
]
class Inline(bijector.Bijector):
"""Bijector constructed from custom callables.
Example Use:
```python
exp = Inline(
forward_fn=tf.exp,
inverse_fn=tf.math.log,
inverse_log_det_jacobian_fn=(
lambda y: -tf.reduce_sum(tf.math.log(y), axis=-1)),
name="exp")
```
The above example is equivalent to the `Bijector` `Exp()`.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
forward_fn=None,
inverse_fn=None,
inverse_log_det_jacobian_fn=None,
forward_log_det_jacobian_fn=None,
forward_event_shape_fn=None,
forward_event_shape_tensor_fn=None,
inverse_event_shape_fn=None,
inverse_event_shape_tensor_fn=None,
is_constant_jacobian=False,
validate_args=False,
forward_min_event_ndims=None,
inverse_min_event_ndims=None,
name="inline"):
"""Creates a `Bijector` from callables.
Args:
forward_fn: Python callable implementing the forward transformation.
inverse_fn: Python callable implementing the inverse transformation.
inverse_log_det_jacobian_fn: Python callable implementing the
log o det o jacobian of the inverse transformation.
forward_log_det_jacobian_fn: Python callable implementing the
log o det o jacobian of the forward transformation.
forward_event_shape_fn: Python callable implementing non-identical
static event shape changes. Default: shape is assumed unchanged.
forward_event_shape_tensor_fn: Python callable implementing non-identical
event shape changes. Default: shape is assumed unchanged.
inverse_event_shape_fn: Python callable implementing non-identical
static event shape changes. Default: shape is assumed unchanged.
inverse_event_shape_tensor_fn: Python callable implementing non-identical
event shape changes. Default: shape is assumed unchanged.
is_constant_jacobian: Python `bool` indicating that the Jacobian is
constant for all input arguments.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
forward_min_event_ndims: Python `int` indicating the minimal
dimensionality this bijector acts on.
inverse_min_event_ndims: Python `int` indicating the minimal
dimensionality this bijector acts on.
name: Python `str`, name given to ops managed by this object.
"""
super(Inline, self).__init__(
forward_min_event_ndims=forward_min_event_ndims,
inverse_min_event_ndims=inverse_min_event_ndims,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
self._forward_fn = forward_fn
self._inverse_fn = inverse_fn
self._inverse_log_det_jacobian_fn = inverse_log_det_jacobian_fn
self._forward_log_det_jacobian_fn = forward_log_det_jacobian_fn
self._forward_event_shape_fn = forward_event_shape_fn
self._forward_event_shape_tensor_fn = forward_event_shape_tensor_fn
self._inverse_event_shape_fn = inverse_event_shape_fn
self._inverse_event_shape_tensor_fn = inverse_event_shape_tensor_fn
def _forward_event_shape(self, input_shape):
if self._forward_event_shape_fn is None:
# By default assume shape doesn't change.
return input_shape
return self._forward_event_shape_fn(input_shape)
def _forward_event_shape_tensor(self, input_shape):
if self._forward_event_shape_tensor_fn is None:
# By default assume shape doesn't change.
return input_shape
return self._forward_event_shape_tensor_fn(input_shape)
def _inverse_event_shape(self, output_shape):
if self._inverse_event_shape_fn is None:
# By default assume shape doesn't change.
return output_shape
return self._inverse_event_shape_fn(output_shape)
def _inverse_event_shape_tensor(self, output_shape):
if self._inverse_event_shape_tensor_fn is None:
# By default assume shape doesn't change.
return output_shape
return self._inverse_event_shape_tensor_fn(output_shape)
def _forward(self, x, **kwargs):
if not callable(self._forward_fn):
raise NotImplementedError(
"forward_fn is not a callable function.")
return self._forward_fn(x, **kwargs)
def _inverse(self, y, **kwargs):
if not callable(self._inverse_fn):
raise NotImplementedError(
"inverse_fn is not a callable function.")
return self._inverse_fn(y, **kwargs)
def _inverse_log_det_jacobian(self, y, **kwargs):
if not callable(self._inverse_log_det_jacobian_fn):
raise NotImplementedError(
"inverse_log_det_jacobian_fn is not a callable function.")
return self._inverse_log_det_jacobian_fn(y, **kwargs)
def _forward_log_det_jacobian(self, x, **kwargs):
if not callable(self._forward_log_det_jacobian_fn):
raise NotImplementedError(
"forward_log_det_jacobian_fn is not a callable function.")
return self._forward_log_det_jacobian_fn(x, **kwargs)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/inline.py
|
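A sketch showing `Inline` assembling a scale-by-two bijector from plain callables; the lambdas are illustrative, not part of the library, and a TF 1.x environment with `tf.contrib` is assumed.

```python
import math
import numpy as np
import tensorflow as tf

tfb = tf.contrib.distributions.bijectors

scale_two = tfb.Inline(
    forward_fn=lambda x: 2. * x,
    inverse_fn=lambda y: y / 2.,
    forward_log_det_jacobian_fn=lambda x: math.log(2.) * tf.ones_like(x),
    inverse_log_det_jacobian_fn=lambda y: -math.log(2.) * tf.ones_like(y),
    forward_min_event_ndims=0,
    name="scale_two")

x = np.array([1., 2., 3.], dtype=np.float32)
with tf.Session() as sess:
  print(sess.run(scale_two.forward(x)))                      # [2., 4., 6.]
  print(sess.run(scale_two.inverse(scale_two.forward(x))))   # recovers x
  print(sess.run(scale_two.forward_log_det_jacobian(x, event_ndims=0)))
```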
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AbsoluteValue bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"AbsoluteValue",
]
class AbsoluteValue(bijector.Bijector):
"""Computes `Y = g(X) = Abs(X)`, element-wise.
This non-injective bijector allows for transformations of scalar distributions
with the absolute value function, which maps `(-inf, inf)` to `[0, inf)`.
* For `y in (0, inf)`, `AbsoluteValue.inverse(y)` returns the set inverse
`{x in (-inf, inf) : |x| = y}` as a tuple, `-y, y`.
* `AbsoluteValue.inverse(0)` returns `0, 0`, which is not the set inverse
(the set inverse is the singleton `{0}`), but "works" in conjunction with
`TransformedDistribution` to produce a left semi-continuous pdf.
* For `y < 0`, `AbsoluteValue.inverse(y)` happily returns the
wrong thing, `-y, y`. This is done for efficiency. If
`validate_args == True`, `y < 0` will raise an exception.
```python
tfd = tf.contrib.distributions
abs = tfd.bijectors.AbsoluteValue()
abs.forward([-1., 0., 1.])
==> [1., 0., 1.]
abs.inverse(1.)
==> [-1., 1.]
# The |dX/dY| is constant, == 1. So Log|dX/dY| == 0.
abs.inverse_log_det_jacobian(1.)
==> [0., 0.]
# Special case handling of 0.
abs.inverse(0.)
==> [0., 0.]
abs.inverse_log_det_jacobian(0.)
==> [0., 0.]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, validate_args=False, name="absolute_value"):
"""Instantiates the `AbsoluteValue` bijector.
Args:
validate_args: Python `bool` indicating whether arguments should be
checked for correctness, in particular whether inputs to `inverse` and
`inverse_log_det_jacobian` are non-negative.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
with self._name_scope("init"):
super(AbsoluteValue, self).__init__(
forward_min_event_ndims=0,
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
def _forward(self, x):
return math_ops.abs(x)
def _inverse(self, y):
if self.validate_args:
y = control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(y, message="Argument y was negative")],
y)
return -y, y
def _inverse_log_det_jacobian(self, y):
    # Element-wise,
# F^{-1}(y) = (-y, y), so DF^{-1}(y) = (-1, 1),
# so Log|DF^{-1}(y)| = Log[1, 1] = [0, 0].
zeros = constant_op.constant(0., dtype=y.dtype)
if self.validate_args:
zeros = control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(y, message="Argument y was negative")],
zeros)
return zeros, zeros
@property
def _is_injective(self):
return False
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/absolute_value.py
|
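A hedged sketch of `AbsoluteValue`'s two-valued inverse, mirroring the class docstring; assumes TF 1.x with `tf.contrib`.

```python
import tensorflow as tf

tfb = tf.contrib.distributions.bijectors

absval = tfb.AbsoluteValue()
with tf.Session() as sess:
  print(sess.run(absval.forward([-2., 0., 2.])))      # [2., 0., 2.]
  # The inverse returns both preimages as the pair (-y, y).
  neg, pos = absval.inverse(2.)
  print(sess.run([neg, pos]))                         # [-2.0, 2.0]
  # Each branch has |dX/dY| = 1, so both log-det terms are 0.
  print(sess.run(absval.inverse_log_det_jacobian(2., event_ndims=0)))
```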
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MatrixInverseTriL bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"MatrixInverseTriL",
]
class MatrixInverseTriL(bijector.Bijector):
"""Computes `g(L) = inv(L)`, where `L` is a lower-triangular matrix.
`L` must be nonsingular; equivalently, all diagonal entries of `L` must be
nonzero.
The input must have `rank >= 2`. The input is treated as a batch of matrices
with batch shape `input.shape[:-2]`, where each matrix has dimensions
`input.shape[-2]` by `input.shape[-1]` (hence `input.shape[-2]` must equal
`input.shape[-1]`).
#### Examples
```python
tfd.bijectors.MatrixInverseTriL().forward(x=[[1., 0], [2, 1]])
# Result: [[1., 0], [-2, 1]], i.e., inv(x)
tfd.bijectors.MatrixInverseTriL().inverse(y=[[1., 0], [-2, 1]])
# Result: [[1., 0], [2, 1]], i.e., inv(y).
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, validate_args=False, name="matrix_inverse_tril"):
"""Instantiates the `MatrixInverseTriL` bijector.
Args:
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
super(MatrixInverseTriL, self).__init__(
forward_min_event_ndims=2,
validate_args=validate_args,
name=name)
def _forward(self, x):
with ops.control_dependencies(self._assertions(x)):
shape = array_ops.shape(x)
return linalg_ops.matrix_triangular_solve(
x, linalg_ops.eye(shape[-1], batch_shape=shape[:-2]), lower=True)
def _inverse(self, y):
return self._forward(y)
def _forward_log_det_jacobian(self, x):
# Calculation of the Jacobian:
#
# Let X = (x_{ij}), 0 <= i,j < n, be a matrix of indeterminates. Let Z =
# X^{-1} where Z = (z_{ij}). Then
#
# dZ/dx_{ij} = (d/dt | t=0) Y(t)^{-1},
#
# where Y(t) = X + t*E_{ij} and E_{ij} is the matrix with a 1 in the (i,j)
# entry and zeros elsewhere. By the product rule,
#
# 0 = d/dt [Identity matrix]
# = d/dt [Y Y^{-1}]
# = Y d/dt[Y^{-1}] + dY/dt Y^{-1}
#
# so
#
# d/dt[Y^{-1}] = -Y^{-1} dY/dt Y^{-1}
# = -Y^{-1} E_{ij} Y^{-1}.
#
# Evaluating at t=0,
#
# dZ/dx_{ij} = -Z E_{ij} Z.
#
# Taking the (r,s) entry of each side,
#
# dz_{rs}/dx_{ij} = -z_{ri}z_{sj}.
#
# Now, let J be the Jacobian dZ/dX, arranged as the n^2-by-n^2 matrix whose
# (r*n + s, i*n + j) entry is dz_{rs}/dx_{ij}. Considering J as an n-by-n
# block matrix with n-by-n blocks, the above expression for dz_{rs}/dx_{ij}
# shows that the block at position (r,i) is -z_{ri}Z. Hence
#
# J = -KroneckerProduct(Z, Z),
# det(J) = (-1)^(n^2) (det Z)^(2n)
# = (-1)^n (det X)^(-2n).
with ops.control_dependencies(self._assertions(x)):
return (-2. * math_ops.cast(array_ops.shape(x)[-1], x.dtype.base_dtype) *
math_ops.reduce_sum(
math_ops.log(math_ops.abs(array_ops.matrix_diag_part(x))),
axis=-1))
def _assertions(self, x):
if not self.validate_args:
return []
shape = array_ops.shape(x)
is_matrix = check_ops.assert_rank_at_least(
x, 2, message="Input must have rank at least 2.")
is_square = check_ops.assert_equal(
shape[-2], shape[-1], message="Input must be a square matrix.")
above_diagonal = array_ops.matrix_band_part(
array_ops.matrix_set_diag(
x, array_ops.zeros(shape[:-1], dtype=dtypes.float32)),
0, -1)
is_lower_triangular = check_ops.assert_equal(
above_diagonal, array_ops.zeros_like(above_diagonal),
message="Input must be lower triangular.")
# A lower triangular matrix is nonsingular iff all its diagonal entries are
# nonzero.
diag_part = array_ops.matrix_diag_part(x)
is_nonsingular = check_ops.assert_none_equal(
diag_part, array_ops.zeros_like(diag_part),
message="Input must have all diagonal entries nonzero.")
return [is_matrix, is_square, is_lower_triangular, is_nonsingular]
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/bijectors/matrix_inverse_tril.py
|
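Cross-checking `MatrixInverseTriL` against NumPy; the bijector is its own inverse, and the log-det-Jacobian printed below matches the `-2 * n * sum(log|diag(x)|)` expression in `_forward_log_det_jacobian`. Assumes TF 1.x with `tf.contrib`.

```python
import numpy as np
import tensorflow as tf

tfb = tf.contrib.distributions.bijectors

L = np.array([[2., 0.], [1., 0.5]], dtype=np.float32)
m = tfb.MatrixInverseTriL()
with tf.Session() as sess:
  print(sess.run(m.forward(L)))                       # == np.linalg.inv(L)
  print(np.linalg.inv(L))
  print(sess.run(m.forward(m.forward(L))))            # involution: back to L
  print(sess.run(m.forward_log_det_jacobian(L, event_ndims=2)))
  print(-2. * 2 * np.sum(np.log(np.abs(np.diag(L)))))  # same value
```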
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Labels for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.labeled_tensor.python.ops import core as _core
from tensorflow.contrib.labeled_tensor.python.ops import io_ops as _io_ops
from tensorflow.contrib.labeled_tensor.python.ops import nn
from tensorflow.contrib.labeled_tensor.python.ops import ops as _ops
from tensorflow.contrib.labeled_tensor.python.ops import sugar as _sugar
# pylint: disable=invalid-name
# Core types.
Axis = _core.Axis
Axes = _core.Axes
LabeledTensor = _core.LabeledTensor
as_axis = _core.as_axis
convert_to_labeled_tensor = _core.convert_to_labeled_tensor
identity = _core.identity
slice = _core.slice_function # pylint: disable=redefined-builtin
transpose = _core.transpose
expand_dims = _core.expand_dims
align = _core.align
axis_order_scope = _core.axis_order_scope
check_axis_order = _core.check_axis_order
impose_axis_order = _core.impose_axis_order
AxisOrderError = _core.AxisOrderError
define_unary_op = _core.define_unary_op
define_binary_op = _core.define_binary_op
define_reduce_op = _ops.define_reduce_op
abs = _core.abs_function # pylint: disable=redefined-builtin
neg = _core.neg
sign = _core.sign
reciprocal = _core.reciprocal
square = _core.square
round = _core.round_function # pylint: disable=redefined-builtin
sqrt = _core.sqrt
rsqrt = _core.rsqrt
exp = _core.exp
log = _core.log
ceil = _core.ceil
floor = _core.floor
cos = _core.cos
sin = _core.sin
tan = _core.tan
acos = _core.acos
asin = _core.asin
atan = _core.atan
lgamma = _core.lgamma
digamma = _core.digamma
erf = _core.erf
erfc = _core.erfc
logical_not = _core.logical_not
tanh = _core.tanh
sigmoid = _core.sigmoid
add = _core.add
sub = _core.sub
mul = _core.mul
div = _core.div
mod = _core.mod
pow = _core.pow_function # pylint: disable=redefined-builtin
equal = _core.equal
greater = _core.greater
greater_equal = _core.greater_equal
not_equal = _core.not_equal
less = _core.less
less_equal = _core.less_equal
logical_and = _core.logical_and
logical_or = _core.logical_or
logical_xor = _core.logical_xor
maximum = _core.maximum
minimum = _core.minimum
squared_difference = _core.squared_difference
igamma = _core.igamma
igammac = _core.igammac
zeta = _core.zeta
polygamma = _core.polygamma
select = _ops.select
concat = _ops.concat
pack = _ops.pack
unpack = _ops.unpack
reshape = _ops.reshape
rename_axis = _ops.rename_axis
random_crop = _ops.random_crop
map_fn = _ops.map_fn
foldl = _ops.foldl
squeeze = _ops.squeeze
matmul = _ops.matmul
tile = _ops.tile
pad = _ops.pad
constant = _ops.constant
zeros_like = _ops.zeros_like
ones_like = _ops.ones_like
cast = _ops.cast
verify_tensor_all_finite = _ops.verify_tensor_all_finite
boolean_mask = _ops.boolean_mask
where = _ops.where
reduce_all = _ops.reduce_all
reduce_any = _ops.reduce_any
reduce_logsumexp = _ops.reduce_logsumexp
reduce_max = _ops.reduce_max
reduce_mean = _ops.reduce_mean
reduce_min = _ops.reduce_min
reduce_prod = _ops.reduce_prod
reduce_sum = _ops.reduce_sum
batch = _ops.batch
shuffle_batch = _ops.shuffle_batch
FixedLenFeature = _io_ops.FixedLenFeature
parse_example = _io_ops.parse_example
parse_single_example = _io_ops.parse_single_example
placeholder = _io_ops.placeholder
ReshapeCoder = _sugar.ReshapeCoder
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/__init__.py
|
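A brief sketch of the LabeledTensor API surface re-exported above; it assumes a TF 1.x environment where `tf.contrib.labeled_tensor` is available, and the axis names are illustrative.

```python
import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

# Unlabeled axes are given by name only; labeled axes carry tick labels.
image = lt.LabeledTensor(
    tf.zeros([2, 3, 4]),
    ['batch', 'row', ('channel', ['r', 'g', 'b', 'a'])])
print(image.axes)                      # ordered Axes with names and labels

transposed = lt.transpose(image, ['channel', 'batch', 'row'])
squared = lt.square(image)             # element-wise ops preserve the axes
```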
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import io_ops
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test as test_lib
class ParseBase(test_util.Base):
def setUp(self):
super(ParseBase, self).setUp()
examples = [
example_pb2.Example(features=feature_pb2.Features(feature={
'a':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[1])),
'b':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[2, 3, 4])),
})),
example_pb2.Example(features=feature_pb2.Features(feature={
'a':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[5])),
'b':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[6, 7, 8])),
})),
]
self.serialized = core.LabeledTensor(
constant_op.constant([ex.SerializeToString() for ex in examples]),
['batch'])
self.features = {
'a': io_ops.FixedLenFeature([], dtypes.int64),
'b': io_ops.FixedLenFeature([('x', 3)], dtypes.int64)
}
class TestParseExample(ParseBase):
def test(self):
expected_a = core.LabeledTensor(constant_op.constant([1, 5]), ['batch'])
expected_b = core.LabeledTensor(
constant_op.constant([[2, 3, 4], [6, 7, 8]]), ['batch', 'x'])
parsed = io_ops.parse_example(self.serialized, self.features)
self.assertLabeledTensorsEqual(expected_a, parsed['a'])
self.assertLabeledTensorsEqual(expected_b, parsed['b'])
def test_placeholder(self):
serialized = core.LabeledTensor(
array_ops.placeholder(dtypes.string, [None]), ['batch'])
# should not raise
io_ops.parse_example(serialized, self.features)
class TestParseSingleExample(ParseBase):
def test(self):
expected_a = core.LabeledTensor(constant_op.constant(1), [])
expected_b = core.LabeledTensor(constant_op.constant([2, 3, 4]), ['x'])
parsed = io_ops.parse_single_example(self.serialized[0], self.features)
self.assertLabeledTensorsEqual(expected_a, parsed['a'])
self.assertLabeledTensorsEqual(expected_b, parsed['b'])
def test_unknown_size(self):
features = {'a': io_ops.FixedLenFeature([('x', None)], dtypes.int64)}
serialized = array_ops.placeholder(dtypes.string, [])
with self.assertRaisesRegexp(ValueError, 'unknown size'):
io_ops.parse_single_example(serialized, features)
class PlaceholderTest(test_util.Base):
def test_name(self):
placeholder_lt = io_ops.placeholder(dtypes.float32, [])
self.assertIn('lt_placeholder', placeholder_lt.name)
def test(self):
placeholder_lt = io_ops.placeholder(dtypes.float32,
['batch', ('x', ['a', 'b'])])
self.assertEqual(placeholder_lt.dtype, dtypes.float32)
self.assertEqual(placeholder_lt.axes,
core.Axes([('batch', None), ('x', ['a', 'b'])]))
def test_feed(self):
sess = session.Session()
placeholder_lt = io_ops.placeholder(dtypes.float32, [])
two_times = 2.0 * placeholder_lt
result = sess.run(two_times, {placeholder_lt.tensor: 1})
self.assertEqual(result, 2.0)
if __name__ == '__main__':
test_lib.main()
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/io_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network ops for LabeledTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import nn as contrib_nn
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.ops import nn
relu = core.define_unary_op('relu', nn.relu)
relu6 = core.define_unary_op('relu6', nn.relu6)
crelu = core.define_unary_op('crelu', nn.crelu)
elu = core.define_unary_op('elu', nn.elu)
softplus = core.define_unary_op('softplus', nn.softplus)
l2_loss = core.define_unary_op('l2_loss', nn.l2_loss)
sigmoid_cross_entropy_with_logits = core.define_binary_op(
'sigmoid_cross_entropy_with_logits',
contrib_nn.deprecated_flipped_sigmoid_cross_entropy_with_logits)
softmax = core.define_unary_op('softmax', nn.softmax)
log_softmax = core.define_unary_op('log_softmax', nn.log_softmax)
softmax_cross_entropy_with_logits = core.define_binary_op(
'softmax_cross_entropy_with_logits',
contrib_nn.deprecated_flipped_softmax_cross_entropy_with_logits)
sparse_softmax_cross_entropy_with_logits = core.define_binary_op(
'sparse_softmax_cross_entropy_with_logits',
contrib_nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits)
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/nn.py
|
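A short sketch of the wrapped neural-network ops above; axis metadata is carried through each op. Assumes TF 1.x with `tf.contrib.labeled_tensor`.

```python
import tensorflow as tf
from tensorflow.contrib import labeled_tensor as lt

logits = lt.LabeledTensor(
    tf.constant([[-1., 0., 2.], [3., -2., 0.5]]),
    ['batch', ('class', ['a', 'b', 'c'])])
activations = lt.nn.relu(logits)       # still labeled by ('batch', 'class')
probs = lt.nn.softmax(logits)
print(probs.axes)
```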
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops
from tensorflow.contrib.labeled_tensor.python.ops import sugar
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class Base(test_util.Base):
def setUp(self):
super(Base, self).setUp()
self.small_lt = core.LabeledTensor(constant_op.constant([1]), [('x', 1)])
class ReshapeCoderTest(Base):
def setUp(self):
super(ReshapeCoderTest, self).setUp()
self.batch_size = 8
self.num_rows = 50
self.num_columns = 100
self.channels = ['red', 'green', 'blue']
self.masks = [False, True]
tensor = math_ops.range(0,
self.batch_size * self.num_rows * self.num_columns *
len(self.channels) * len(self.masks))
tensor = array_ops.reshape(tensor, [
self.batch_size, self.num_rows, self.num_columns, len(self.channels),
len(self.masks)
])
self.batch_axis = ('batch', range(self.batch_size))
self.row_axis = ('row', range(self.num_rows))
self.column_axis = ('column', range(self.num_columns))
self.channel_axis = ('channel', self.channels)
self.mask_axis = ('mask', self.masks)
axes = [
self.batch_axis, self.row_axis, self.column_axis, self.channel_axis,
self.mask_axis
]
self.masked_image_lt = core.LabeledTensor(tensor, axes)
def test_name(self):
rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
encode_lt = rc.encode(self.masked_image_lt)
decode_lt = rc.decode(encode_lt)
self.assertIn('lt_reshape_encode', encode_lt.name)
self.assertIn('lt_reshape_decode', decode_lt.name)
def test_bijection_flat(self):
rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
encode_lt = rc.encode(self.masked_image_lt)
golden_axes = core.Axes([
self.batch_axis, self.row_axis, self.column_axis,
('depth', len(self.channels) * len(self.masks))
])
self.assertEqual(encode_lt.axes, golden_axes)
decode_lt = rc.decode(encode_lt)
self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt)
def test_bijection_with_labels(self):
depth_axis = core.Axis('depth', range(len(self.channels) * len(self.masks)))
rc = sugar.ReshapeCoder(['channel', 'mask'],
[depth_axis, ('other', ['label'])])
encode_lt = rc.encode(self.masked_image_lt)
golden_axes = core.Axes([
self.batch_axis, self.row_axis, self.column_axis, depth_axis,
('other', ['label'])
])
self.assertEqual(encode_lt.axes, golden_axes)
decode_lt = rc.decode(encode_lt)
self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
rc.decode(self.masked_image_lt)
with self.assertRaises(ValueError):
rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
rc.encode(self.masked_image_lt)
rc.encode(ops.select(self.masked_image_lt, {'channel': 'red'}))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/sugar_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal runtime type checking library.
This module should not be considered public API.
"""
# TODO(ericmc,shoyer): Delete this in favor of using pytype or mypy
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
from tensorflow.python.util import tf_inspect
# used for register_type_abbreviation and _type_repr below.
_TYPE_ABBREVIATIONS = {}
class Type(object):
"""Base class for type checker types.
The custom types defined in this module are based on types in the standard
library's typing module (in Python 3.5):
https://docs.python.org/3/library/typing.html
The only difference should be that we use actual instances of Type classes to
represent custom types rather than the metaclass magic typing uses to create
new class objects. In practice, all this should mean is that we use
`List(int)` rather than `List[int]`.
Custom types should implement __instancecheck__ and inherit from Type. Every
argument in the constructor must be a type or Type instance, and these
arguments must be stored as a tuple on the `_types` attribute.
"""
def __init__(self, *types):
self._types = types
def __repr__(self):
args_repr = ", ".join(repr(t) for t in self._types)
return "typecheck.%s(%s)" % (type(self).__name__, args_repr)
class _SingleArgumentType(Type):
"""Use this subclass for parametric types that accept only one argument."""
def __init__(self, tpe):
super(_SingleArgumentType, self).__init__(tpe)
@property
def _type(self):
tpe, = self._types # pylint: disable=unbalanced-tuple-unpacking
return tpe
class _TwoArgumentType(Type):
"""Use this subclass for parametric types that accept two arguments."""
def __init__(self, first_type, second_type):
super(_TwoArgumentType, self).__init__(first_type, second_type)
class Union(Type):
"""A sum type.
A correct type is any of the types provided.
"""
def __instancecheck__(self, instance):
return isinstance(instance, self._types)
class Optional(_SingleArgumentType):
"""An optional type.
A correct type is either the provided type or NoneType.
"""
def __instancecheck__(self, instance):
# types.NoneType does not exist in Python 3
return isinstance(instance, (self._type, type(None)))
class List(_SingleArgumentType):
"""A typed list.
A correct type is a list where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, list) and
all(isinstance(x, self._type) for x in instance))
class Sequence(_SingleArgumentType):
"""A typed sequence.
A correct type is a sequence where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Sequence) and
all(isinstance(x, self._type) for x in instance))
class Collection(_SingleArgumentType):
"""A sized, iterable container.
A correct type is an iterable and container with known size where each element
has the single provided type.
We use this in preference to Iterable because we check each instance of the
iterable at runtime, and hence need to avoid iterables that could be
exhausted.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Iterable) and
isinstance(instance, collections.Sized) and
isinstance(instance, collections.Container) and
all(isinstance(x, self._type) for x in instance))
class Tuple(Type):
"""A typed tuple.
A correct type is a tuple with the correct length where each element has
the correct type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, tuple) and
len(instance) == len(self._types) and
all(isinstance(x, t) for x, t in zip(instance, self._types)))
class Mapping(_TwoArgumentType):
"""A typed mapping.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
key_type, value_type = self._types # pylint: disable=unbalanced-tuple-unpacking
return (isinstance(instance, collections.Mapping) and
all(isinstance(k, key_type) for k in instance.keys()) and
            all(isinstance(v, value_type) for v in instance.values()))
class Dict(Mapping):
"""A typed dict.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, dict) and
super(Dict, self).__instancecheck__(instance))
def _replace_forward_references(t, context):
"""Replace forward references in the given type."""
if isinstance(t, str):
return context[t]
elif isinstance(t, Type):
return type(t)(*[_replace_forward_references(t, context) for t in t._types]) # pylint: disable=protected-access
else:
return t
def register_type_abbreviation(name, alias):
"""Register an abbreviation for a type in typecheck tracebacks.
This makes otherwise very long typecheck errors much more readable.
Example:
typecheck.register_type_abbreviation(tf.compat.v1.Dimension,
'tf.compat.v1.Dimension')
Args:
name: type or class to abbreviate.
alias: string alias to substitute.
"""
_TYPE_ABBREVIATIONS[name] = alias
def _type_repr(t):
"""A more succinct repr for typecheck tracebacks."""
string = repr(t)
for type_, alias in _TYPE_ABBREVIATIONS.items():
string = string.replace(repr(type_), alias)
string = re.sub(r"<(class|type) '([\w.]+)'>", r"\2", string)
string = re.sub(r"typecheck\.(\w+)", r"\1", string)
return string
class Error(TypeError):
"""Exception for typecheck failures."""
def accepts(*types):
"""A decorator which checks the input types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
Returns:
A function to use as a decorator.
"""
def check_accepts(f):
"""Check the types."""
spec = tf_inspect.getargspec(f)
num_function_arguments = len(spec.args)
if len(types) != num_function_arguments:
raise Error(
"Function %r has %d arguments but only %d types were provided in the "
"annotation." % (f, num_function_arguments, len(types)))
if spec.defaults:
num_defaults = len(spec.defaults)
for (name, a, t) in zip(spec.args[-num_defaults:], spec.defaults,
types[-num_defaults:]):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("default argument value %r of type %r is not an instance "
"of the allowed type %s for the %s argument to %r" %
(a, type(a), _type_repr(allowed_type), name, f))
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
for (a, t) in zip(args, types):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("%r of type %r is not an instance of the allowed type %s "
"for %r" % (a, type(a), _type_repr(allowed_type), f))
return f(*args, **kwds)
return new_f
return check_accepts
def returns(*types):
"""A decorator which checks the return types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types. A list of one element corresponds to a
single return value. A list of several elements corresponds to several
return values. Note that a function with no explicit return value has an
implicit NoneType return and should be annotated correspondingly.
Returns:
A function to use as a decorator.
"""
def check_returns(f):
"""Check the types."""
if not types:
raise TypeError("A return type annotation must contain at least one type")
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
return_value = f(*args, **kwds)
if len(types) == 1:
# The function has a single return value.
allowed_type = _replace_forward_references(types[0], f.__globals__)
if not isinstance(return_value, allowed_type):
raise Error(
"%r of type %r is not an instance of the allowed type %s "
"for %r" %
(return_value, type(return_value), _type_repr(allowed_type), f))
else:
if len(return_value) != len(types):
raise Error("Function %r has %d return values but only %d types were "
"provided in the annotation." %
(f, len(return_value), len(types)))
for (r, t) in zip(return_value, types):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(r, allowed_type):
raise Error("%r of type %r is not an instance of allowed type %s "
"for %r" % (r, type(r), _type_repr(allowed_type), f))
return return_value
return new_f
return check_returns
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/_typecheck.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core classes and core ops for LabeledTensor.
Core ops are ops which will eventually be called by LabeledTensor methods,
and ops which a core op depends upon.
For example, `add` is a core op because we'll eventually support the `+`
operator.
Non-core ops should go in `ops.py`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import numbers
import types
import numpy as np
from six import binary_type
from six import string_types
from six import text_type
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# pylint: disable=invalid-name
# Types coercible to Axis.labels
# We use this instead of collections.Sequence to exclude strings.
LabelsLike = tc.Union(np.ndarray, range, list, tuple)
# Types coercible to a tf.compat.v1.Dimension
DimensionLike = tc.Optional(tc.Union(tensor_shape.Dimension, int))
# Types usable for axis values
AxisValue = tc.Union(LabelsLike, DimensionLike)
# Valid scalar values for TensorFlow
Scalar = tc.Union(numbers.Number, bool, binary_type, text_type)
# pylint: enable=invalid-name
class Axis(object):
"""Size and label information for an axis.
Axis contains either a tf.compat.v1.Dimension indicating the size of an axis,
or a tuple of tick labels for the axis.
If tick labels are provided, they must be unique.
"""
@tc.accepts(object, string_types, AxisValue)
def __init__(self, name, value):
"""Construct an Axis.
Args:
name: Name of the axis.
value: Either None, an int or tf.compat.v1.Dimension giving the size of
the axis, or a sequence that is not a string additionally providing
coordinate (tick) labels.
Raises:
ValueError: If the user provides labels with duplicate values.
"""
if isinstance(value, tensor_shape.Dimension):
dimension = value
labels = None
elif isinstance(value, int) or value is None:
dimension = tensor_shape.Dimension(value)
labels = None
else:
dimension = tensor_shape.Dimension(len(value))
labels = tuple(value)
if dimension.value == 0:
# Treat a zero-length axis as if it has labels.
labels = ()
if labels is not None:
index = dict(zip(labels, range(len(labels))))
if len(index) != len(labels):
raise ValueError(
'Tick labels must be unique, but got {}'.format(labels))
else:
index = None
self._name = name # type: string_types
self._dimension = dimension # type: tensor_shape.Dimension
self._labels = labels # type: Optional[tuple]
self._index = index # type: Optional[Dict[Any, int]]
@property
@tc.returns(string_types)
def name(self):
return self._name
@tc.returns(string_types)
def __repr__(self):
# Axis('x', Dimension(2))
    # TODO(shoyer): make very long reprs more succinct?
return "%s('%s', %r)" % (type(self).__name__, self.name, self.value)
@tc.returns(bool)
def __eq__(self, other):
return (isinstance(other, Axis) and self.name == other.name and
self.size == other.size and self.labels == other.labels)
def __hash__(self):
return hash((self.name, self.size, self.labels))
@tc.returns(bool)
def __ne__(self, other):
return not self == other
@tc.returns(int)
def __len__(self):
size = self.size
if size is None:
raise ValueError('axis %r has unknown length' % self.name)
return size
@property
@tc.returns(tc.Optional(tensor_shape.Dimension))
def dimension(self):
return self._dimension
@property
@tc.returns(tc.Optional(int))
def size(self):
return self._dimension.value
@property
@tc.returns(tc.Union(tuple, tensor_shape.Dimension))
def value(self):
"""Returns the tf.compat.v1.Dimension or tuple specifying axis ticks."""
if self.labels is None:
return self.dimension
else:
return self.labels
@property
@tc.returns(tc.Optional(tuple))
def labels(self):
"""Returns the tuple containing coordinate labels, else None."""
return self._labels
def index(self, value):
"""Returns the integer position of the given tick label."""
if self._index is None:
raise ValueError('Axis does not have tick labels')
return self._index[value]
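# Editor's note: the sketch below is an illustrative addition, not part of the
# original module. It exercises the two Axis flavors defined above (labeled and
# unlabeled) and is wrapped in a helper so importing the file has no side
# effects.
def _example_axis_usage():
  labeled = Axis('channel', ['red', 'green', 'blue'])
  assert labeled.size == 3
  assert labeled.index('green') == 1
  unlabeled = Axis('batch', 32)
  assert unlabeled.labels is None and unlabeled.size == 32
  return labeled, unlabeled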
# tc class for anything that can be coerced into an Axis
# pylint: disable=invalid-name
AxisLike = tc.Union(Axis, tc.Tuple(string_types, AxisValue))
# pylint: enable=invalid-name
@tc.returns(Axis)
@tc.accepts(AxisLike)
def as_axis(axis_data):
"""Convert an AxisLike object into an Axis.
Args:
axis_data: Axis object or tuple (axis_name, axis_value) describing an axis.
Returns:
Axis object. This may be the original object if axis_data is an Axis.
"""
if isinstance(axis_data, Axis):
axis = axis_data
else:
axis = Axis(*axis_data)
return axis
class Axes(collections.Mapping):
"""Axis names and indices for a tensor.
It is an ordered mapping, with keys given by axis name and values given
by Axis objects. Duplicate axis names are not allowed.
"""
@tc.accepts(object, tc.List(AxisLike))
def __init__(self, axes):
"""Construct an Axes.
Args:
axes: A list of Axis objects or (axis_name, axis_value) tuples.
Raises:
ValueError: If the user provides empty or duplicate axis names.
"""
self._axes = collections.OrderedDict()
for axis_data in axes:
axis = as_axis(axis_data)
name = axis.name
if name in self._axes:
raise ValueError('Duplicate axis name: %s' % name)
self._axes[name] = axis
def __iter__(self):
return iter(self._axes)
@tc.returns(string_types)
def __repr__(self):
# Axes([('x', Dimension(2)),
# ('y', ['a', 'b', 'c']),
# ('z', Dimension(4))])
cls_name = type(self).__name__
values = ["('%s', %r)" % (v.name, v.value) for v in self._axes.values()]
values_repr = (',\n' + ' ' * len(cls_name + '([')).join(values)
return '%s([%s])' % (cls_name, values_repr)
@tc.returns(Axis)
@tc.accepts(object, string_types)
def __getitem__(self, name):
return self._axes[name]
@tc.returns(bool)
def __contains__(self, name):
return name in self._axes
@tc.returns(int)
def __len__(self):
return len(self._axes)
def __hash__(self):
return hash(tuple(self.items()))
@tc.accepts(object, string_types)
def remove(self, axis_name):
"""Creates a new Axes object without the given axis."""
if axis_name not in self:
raise KeyError(axis_name)
remaining_axes = [axis for axis in self.values() if axis.name != axis_name]
return Axes(remaining_axes)
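# Editor's note: an illustrative sketch (not part of the original module)
# showing Axes construction from (name, value) tuples and the remove() helper.
def _example_axes_usage():
  axes = Axes([('x', 2), ('rgb', ['red', 'green', 'blue'])])
  assert list(axes.keys()) == ['x', 'rgb']
  assert axes['rgb'].labels == ('red', 'green', 'blue')
  smaller = axes.remove('x')
  assert list(smaller.keys()) == ['rgb']
  return axes, smaller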
class LabeledTensor(object):
"""A tensor with annotated axes.
It has the following invariants:
1) The dimensionality of the tensor is equal to the number of elements
in axes.
2) The number of coordinate values in the ith dimension is equal to the
size of the tensor in the ith dimension.
Attributes:
tensor: tf.Tensor containing the data.
axes: lt.Axes containing axis names and coordinate labels.
"""
@tc.accepts(object, ops.Tensor,
tc.Union(Axes, tc.Collection(tc.Union(string_types, AxisLike))))
def __init__(self, tensor, axes):
"""Construct a LabeledTensor.
Args:
tensor: The underlying tensor containing the data.
axes: An Axes object, or a collection of strings, Axis objects or tuples
of (name, value) pairs indicating the axes.
Raises:
ValueError: If the provided axes do not satisfy the class invariants.
"""
self._tensor = tensor
shape = tensor.get_shape()
if isinstance(axes, Axes):
unvalidated_axes = axes
else:
mutable_axes = []
for position, axis_like in enumerate(axes):
if isinstance(axis_like, string_types):
          # The coordinates for this axis are unlabeled.
# Infer the size of the axis.
value = shape[position]
axis_like = (axis_like, value)
mutable_axes.append(axis_like)
      # Construct the Axes object, which will additionally validate its
      # contents.
unvalidated_axes = Axes(mutable_axes)
# Check our invariants.
# First, the rank of the tensor must be equal to the number of axes.
if len(shape) != len(unvalidated_axes):
raise ValueError(
'Tensor rank was not equal to the number of axes: %r, %r' %
(shape, unvalidated_axes))
# Second, the size of each tensor dimension must match the size of the
# corresponding indices.
for (d, axis) in zip(shape, unvalidated_axes.values()):
if d != axis.size:
raise ValueError(
            'Provided axis size %d does not match tensor dimension size %d '
            'in tensor %r' % (axis.size, d, tensor))
self._axes = unvalidated_axes
def __repr__(self):
# <LabeledTensor 'foo' shape=(2, 3, 4) dtype=float32
# axes=[('x', Dimension(2)),
    #               ('y', ('a', 'b', 'c')),
# ('z', Dimension(4))]>
axes = ["('%s', %r)" % (v.name, v.value) for v in self.axes.values()]
axes_repr = (',\n' + ' ' * len(' axes=[')).join(axes)
return ("<%s '%s' shape=%s dtype=%s\n axes=[%s]>" %
(type(self).__name__, self.tensor.name, self.tensor.get_shape(),
self.tensor.dtype.name, axes_repr))
@property
def tensor(self):
return self._tensor
def _as_graph_element(self):
"""Support tf.Graph.as_graph_element on LabeledTensor objects.
This allows operations such as tf.name_scope to take labeled tensors.
Returns:
self.tensor
"""
return self.tensor
@property
def axes(self):
return self._axes
# properties/methods directly borrowed from tf.Tensor:
@property
def dtype(self):
return self._tensor.dtype
@property
def shape(self):
return self._tensor.shape
@property
def name(self):
return self._tensor.name
def get_shape(self):
"""Returns the TensorShape that represents the shape of this tensor.
See tf.Tensor.get_shape().
Returns:
A TensorShape representing the shape of this tensor.
"""
return self._tensor.get_shape()
# TODO(shoyer): consider how/if to implement .eval(). Maybe it should return
# an xarray.DataArray?
def __getitem__(self, key):
# This should work exactly like tf.Tensor.__getitem__, except it preserves
# labels.
if not isinstance(key, tuple):
key = (key,)
if len(key) != len(self.axes):
raise ValueError('indexer %r must have the same length as the Tensor '
'rank (%r)' % (key, len(self.axes)))
selection = {a: k for a, k in zip(self.axes.keys(), key)}
return slice_function(self, selection)
# special methods for overloading arithmetic operations:
def __abs__(self):
return abs_function(self)
def __neg__(self):
return neg(self)
def __pos__(self):
return self
def __add__(self, other):
return add(self, other)
def __radd__(self, other):
return add(other, self)
def __sub__(self, other):
return sub(self, other)
def __rsub__(self, other):
return sub(other, self)
def __mul__(self, other):
return mul(self, other)
def __rmul__(self, other):
return mul(other, self)
def __truediv__(self, other):
return div(self, other)
__div__ = __truediv__
def __rtruediv__(self, other):
return div(other, self)
__rdiv__ = __rtruediv__
def __mod__(self, other):
return mod(self, other)
def __rmod__(self, other):
return mod(other, self)
def __pow__(self, other):
return pow_function(self, other)
def __rpow__(self, other):
return pow_function(other, self)
# logical operations:
def __invert__(self):
return logical_not(self)
def __and__(self, other):
return logical_and(self, other)
def __or__(self, other):
return logical_or(self, other)
def __xor__(self, other):
return logical_xor(self, other)
# boolean operations:
def __lt__(self, other):
return less(self, other)
def __le__(self, other):
return less_equal(self, other)
def __gt__(self, other):
return greater(self, other)
def __ge__(self, other):
return greater_equal(self, other)
def __eq__(self, other):
# for consistency with tf.Tensor
if not isinstance(other, LabeledTensor):
return False
return self.tensor == other.tensor and self.axes == other.axes
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.tensor, self.axes))
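# Editor's note: an illustrative sketch (not part of the original module) of
# LabeledTensor construction; the axis names and sizes here are made up.
# Arithmetic via the overloaded operators preserves the axis metadata.
def _example_labeled_tensor():
  lt = LabeledTensor(array_ops.ones([2, 3]),
                     ['x', ('rgb', ['red', 'green', 'blue'])])
  assert list(lt.axes.keys()) == ['x', 'rgb']
  assert lt.axes['x'].size == 2
  # 2 * lt dispatches through __rmul__ to the mul op defined later in this file.
  return 2 * lt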
# typecheck type abbreviations:
# abbreviations for third-party types with very long reprs
tc.register_type_abbreviation(tensor_shape.Dimension, 'tensorflow.Dimension')
tc.register_type_abbreviation(ops.Tensor, 'tensorflow.Tensor')
tc.register_type_abbreviation(dtypes.DType, 'tensorflow.DType')
# core LabeledTensor types
tc.register_type_abbreviation(Axis, 'labeled_tensor.Axis')
tc.register_type_abbreviation(Axes, 'labeled_tensor.Axes')
tc.register_type_abbreviation(LabeledTensor, 'labeled_tensor.LabeledTensor')
@tc.returns(ops.Tensor)
@tc.accepts(LabeledTensor)
def _convert_labeled_tensor_to_tensor(value, *args, **kwargs):
# call ops.convert_to_tensor to handle optional arguments appropriately
return ops.internal_convert_to_tensor(value.tensor, *args, **kwargs)
ops.register_tensor_conversion_function(LabeledTensor,
_convert_labeled_tensor_to_tensor)
# tc class for anything that can be coerced into a LabeledTensor
# pylint: disable=invalid-name
LabeledTensorLike = tc.Union(LabeledTensor, ops.Tensor, np.ndarray, Scalar)
# pylint: enable=invalid-name
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, object, tc.Optional(string_types))
def convert_to_labeled_tensor(value, dtype=None, name=None):
"""Converts the given `value` to a `LabeledTensor`.
This function accepts `LabeledTensor` objects, 0-dimensional `Tensor` objects
and numpy arrays, and Python scalars. Higher dimensional unlabeled tensors
must use the `LabeledTensor` constructor explicitly.
Args:
value: Object to convert.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of value.
name: Optional name to use if a new Tensor is created.
Returns:
`value` converted into a `LabeledTensor` object.
Raises:
ValueError: If the output would have rank>0 but the input was not already a
`LabeledTensor`.
"""
# TODO(shoyer): consider extending to accept xarray.DataArray as input.
if isinstance(value, LabeledTensor):
axes = value.axes.values()
value = value.tensor
else:
axes = []
# We call convert_to_tensor even for LabeledTensor input because it also
# checks to make sure the dtype argument is compatible.
tensor = ops.convert_to_tensor(value, dtype=dtype, name=name)
if len(tensor.get_shape()) != len(axes):
raise ValueError('cannot automatically convert unlabeled arrays or tensors '
'with rank>0 into LabeledTensors: %r' % value)
return LabeledTensor(tensor, axes)
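# Editor's note: an illustrative sketch (not part of the original module).
# Scalars convert implicitly; rank>0 unlabeled values must be wrapped in
# LabeledTensor explicitly, as documented above.
def _example_convert_to_labeled_tensor():
  scalar_lt = convert_to_labeled_tensor(42.0)
  assert len(scalar_lt.axes) == 0
  try:
    convert_to_labeled_tensor(np.zeros([2, 3]))
  except ValueError:
    pass  # rank>0 inputs are rejected
  return scalar_lt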
@tc.returns(Axis)
@tc.accepts(tc.Collection(Axis))
def concat_axes(axes):
"""Concatenate a list of Axes.
Args:
axes: A collection of Axis objects.
Returns:
The concatenation of the axes.
If all axes have labels, the result has the concatenation of the labels.
Else, the result has no labels, and its size is the sum of the sizes
of the axes.
Raises:
    ValueError: If `axes` is empty, contains non-Axis values, or if the axes do
      not all have the same name.
"""
if not axes:
raise ValueError('axes must not be empty')
for a in axes:
if not isinstance(a, Axis):
raise ValueError('Expected an Axis, but got %r of type %r' % (a, type(a)))
names = set(a.name for a in axes)
if len(names) > 1:
raise ValueError('axes do not all have the same name: %r' % names)
name, = names
all_have_labels = all(a.labels is not None for a in axes)
any_has_unknown_size = any(a.size is None for a in axes)
if all_have_labels:
value = tuple(label for a in axes for label in a.labels)
elif any_has_unknown_size:
value = None
else:
value = sum(len(a) for a in axes)
return Axis(name, value)
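# Editor's note: an illustrative sketch (not part of the original module)
# covering both concat_axes cases: all-labeled inputs concatenate their labels,
# while any axis of unknown size makes the result size unknown.
def _example_concat_axes():
  labeled = concat_axes([Axis('rgb', ['red']), Axis('rgb', ['green', 'blue'])])
  assert labeled.labels == ('red', 'green', 'blue')
  unlabeled = concat_axes([Axis('batch', 2), Axis('batch', None)])
  assert unlabeled.size is None
  return labeled, unlabeled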
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Optional(string_types))
def identity(labeled_tensor, name=None):
"""The identity op.
See tf.identity.
Args:
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
The tensor.
"""
with ops.name_scope(name, 'lt_identity', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
return LabeledTensor(
array_ops.identity(labeled_tensor.tensor, name=scope),
labeled_tensor.axes)
# We don't call this slice because that shadows a built-in. Instead, we alias
# this to lt.slice in __init__.py.
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Mapping(string_types, tc.Union(int, slice)),
tc.Optional(string_types))
def slice_function(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
This is an analog of tf.slice.
For example:
>>> tensor = tf.reshape(tf.range(0, 6), [3, 2])
>>> labeled_tensor = lt.LabeledTensor(tensor, ['a', ('b', ['foo', 'bar'])])
>>> lt.slice(labeled_tensor, {'a': slice(0, 2), 'b': 1})
<LabeledTensor 'lt_slice:...' shape=(2,) dtype=int32
axes=[('a', Dimension(2))]>
Args:
labeled_tensor: The input tensor.
selection: A dictionary of type str -> Union(int, slice of int) mapping axis
names to sub-selections.
name: Optional op name.
Returns:
The slice as a `LabeledTensor`.
"""
with ops.name_scope(name, 'lt_slice', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
slices = []
for axis_name in labeled_tensor.axes:
if axis_name not in selection:
# We're not sub-selecting this axis, so use the full slice.
slices.append(slice(None))
else:
slices.append(selection[axis_name])
sliced_tensor = labeled_tensor.tensor[tuple(slices)]
sliced_axes = []
for axis, s in zip(labeled_tensor.axes.values(), slices):
# We sub-select this axis's index with the slice s.
# `s` is either an int or a proper slice.
if isinstance(s, slice):
if axis.labels is None:
# We're not tracking coordinate names for this axis.
sliced_axes.append(axis.name)
else:
sliced_axes.append((axis.name, axis.labels[s]))
else:
        # An integer selection removes this dimension, so we omit its axis.
assert isinstance(s, int)
return LabeledTensor(
array_ops.identity(sliced_tensor, name=scope), sliced_axes)
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Optional(tc.Collection(string_types)),
tc.Optional(string_types))
def transpose(labeled_tensor, axis_order=None, name=None):
"""Permute a tensor's axes.
See tf.transpose.
Args:
labeled_tensor: The input tensor.
axis_order: Optional desired axis order, as a list of names. By default, the
order of axes is reversed.
name: Optional op name.
Returns:
The permuted tensor.
Raises:
ValueError: If axis_order isn't a permutation of the existing axes.
"""
with ops.name_scope(name, 'lt_transpose', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
original_order = list(labeled_tensor.axes.keys())
if axis_order is None:
axis_order = list(reversed(original_order))
elif sorted(axis_order) != sorted(original_order):
raise ValueError(
'The new axis order must have the same names as the original axes, '
'but the new order is %r while the original order is %r' %
(axis_order, original_order))
axis_names = list(labeled_tensor.axes.keys())
permutation = [axis_names.index(n) for n in axis_order]
# Note: TensorFlow doesn't copy data for the identity transpose.
transpose_tensor = array_ops.transpose(
labeled_tensor.tensor, permutation, name=scope)
permuted_axes = [labeled_tensor.axes[n] for n in axis_order]
return LabeledTensor(transpose_tensor, permuted_axes)
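# Editor's note: an illustrative sketch (not part of the original module); like
# tf.transpose, the default axis_order reverses the existing axes.
def _example_transpose():
  lt = LabeledTensor(array_ops.ones([2, 3, 4]), ['x', 'y', 'z'])
  reversed_lt = transpose(lt)
  assert list(reversed_lt.axes.keys()) == ['z', 'y', 'x']
  named_lt = transpose(lt, ['y', 'x', 'z'])
  assert list(named_lt.axes.keys()) == ['y', 'x', 'z']
  return reversed_lt, named_lt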
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike,
tc.Collection(
tc.Union(string_types,
tc.Tuple(string_types, collections.Hashable))),
tc.Optional(string_types))
def expand_dims(labeled_tensor, axes, name=None):
"""Insert dimensions of size 1.
See tf.expand_dims.
Args:
labeled_tensor: The input tensor.
axes: The desired axis names as strings or tuples of (name, label), where
`label` is the coordinate name for the new dimension `name`. These must
include the existing axis names, and the existing names must appear in the
same order in this list as they do in the input tensor.
name: Optional op name.
Returns:
    A tensor with an axis for each entry in `axes`. New axes have size 1; they
    are unlabeled unless a (name, label) tuple was given, in which case the new
    axis has that single coordinate label.
Raises:
AxisOrderError: If axis names don't appear in the same order in axes
and the labeled tensor.
"""
with ops.name_scope(name, 'lt_expand_dims', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
axis_names = [a if isinstance(a, string_types) else a[0] for a in axes]
check_axis_order(labeled_tensor, axis_names)
reshaped_axes = []
shape = []
for axis_spec in axes:
if axis_spec in labeled_tensor.axes:
axis = labeled_tensor.axes[axis_spec]
reshaped_axes.append(axis)
shape.append(-1 if axis.size is None else axis.size)
else:
if isinstance(axis_spec, string_types):
reshaped_axes.append((axis_spec, 1))
else:
(name, label) = axis_spec
reshaped_axes.append((name, (label,)))
shape.append(1)
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
return LabeledTensor(reshaped_tensor, reshaped_axes)
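# Editor's note: an illustrative sketch (not part of the original module). New
# axes get size 1, a (name, label) tuple attaches a single coordinate label,
# and the existing axes must keep their relative order.
def _example_expand_dims():
  lt = LabeledTensor(array_ops.ones([2, 3]), ['x', 'y'])
  expanded = expand_dims(lt, ['batch', 'x', 'y', ('channel', 'red')])
  assert list(expanded.axes.keys()) == ['batch', 'x', 'y', 'channel']
  assert expanded.axes['channel'].labels == ('red',)
  return expanded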
# This should only be added to a graph collection once.
_AXIS_ORDER_KEY = ('__axis_order',)
@tc.returns(tc.Optional(tc.List(string_types)))
def get_axis_order():
"""Get the axis_order set by any containing axis_order_scope.
Returns:
List of strings giving an order to use for axis names, or None, if no axis
order is set.
"""
# By storing axis_order in the graph, we can ensure that axis_order_scope is
# thread-safe.
axis_order_list = ops.get_collection(_AXIS_ORDER_KEY)
if axis_order_list:
axis_order, = axis_order_list
else:
axis_order = None
return axis_order
@tc.accepts(tc.Optional(tc.List(string_types)))
def _set_axis_order(axis_order):
axis_order_list = ops.get_collection_ref(_AXIS_ORDER_KEY)
if axis_order_list:
axis_order_list[0] = axis_order
else:
axis_order_list.append(axis_order)
@contextlib.contextmanager
@tc.accepts(tc.Optional(tc.List(string_types)))
def axis_order_scope(axis_order=None):
"""Set axis order for the result of broadcasting operations within a scope.
This allows you to ensure that tensors resulting from arithmetic have a
predictable axis order.
Example usage:
with lt.axis_order_scope(['x', 'y', 'z']):
# result is guaranteed to have the correct axis order
result = w + b
You can nest scopes, in which case only the inner-most scope applies, e.g.,
    with lt.axis_order_scope(['x', 'y', 'z']):
      with lt.axis_order_scope():
result = w + b # uses the default (left-most) axis ordering
Args:
axis_order: optional list of strings providing axis names. By default,
creates a scope without axis order.
Yields:
The provided axis_order or `None`.
"""
original_axis_order = get_axis_order()
_set_axis_order(axis_order)
try:
yield axis_order
finally:
_set_axis_order(original_axis_order)
@tc.returns(tc.List(string_types))
def _get_valid_axis_order():
axis_order = get_axis_order()
if axis_order is None:
raise AxisOrderError('an explicit axis order must be provided with the '
'axis_order argument or by using an axis_order_scope')
return axis_order
class AxisOrderError(ValueError):
"""Error class for cases where there is no valid axis order."""
# TODO(shoyer): should this function accept a list of labeled tensors instead?
@tc.returns(type(None))
@tc.accepts(LabeledTensorLike, tc.Optional(tc.Collection(string_types)))
def check_axis_order(labeled_tensor, axis_order=None):
"""Verify that the given tensor has a consistent axis order.
Args:
labeled_tensor: The input tensor. All axes on this tensor must appear in
axis_order.
axis_order: Optional desired axis order, as a list of names. If not
provided, defaults to the current axis_order_scope (if set).
Raises:
AxisOrderError: If the axis_order is unavailable, inconsistent or does not
include all existing axes.
"""
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
if axis_order is None:
axis_order = _get_valid_axis_order()
relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes]
if len(relevant_axis_order) < len(labeled_tensor.axes):
raise AxisOrderError(
'not all axis names appear in the required axis order %r: %r' %
(axis_order, labeled_tensor))
if relevant_axis_order != list(labeled_tensor.axes):
raise AxisOrderError(
'axes on a labeled tensor do not appear in the same order as the '
'required axis order %r: %r' % (axis_order, labeled_tensor))
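# Editor's note: an illustrative sketch (not part of the original module);
# check_axis_order passes when the tensor's axes form a subsequence of the
# required order and raises AxisOrderError otherwise.
def _example_check_axis_order():
  lt = LabeledTensor(array_ops.ones([2, 3]), ['x', 'z'])
  check_axis_order(lt, ['w', 'x', 'y', 'z'])  # passes: 'x', 'z' appear in order
  try:
    check_axis_order(lt, ['z', 'x'])
  except AxisOrderError:
    pass  # both axes appear, but in the wrong relative order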
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Optional(tc.Collection(string_types)),
tc.Optional(string_types))
def impose_axis_order(labeled_tensor, axis_order=None, name=None):
"""Impose desired axis order on a labeled tensor.
Args:
labeled_tensor: The input tensor.
axis_order: Optional desired axis order, as a list of names. If not
provided, defaults to the current axis_order_scope (if set).
name: Optional op name.
Returns:
Labeled tensor with possibly transposed axes.
Raises:
AxisOrderError: If no axis_order is provided or axis_order does not contain
all axes on the input tensor.
"""
with ops.name_scope(name, 'lt_impose_axis_order', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
if axis_order is None:
axis_order = _get_valid_axis_order()
relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes]
return transpose(labeled_tensor, relevant_axis_order, name=scope)
@tc.returns(tc.Optional(list))
@tc.accepts(list, list)
def _find_consistent_ordering(a, b):
"""Find the left-most consistent ordering between two lists of unique items.
A consistent ordering combines all elements in both a and b while keeping all
elements in their original order in both inputs. The left-most consistent
ordering orders elements from `a` not found in `b` before elements in `b` not
found in `a`.
For example, given ['x', 'z'] and ['y', 'z'], both ['x', 'y', 'z'] and ['y',
'x', 'z'] are consistent orderings because each of the inputs appears in
each consistent ordering in the same order, and ['x', 'y', 'z'] is the
left-most, because 'x' appears only in `a` and 'y' appears only in `b`. In
contrast, there is no consistent ordering between ['x', 'y'] and ['y', 'x'].
Args:
a: list with unique elements.
b: list with unique elements.
Returns:
List containing all elements in either a or b, or None, if no consistent
ordering exists.
"""
a_set = set(a)
b_set = set(b)
i = 0
j = 0
ordering = []
while i < len(a) and j < len(b):
if a[i] not in b_set:
ordering.append(a[i])
i += 1
elif b[j] not in a_set:
ordering.append(b[j])
j += 1
elif a[i] == b[j]:
ordering.append(a[i])
i += 1
j += 1
else:
return None
ordering.extend(a[i:])
ordering.extend(b[j:])
return ordering
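# Editor's note: an illustrative sketch (not part of the original module)
# mirroring the examples from the docstring above.
def _example_find_consistent_ordering():
  assert _find_consistent_ordering(['x', 'z'], ['y', 'z']) == ['x', 'y', 'z']
  assert _find_consistent_ordering(['x', 'y'], ['y', 'x']) is None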
@tc.returns(LabeledTensor, LabeledTensor, Axes)
@tc.accepts(LabeledTensorLike, LabeledTensorLike, tc.Optional(string_types))
def align(labeled_tensor_0, labeled_tensor_1, name=None):
"""Align the axes of two tensors so they may be broadcast to each other.
Axes are ordered by the current axis order scope, if present, or by the left-
most consistent ordering. An exception is raised if it is impossible to align
the tensors without a transpose (align never copies the input data).
Example usage:
>>> a = lt.LabeledTensor(tf.ones((2, 4)), ['x', 'z'])
>>> b = lt.LabeledTensor(tf.ones((3, 4)), ['y', 'z'])
>>> a2, b2, axes = lt.align(a, b)
>>> a2
<LabeledTensor 'lt_align_1/lt_align_1/0:...' shape=(2, 1, 4) dtype=float32
axes=[('x', Dimension(2)),
('y', Dimension(1)),
('z', Dimension(4))]>
>>> b2
<LabeledTensor 'lt_align_1/lt_align_1/1:...' shape=(1, 3, 4) dtype=float32
axes=[('x', Dimension(1)),
('y', Dimension(3)),
('z', Dimension(4))]>
>>> axes
Axes([('x', Dimension(2)),
('y', Dimension(3)),
('z', Dimension(4))])
Args:
labeled_tensor_0: An input tensor.
labeled_tensor_1: An input tensor.
name: Optional op name.
Returns:
The aligned tensors and the axes the resulting tensor would have if the two
aligned tensors were broadcast to each other. The aligned tensors have the
same rank but not necessarily the same shape, with axes in the same order.
Raises:
ValueError: If axes with the same name on the inputs are not equal.
AxisOrderError: If there is no way to reshape the input tensors into the
output without a transpose.
"""
with ops.name_scope(name, 'lt_align',
[labeled_tensor_0, labeled_tensor_1]) as scope:
labeled_tensor_0 = convert_to_labeled_tensor(labeled_tensor_0)
labeled_tensor_1 = convert_to_labeled_tensor(labeled_tensor_1)
axes_0 = labeled_tensor_0.axes
axes_1 = labeled_tensor_1.axes
for axis_name in axes_0:
if axis_name in axes_1:
if axes_0[axis_name] != axes_1[axis_name]:
raise ValueError('Mismatched %r axis on input tensors: %r and %r' %
(axis_name, axes_0[axis_name], axes_1[axis_name]))
axis_scope_order = get_axis_order()
if axis_scope_order is not None:
# we are in an axis_order_scope
axis_names_set = set(axes_0) | set(axes_1)
new_axis_names = [a for a in axis_scope_order if a in axis_names_set]
check_axis_order(labeled_tensor_0, axis_scope_order)
check_axis_order(labeled_tensor_1, axis_scope_order)
else:
# attempt to find a consistent ordering
new_axis_names = _find_consistent_ordering(list(axes_0), list(axes_1))
if new_axis_names is None:
raise AxisOrderError(
'No consistent axis order allows for aligning tensors with axis '
'orders %r and %r without copying data. Use transpose or '
          'impose_axis_order to reorder axes on one or more of the inputs.' %
(axes_0.keys(), axes_1.keys()))
labeled_tensor_0 = expand_dims(
labeled_tensor_0, new_axis_names, name=scope + '0')
labeled_tensor_1 = expand_dims(
labeled_tensor_1, new_axis_names, name=scope + '1')
broadcast_axes = []
for axis_name in new_axis_names:
if axis_name in axes_0:
broadcast_axes.append(axes_0[axis_name])
else:
broadcast_axes.append(axes_1[axis_name])
return labeled_tensor_0, labeled_tensor_1, Axes(broadcast_axes)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_unary_op(op_name, elementwise_function):
"""Define a unary operation for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
elementwise_function: function to call to evaluate the op on a single
tf.Tensor object. This function must accept two arguments: a tf.Tensor
object, and an optional `name`.
Returns:
Function defining the given op that acts on LabeledTensors.
"""
default_name = 'lt_%s' % op_name
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Optional(string_types))
def op(labeled_tensor, name=None):
"""LabeledTensor version of `tf.{op_name}`.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: Input tensor.
name: Optional op name.
Returns:
A LabeledTensor with result of applying `tf.{op_name}` elementwise.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
result_tensor = elementwise_function(labeled_tensor.tensor, name=scope)
return LabeledTensor(result_tensor, labeled_tensor.axes)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
abs_function = define_unary_op('abs', math_ops.abs)
neg = define_unary_op('neg', math_ops.negative)
sign = define_unary_op('sign', math_ops.sign)
reciprocal = define_unary_op('reciprocal', math_ops.reciprocal)
square = define_unary_op('square', math_ops.square)
round_function = define_unary_op('round', math_ops.round)
sqrt = define_unary_op('sqrt', math_ops.sqrt)
rsqrt = define_unary_op('rsqrt', math_ops.rsqrt)
exp = define_unary_op('exp', math_ops.exp)
log = define_unary_op('log', math_ops.log)
ceil = define_unary_op('ceil', math_ops.ceil)
floor = define_unary_op('floor', math_ops.floor)
cos = define_unary_op('cos', math_ops.cos)
sin = define_unary_op('sin', math_ops.sin)
tan = define_unary_op('tan', math_ops.tan)
acos = define_unary_op('acos', math_ops.acos)
asin = define_unary_op('asin', math_ops.asin)
atan = define_unary_op('atan', math_ops.atan)
lgamma = define_unary_op('lgamma', math_ops.lgamma)
digamma = define_unary_op('digamma', math_ops.digamma)
erf = define_unary_op('erf', math_ops.erf)
erfc = define_unary_op('erfc', math_ops.erfc)
logical_not = define_unary_op('logical_not', math_ops.logical_not)
tanh = define_unary_op('tanh', math_ops.tanh)
sigmoid = define_unary_op('sigmoid', math_ops.sigmoid)
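# Editor's note: an illustrative sketch (not part of the original module) of the
# generated unary ops above; each returns a LabeledTensor with unchanged axes.
def _example_unary_ops():
  lt = LabeledTensor(array_ops.ones([2, 3]), ['x', 'y'])
  assert exp(lt).axes == lt.axes
  assert abs(lt).axes == lt.axes  # __abs__ dispatches to abs_function
  return exp(lt)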
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_binary_op(op_name, elementwise_function):
"""Define a binary operation that broadcasts labeled tensors.
Args:
op_name: string name of the TensorFlow op.
elementwise_function: function to call to evaluate the op on tf.Tensor
objects. This function must accept three arguments: two tf.Tensor objects,
and an optional `name`.
Returns:
Function defining the given op that acts on LabeledTensors.
"""
default_name = 'lt_%s' % op_name
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, LabeledTensorLike, tc.Optional(string_types))
def op(labeled_tensor_0, labeled_tensor_1, name=None):
"""LabeledTensor version of `tf.{op_name}` with label based alignment.
See `tf.{op_name}` for full details.
Args:
labeled_tensor_0: Input tensor.
labeled_tensor_1: Input tensor.
name: Optional op name.
Returns:
A LabeledTensor with result of applying `tf.{op_name}` elementwise.
"""
with ops.name_scope(name, default_name,
[labeled_tensor_0, labeled_tensor_1]) as scope:
align_0, align_1, broadcast_axes = align(labeled_tensor_0,
labeled_tensor_1)
tensor = elementwise_function(align_0.tensor, align_1.tensor, name=scope)
return LabeledTensor(tensor, broadcast_axes)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
add = define_binary_op('add', math_ops.add)
sub = define_binary_op('sub', math_ops.subtract)
mul = define_binary_op('mul', math_ops.multiply)
div = define_binary_op('div', math_ops.div)
mod = define_binary_op('mod', math_ops.mod)
pow_function = define_binary_op('pow', math_ops.pow)
equal = define_binary_op('equal', math_ops.equal)
greater = define_binary_op('greater', math_ops.greater)
greater_equal = define_binary_op('greater_equal', math_ops.greater_equal)
not_equal = define_binary_op('not_equal', math_ops.not_equal)
less = define_binary_op('less', math_ops.less)
less_equal = define_binary_op('less_equal', math_ops.less_equal)
logical_and = define_binary_op('logical_and', math_ops.logical_and)
logical_or = define_binary_op('logical_or', math_ops.logical_or)
logical_xor = define_binary_op('logical_xor', math_ops.logical_xor)
maximum = define_binary_op('maximum', math_ops.maximum)
minimum = define_binary_op('minimum', math_ops.minimum)
squared_difference = define_binary_op('squared_difference',
math_ops.squared_difference)
igamma = define_binary_op('igamma', math_ops.igamma)
igammac = define_binary_op('igammac', math_ops.igammac)
zeta = define_binary_op('zeta', math_ops.zeta)
polygamma = define_binary_op('polygamma', math_ops.polygamma)
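# Editor's note: an illustrative sketch (not part of the original module) of the
# generated binary ops above: operands are aligned by axis name and broadcast,
# matching the align() example earlier in this file.
def _example_binary_ops():
  xz = LabeledTensor(array_ops.ones([2, 4]), ['x', 'z'])
  yz = LabeledTensor(array_ops.ones([3, 4]), ['y', 'z'])
  total = add(xz, yz)  # equivalent to xz + yz
  assert list(total.axes.keys()) == ['x', 'y', 'z']
  return total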
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/core.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for writing tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class Base(test.TestCase):
"""A class with some useful methods for testing."""
def eval(self, tensors):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
try:
results = sess.run(tensors)
finally:
coord.request_stop()
coord.join(threads)
return results
def assertTensorsEqual(self, tensor_0, tensor_1):
[tensor_0_eval, tensor_1_eval] = self.eval([tensor_0, tensor_1])
self.assertAllEqual(tensor_0_eval, tensor_1_eval)
def assertLabeledTensorsEqual(self, tensor_0, tensor_1):
self.assertEqual(tensor_0.axes, tensor_1.axes)
self.assertTensorsEqual(tensor_0.tensor, tensor_1.tensor)
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/test_util.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input parsing code for LabeledTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
class FixedLenFeature(object):
"""Configuration for parsing a fixed-length input feature.
Fields:
axes: A list of Axis objects or tuples (axis_name, axis_value),
where `axis_name` is a string and `axis_value` is None (unknown size), an
integer or a list of tick labels.
dtype: Data type of input.
default_value: Value to be used if an example is missing this feature. It
must be compatible with `dtype`.
"""
def __init__(self, axes, dtype, default_value=None):
self._axes = [core.as_axis(a) for a in axes]
self._dtype = dtype
self._default_value = default_value
@property
def axes(self):
return self._axes
@property
def dtype(self):
return self._dtype
@property
def default_value(self):
return self._default_value
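# Editor's note: an illustrative sketch (not part of the original module);
# axes passed to FixedLenFeature are normalized to core.Axis objects.
def _example_fixed_len_feature():
  feature = FixedLenFeature([('channel', ['red', 'green', 'blue'])],
                            dtypes.float32)
  assert [axis.name for axis in feature.axes] == ['channel']
  assert feature.dtype == dtypes.float32
  return feature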
@tc.returns(tc.Dict(string_types, parsing_ops.FixedLenFeature))
@tc.accepts(tc.Mapping(string_types, FixedLenFeature))
def _labeled_to_unlabeled_features(features):
"""Convert a dict of lt.FixedLenFeature into a dict of tf.FixedLenFeature."""
unlabeled_features = {}
for name, labeled_feature in features.items():
shape = [ax.size for ax in labeled_feature.axes]
if any(size is None for size in shape):
# This should be caught on the TensorFlow side, but it isn't yet:
# https://github.com/tensorflow/tensorflow/issues/2874
raise ValueError('axes with unknown size are not supported')
dtype = labeled_feature.dtype
default_value = labeled_feature.default_value
unlabeled_features[name] = parsing_ops.FixedLenFeature(
shape, dtype, default_value)
return unlabeled_features
@tc.returns(tc.Dict(string_types, core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, FixedLenFeature),
tc.Optional(string_types), object)
def parse_example(serialized, features, name=None, example_names=None):
"""Parse `Example` protos into a `dict` of labeled tensors.
See tf.parse_example.
Args:
serialized: A 1-D LabeledTensor of strings, a batch of binary serialized
`Example` protos.
features: A `dict` mapping feature keys to `labeled_tensor.FixedLenFeature`
values.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping feature keys to `LabeledTensor` values. The single axis
from `serialized` will be prepended to the axes provided by each feature.
Raises:
ValueError: if any feature is invalid.
"""
serialized = core.convert_to_labeled_tensor(serialized)
unlabeled_features = _labeled_to_unlabeled_features(features)
unlabeled_parsed = parsing_ops.parse_example(
serialized.tensor, unlabeled_features, name, example_names)
parsed = {}
for name, parsed_feature in unlabeled_parsed.items():
axes = list(serialized.axes.values()) + features[name].axes
parsed[name] = core.LabeledTensor(parsed_feature, axes)
return parsed
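# Editor's note: an illustrative, hedged sketch (not part of the original
# module); the 'image' feature name and its axes are made up for illustration.
def _example_parse_example():
  serialized = core.LabeledTensor(
      array_ops.placeholder(dtypes.string, [None]), ['batch'])
  features = {
      'image': FixedLenFeature([('channel', ['red', 'green', 'blue'])],
                               dtypes.float32),
  }
  parsed = parse_example(serialized, features)
  # The 'batch' axis from `serialized` is prepended to each feature's axes.
  assert list(parsed['image'].axes.keys()) == ['batch', 'channel']
  return parsed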
@tc.returns(tc.Dict(string_types, core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, FixedLenFeature),
tc.Optional(string_types), object)
def parse_single_example(serialized, features, name=None, example_names=None):
"""Parses a single `Example` proto.
See tf.parse_single_example.
Args:
serialized: A scalar string Tensor or LabeledTensor, a single serialized
Example.
features: A `dict` mapping feature keys to `labeled_tensor.FixedLenFeature`
values.
name: A name for this operation (optional).
example_names: (Optional) A scalar string Tensor, the associated name.
Returns:
A `dict` mapping feature keys to `LabeledTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
serialized = core.convert_to_labeled_tensor(serialized)
unlabeled_features = _labeled_to_unlabeled_features(features)
unlabeled_parsed = parsing_ops.parse_single_example(
serialized.tensor, unlabeled_features, name, example_names)
parsed = {}
for name, parsed_feature in unlabeled_parsed.items():
parsed[name] = core.LabeledTensor(parsed_feature, features[name].axes)
return parsed
@tc.returns(core.LabeledTensor)
@tc.accepts(dtypes.DType, tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def placeholder(dtype, axes, name=None):
"""Create a placeholder for a labeled tensor.
For example:
lt.placeholder(tf.float32, ['batch', ('channel', ['r', 'g', 'b'])])
See tf.compat.v1.placeholder for more details.
Args:
dtype: The type of elements in the tensor to be fed.
axes: sequence of strings (denoting axes of unknown size) and/or objects
      convertible to lt.Axis to label the result.
name: Optional op name.
Returns:
Placeholder labeled tensor.
"""
with ops.name_scope(name, 'lt_placeholder', []) as scope:
axes = core.Axes([(axis, None) if isinstance(axis, string_types) else axis
for axis in axes])
shape = [axis.size for axis in axes.values()]
tensor = array_ops.placeholder(dtype, shape, name=scope)
return core.LabeledTensor(tensor, axes)
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/io_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import re
import textwrap
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test as test_lib
class AxisTest(test_lib.TestCase):
def setUp(self):
d_7 = tensor_shape.Dimension(7)
p_rgb = ['red', 'green', 'blue']
self.i_7 = core.Axis('7', d_7)
self.i_7p = core.Axis('7prime', d_7)
self.i_rgb = core.Axis('rgb', p_rgb)
self.i_range = core.Axis('range', range(7))
self.i_unknown = core.Axis('unknown', None)
def test_equality(self):
axes = [self.i_7, self.i_7p, self.i_rgb, self.i_range, self.i_unknown]
for i, axis_0 in enumerate(axes):
for j, axis_1 in enumerate(axes):
if i == j:
self.assertEqual(axis_0, axis_1)
else:
self.assertNotEqual(axis_0, axis_1)
def test_axis_value(self):
self.assertEqual(self.i_7.value, tensor_shape.Dimension(7))
self.assertTrue(self.i_range.value == tuple(range(7)))
def test_axis_input(self):
axes = [self.i_7, self.i_7p, self.i_rgb, self.i_range, self.i_unknown]
for axis in axes:
self.assertEqual(axis, core.Axis(axis.name, axis.value))
def test_axis_value_input(self):
axis = self.i_range
for value in [range(7), list(range(7)), np.arange(7)]:
self.assertEqual(axis, core.Axis(axis.name, value))
def test_size(self):
self.assertEqual(len(self.i_7), 7)
self.assertEqual(len(self.i_rgb), 3)
self.assertEqual(len(self.i_range), 7)
self.assertEqual(self.i_unknown.size, None)
def test_concat_single(self):
red = core.Axis('rgb', ['red'])
self.assertEqual(core.concat_axes([red]), red)
def test_concat_many(self):
red = core.Axis('rgb', ['red'])
green = core.Axis('rgb', ['green'])
blue = core.Axis('rgb', ['blue'])
red_green_blue = core.Axis('rgb', ['red', 'green', 'blue'])
self.assertEqual(core.concat_axes([red, green, blue]), red_green_blue)
def test_concat_different_names(self):
red = core.Axis('red', ['red'])
green = core.Axis('green', ['red'])
with self.assertRaises(ValueError):
core.concat_axes([red, green])
def test_concat_unknown(self):
red = core.Axis('rgb', None)
green = core.Axis('rgb', None)
self.assertEqual(core.concat_axes([red, green]), red)
def test_repr(self):
self.assertEqual("Axis('7', Dimension(7))", repr(self.i_7))
def test_invalid_input(self):
with self.assertRaises(TypeError):
core.Axis('foo', [{}])
with self.assertRaises(ValueError):
core.Axis('foo', [1, 2, 3, 1])
red = core.Axis('foo', ['red'])
with self.assertRaises(tc.Error):
core.concat_axes([red, 1])
def test_as_axis(self):
self.assertEqual(self.i_7, core.as_axis(('7', 7)))
self.assertEqual(self.i_7, core.as_axis(self.i_7))
class AxesTest(test_lib.TestCase):
def setUp(self):
d_7 = tensor_shape.Dimension(7)
d_8 = tensor_shape.Dimension(8)
p_rgb = ['red', 'green', 'blue']
p_range = range(7)
self.i_8 = core.Axis('8', d_8)
self.a0 = core.Axes([('d7', d_7)])
self.a1 = core.Axes([('d7', d_7)])
self.a2 = core.Axes([('d7', d_7), ('rgb', p_rgb)])
self.a3 = core.Axes([('8', d_8), ('range', p_range)])
def test_equality(self):
self.assertEqual(self.a0, self.a0)
self.assertEqual(self.a0, self.a1)
self.assertNotEqual(self.a0, self.a2)
def test_repr(self):
self.assertEqual("Axes([('d7', Dimension(7))])", repr(self.a0))
def test_remove(self):
a = self.a3.remove('range')
self.assertEqual(a, core.Axes([self.i_8]))
with self.assertRaises(KeyError):
self.a3.remove('foobar')
def test_typecheck_error_message(self):
pattern = ('List(Union(labeled_tensor.Axis, Tuple(..., '
'Union(Union(numpy.ndarray, %s, list, tuple), '
'Optional(Union(tensorflow.Dimension, int))))))' %
range.__name__)
regexp = re.escape(pattern).replace(re.escape('...'), '.*')
with self.assertRaisesRegexp(tc.Error, 'allowed type ' + regexp):
core.Axes(None)
class LabeledTensorTest(test_util.Base):
def setUp(self):
tensor = array_ops.ones([7, 3, 8, 1])
a0 = ('x', range(7))
a1 = ('channel', ['red', 'green', 'blue'])
a2 = ('y', 8)
a3 = ('z', tensor_shape.Dimension(1))
self.lt = core.LabeledTensor(tensor, [a0, a1, a2, a3])
def test_repr(self):
pattern = textwrap.dedent("""\
<LabeledTensor '...' shape=(7, 3, 8, 1) dtype=float32
axes=[('x', ...),
('channel', ...),
('y', Dimension(8)),
('z', Dimension(1))]>""")
regexp = re.escape(pattern).replace(re.escape('...'), '.*')
self.assertRegexpMatches(repr(self.lt), regexp)
def test_reuse_existing_axes(self):
alt_lt = core.LabeledTensor(self.lt.tensor, self.lt.axes)
self.assertLabeledTensorsEqual(alt_lt, self.lt)
def test_reuse_existing_axis_objects(self):
alt_lt = core.LabeledTensor(self.lt.tensor, self.lt.axes.values())
self.assertLabeledTensorsEqual(alt_lt, self.lt)
def test_indexing_scalars(self):
actual = self.lt[:, :, :, 0]
expected = core.LabeledTensor(self.lt.tensor[:, :, :, 0],
list(self.lt.axes.values())[:-1])
self.assertLabeledTensorsEqual(actual, expected)
actual = self.lt[1, :, :, 0]
expected = core.LabeledTensor(self.lt.tensor[1, :, :, 0],
list(self.lt.axes.values())[1:-1])
self.assertLabeledTensorsEqual(actual, expected)
actual = self.lt[1, 2, :, 0]
expected = core.LabeledTensor(self.lt.tensor[1, 2, :, 0],
list(self.lt.axes.values())[2:-1])
self.assertLabeledTensorsEqual(actual, expected)
def test_indexing_1d(self):
lt_1d = self.lt[1, 2, :, 0]
actual = lt_1d[3]
expected = core.LabeledTensor(lt_1d.tensor[3], [])
self.assertLabeledTensorsEqual(actual, expected)
def test_indexing_slices(self):
actual = self.lt[:3, :, :, :]
axes = [('x', range(3))] + list(self.lt.axes.values())[1:]
expected = core.LabeledTensor(self.lt.tensor[:3, :, :, :], axes)
self.assertLabeledTensorsEqual(actual, expected)
def test_invalid_indexing(self):
with self.assertRaises(ValueError):
self.lt[0] # pylint: disable=pointless-statement
with self.assertRaises(ValueError):
self.lt[:, :, :, :, 0] # pylint: disable=pointless-statement
def test_unknown_size(self):
tensor = array_ops.placeholder(dtypes.string, [None])
actual = core.LabeledTensor(tensor, ['x'])
self.assertIsNone(actual.axes['x'].size)
self.assertIsNone(actual.axes['x'].value.value)
def test_eq(self):
self.assertEqual(self.lt, self.lt)
self.assertNotEqual(self.lt, self.lt.tensor)
self.assertNotEqual(self.lt.tensor, self.lt)
def test_hash(self):
lt1 = self.lt
lt2 = core.LabeledTensor(self.lt.tensor, self.lt.axes)
self.assertEqual(lt1, lt2)
self.assertEqual(hash(lt1), hash(lt2))
def test_name(self):
self.assertEqual(self.lt.name, self.lt.tensor.name)
def test_dtype(self):
self.assertEqual(self.lt.dtype, self.lt.tensor.dtype)
def test_shape(self):
self.assertEqual(self.lt.shape, self.lt.tensor.shape)
def test_get_shape(self):
self.assertEqual(self.lt.get_shape(), self.lt.tensor.get_shape())
def test_convert_to_tensor(self):
expected = self.lt.tensor
actual = ops.convert_to_tensor(self.lt)
self.assertIs(expected, actual)
class Base(test_util.Base):
def setUp(self):
self.x_size = 7
self.channel_size = 3
self.z_size = 4
self.probs_size = 11
tensor = math_ops.range(0, self.x_size * self.channel_size * self.z_size *
self.probs_size)
tensor = array_ops.reshape(
tensor, [self.x_size, self.channel_size, self.z_size, self.probs_size])
a0 = ('x', range(self.x_size))
a1 = ('channel', ['red', 'green', 'blue'])
a2 = 'z'
a3 = ('probs', np.linspace(0.0, 1.0, self.probs_size))
self.tensor = tensor
self.a0 = a0
self.a1 = a1
self.a2 = a2
self.a3 = a3
self.original_lt = core.LabeledTensor(tensor, [a0, a1, a2, a3])
self.x_probs_lt = core.slice_function(self.original_lt,
{'z': 0,
'channel': 0})
self.channel_probs_lt = core.slice_function(self.original_lt,
{'x': 3,
'z': 0})
class IdentityTest(Base):
def test_name(self):
identity_lt = core.identity(self.original_lt)
self.assertIn('lt_identity', identity_lt.name)
class SliceFunctionTest(Base):
def test_name(self):
select_lt = core.slice_function(self.original_lt, {'channel': 1})
self.assertIn('lt_slice', select_lt.name)
def test_scalar(self):
select_lt = core.slice_function(self.original_lt, {'channel': 1})
golden_lt = core.LabeledTensor(self.tensor[:, 1, :, :],
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_slice(self):
select_lt = core.slice_function(self.original_lt, {'channel': slice(0, 2)})
a1_sliced = ('channel', ['red', 'green'])
golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :],
[self.a0, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_slices(self):
select_lt = core.slice_function(
self.original_lt, {'x': slice(1, 5),
'channel': slice(1, None)})
a0_sliced = ('x', range(1, 5))
a1_sliced = ('channel', ['green', 'blue'])
golden_lt = core.LabeledTensor(self.tensor[1:5, 1:, :, :],
[a0_sliced, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_slice_unlabeled(self):
select_lt = core.slice_function(self.original_lt, {'z': slice(1, 3)})
a2_sliced = 'z'
golden_lt = core.LabeledTensor(self.tensor[:, :, 1:3, :],
[self.a0, self.a1, a2_sliced, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_slice_unknown_shape(self):
lt = core.LabeledTensor(
array_ops.placeholder(dtypes.float32, [None, 1]), ['x', 'y'])
sliced_lt = core.slice_function(lt, {'y': 0})
self.assertEqual(list(sliced_lt.axes.values()), [lt.axes['x']])
class TransposeTest(Base):
def test_name(self):
transpose_lt = core.transpose(self.original_lt,
self.original_lt.axes.keys())
self.assertIn('lt_transpose', transpose_lt.name)
def test_identity(self):
transpose_lt = core.transpose(self.original_lt,
self.original_lt.axes.keys())
golden_lt = self.original_lt
self.assertLabeledTensorsEqual(transpose_lt, golden_lt)
def test(self):
transpose_lt = core.transpose(self.original_lt,
['z', 'channel', 'x', 'probs'])
golden_lt = core.LabeledTensor(
array_ops.transpose(self.tensor, [2, 1, 0, 3]),
[self.a2, self.a1, self.a0, self.a3])
self.assertLabeledTensorsEqual(transpose_lt, golden_lt)
def test_default_axis_order(self):
transpose_lt = core.transpose(self.original_lt)
golden_lt = core.LabeledTensor(
array_ops.transpose(self.tensor, [3, 2, 1, 0]),
list(reversed(list(self.original_lt.axes.values()))))
self.assertLabeledTensorsEqual(transpose_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
core.transpose(self.original_lt, ['channel', 'x', 'probs'])
with self.assertRaises(ValueError):
core.transpose(self.original_lt, ['z', 'foo', 'x', 'probs'])
class ExpandDimsTest(Base):
def test_name(self):
expand_lt = core.expand_dims(self.original_lt, self.original_lt.axes.keys())
self.assertIn('lt_expand', expand_lt.name)
def test_identity(self):
expand_lt = core.expand_dims(self.original_lt, self.original_lt.axes.keys())
golden_lt = self.original_lt
self.assertLabeledTensorsEqual(expand_lt, golden_lt)
def test(self):
expand_lt = core.expand_dims(
self.original_lt, ['foo', 'x', 'bar', 'channel', 'z', 'probs', 'grok'])
golden_lt = core.LabeledTensor(
array_ops.reshape(self.tensor, [
1, self.x_size, 1, self.channel_size, self.z_size, self.probs_size,
1
]), ['foo', self.a0, 'bar', self.a1, self.a2, self.a3, 'grok'])
self.assertLabeledTensorsEqual(expand_lt, golden_lt)
def test_label(self):
expand_lt = core.expand_dims(self.original_lt, [
'x',
'channel',
('foo', 'bar'),
'z',
'probs',
])
golden_lt = core.LabeledTensor(
array_ops.reshape(
self.tensor,
[self.x_size, self.channel_size, 1, self.z_size, self.probs_size]),
[self.a0, self.a1, ('foo', ['bar']), self.a2, self.a3])
self.assertLabeledTensorsEqual(expand_lt, golden_lt)
def test_unknown_dimension(self):
orig_lt = core.LabeledTensor(
array_ops.placeholder(dtypes.float32, [None]), ['x'])
expand_lt = core.expand_dims(orig_lt, ['x', 'y'])
self.assertEqual(expand_lt.axes, core.Axes([('x', None), ('y', 1)]))
def test_invalid_input(self):
with self.assertRaises(core.AxisOrderError):
core.expand_dims(self.original_lt,
['foo', 'not_x', 'bar', 'channel', 'z', 'probs', 'grok'])
with self.assertRaises(core.AxisOrderError):
core.expand_dims(self.original_lt,
['foo', 'z', 'bar', 'channel', 'x', 'probs', 'grok'])
class AxisOrderScopeTest(Base):
def test(self):
xyz = ['x', 'y', 'z']
abc = ['a', 'b', 'c']
self.assertIsNone(core.get_axis_order())
with core.axis_order_scope(xyz):
self.assertEqual(core.get_axis_order(), xyz)
with core.axis_order_scope():
self.assertIsNone(core.get_axis_order())
with core.axis_order_scope(abc):
self.assertEqual(core.get_axis_order(), abc)
self.assertIsNone(core.get_axis_order())
self.assertEqual(core.get_axis_order(), xyz)
self.assertIsNone(core.get_axis_order())
class CheckAxisOrderTest(Base):
def test_passes(self):
axis_order = ['w', 'x', 'y', 'z']
lt = core.LabeledTensor(array_ops.ones((1, 1, 1, 1)), axis_order)
core.check_axis_order(lt, axis_order)
lt = core.LabeledTensor(array_ops.ones((1, 1, 1)), axis_order[1:])
core.check_axis_order(lt, axis_order)
lt = core.LabeledTensor(array_ops.ones((1, 1, 1)), axis_order[:-1])
core.check_axis_order(lt, axis_order)
def test_invalid(self):
axis_order = ['w', 'x', 'y', 'z']
lt = core.LabeledTensor(array_ops.ones((1, 1, 1, 1)), axis_order)
with self.assertRaises(core.AxisOrderError):
core.check_axis_order(lt)
with self.assertRaises(core.AxisOrderError):
core.check_axis_order(lt, axis_order[:-1])
with self.assertRaises(core.AxisOrderError):
core.check_axis_order(lt, axis_order[::-1])
def test_scope(self):
axis_order = ['w', 'x', 'y', 'z']
lt = core.LabeledTensor(array_ops.ones((1, 1, 1, 1)), axis_order)
with core.axis_order_scope(axis_order):
core.check_axis_order(lt)
class ImposeAxisOrderTest(Base):
def test_identity(self):
axis_order = ['w', 'x', 'y', 'z']
lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(24), (1, 2, 3, 4)), axis_order)
actual = core.impose_axis_order(lt, axis_order)
self.assertLabeledTensorsEqual(lt, actual)
lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(6), (1, 2, 3)), axis_order[:3])
actual = core.impose_axis_order(lt, axis_order)
self.assertLabeledTensorsEqual(lt, actual)
def test_reverse(self):
axis_order = ['w', 'x', 'y', 'z']
lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(24), (1, 2, 3, 4)), axis_order)
actual = core.impose_axis_order(lt, axis_order[::-1])
expected = core.transpose(lt, axis_order[::-1])
self.assertLabeledTensorsEqual(expected, actual)
lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(6), (1, 2, 3)), axis_order[:3])
actual = core.impose_axis_order(lt, axis_order[::-1])
expected = core.transpose(lt, ['y', 'x', 'w'])
self.assertLabeledTensorsEqual(expected, actual)
def test_scope(self):
axis_order = ['w', 'x', 'y', 'z']
lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(24), (1, 2, 3, 4)), axis_order)
expected = core.transpose(lt, axis_order[::-1])
with core.axis_order_scope(axis_order[::-1]):
actual = core.impose_axis_order(lt)
self.assertLabeledTensorsEqual(expected, actual)
def test_invalid(self):
lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(2), (1, 2)), ['x', 'y'])
with self.assertRaises(ValueError):
core.impose_axis_order(lt)
with self.assertRaises(ValueError):
core.impose_axis_order(lt, ['x'])
class FindConsistentOrderingTest(Base):
def test(self):
cases = [
([], [], []),
(['x'], [], ['x']),
([], ['x'], ['x']),
(['x'], ['x'], ['x']),
(['x'], ['y'], ['x', 'y']),
(['y'], ['x'], ['y', 'x']),
(['x', 'y'], ['x', 'y'], ['x', 'y']),
(['x', 'y'], ['y', 'x'], None),
(['x', 'y'], ['y', 'z'], ['x', 'y', 'z']),
(['x', 'z'], ['y', 'z'], ['x', 'y', 'z']),
(['x', 'y'], ['x', 'z'], ['x', 'y', 'z']),
(['w', 'x'], ['y', 'z'], ['w', 'x', 'y', 'z']),
(['x', 'y', 'z'], ['z', 'x'], None),
(['x', 'y', 'z'], ['x'], ['x', 'y', 'z']),
([], ['x', 'y', 'z'], ['x', 'y', 'z']),
]
for a, b, expected in cases:
actual = core._find_consistent_ordering(a, b)
msg = ('unexpected ordering between %r and %r:\nexpected: %r\nactual: %r'
% (a, b, expected, actual))
self.assertEqual(expected, actual, msg=msg)
class AlignTest(Base):
def test_name(self):
align_lt_0, align_lt_1, _ = core.align(self.original_lt, self.original_lt)
self.assertIn('lt_align', align_lt_0.name)
self.assertIn('/0', align_lt_0.name)
self.assertIn('lt_align', align_lt_1.name)
self.assertIn('/1', align_lt_1.name)
def test_identical_shaped_inputs(self):
offset_tensor = self.original_lt.tensor + 1
offset_lt = core.LabeledTensor(offset_tensor, self.original_lt.axes)
align_lt, align_offset_lt, broadcast_axes = core.align(self.original_lt,
offset_lt)
self.assertLabeledTensorsEqual(align_lt, self.original_lt)
self.assertLabeledTensorsEqual(align_offset_lt, offset_lt)
self.assertEqual(broadcast_axes, self.original_lt.axes)
def test_different_inputs(self):
# The correct axis ordering is ['x', 'channel', 'probs'].
align_x_probs_lt, align_channel_probs_lt, broadcast_axes = core.align(
self.x_probs_lt, self.channel_probs_lt)
x_probs_golden_lt = core.LabeledTensor(
array_ops.reshape(self.x_probs_lt.tensor,
[self.x_size, 1, self.probs_size]),
[self.a0, 'channel', self.a3])
self.assertLabeledTensorsEqual(align_x_probs_lt, x_probs_golden_lt)
channel_probs_golden_lt = core.LabeledTensor(
array_ops.reshape(self.channel_probs_lt.tensor,
[1, self.channel_size, self.probs_size]),
['x', self.a1, self.a3])
self.assertLabeledTensorsEqual(align_channel_probs_lt,
channel_probs_golden_lt)
self.assertEqual(broadcast_axes, core.Axes([self.a0, self.a1, self.a3]))
def test_axis_order_scope(self):
xz_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'z'])
yz_lt = core.LabeledTensor(array_ops.ones((4, 3)), ['y', 'z'])
_, _, broadcast_axes = core.align(xz_lt, yz_lt)
self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z'])
_, _, broadcast_axes = core.align(yz_lt, xz_lt)
self.assertEqual(list(broadcast_axes.keys()), ['y', 'x', 'z'])
with core.axis_order_scope(['x', 'y', 'z']):
_, _, broadcast_axes = core.align(yz_lt, xz_lt)
self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z'])
with core.axis_order_scope(['x', 'y']):
with self.assertRaises(core.AxisOrderError):
core.align(xz_lt, yz_lt)
with self.assertRaises(core.AxisOrderError):
core.align(yz_lt, xz_lt)
def test_invalid_input(self):
lt_0 = core.LabeledTensor(array_ops.zeros([5]), [('a', range(5))])
lt_1 = core.LabeledTensor(array_ops.zeros([5]), [('a', range(1, 6))])
with self.assertRaises(ValueError):
core.align(lt_0, lt_1)
class ConvertToLabeledTensorTest(Base):
# TODO(shoyer): Simplify these tests once we can reuse labeled tensors in
# assertLabeledTensorsEqual.
def test_labeled_tensor(self):
actual = core.convert_to_labeled_tensor(self.original_lt)
self.assertLabeledTensorsEqual(actual, self.original_lt)
def test_python_scalar(self):
actual = core.convert_to_labeled_tensor(42)
golden_lt = core.LabeledTensor(ops.convert_to_tensor(42), [])
self.assertLabeledTensorsEqual(actual, golden_lt)
def test_numpy_array(self):
actual = core.convert_to_labeled_tensor(np.array(42))
golden_lt = core.LabeledTensor(ops.convert_to_tensor(42), [])
self.assertLabeledTensorsEqual(actual, golden_lt)
def test_tensor(self):
actual = core.convert_to_labeled_tensor(constant_op.constant(42))
golden_lt = core.LabeledTensor(ops.convert_to_tensor(42), [])
self.assertLabeledTensorsEqual(actual, golden_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
core.convert_to_labeled_tensor(math_ops.range(5))
with self.assertRaises(ValueError):
core.convert_to_labeled_tensor(np.array([1, 2]))
class DocStringCheckMixin(object):
# requires self.ops to be defined
def test_function_docstring_and_name(self):
for op_name, _, _, lt_op in self.ops:
if lt_op is not None:
self.assertIn('tf.%s' % op_name, lt_op.__doc__)
self.assertEqual(op_name, lt_op.__name__)
class UnaryOpsTestsMixin(object):
# requires self.ops and self.test_lt to be defined
def test_core_op(self):
for op_name, _, tf_op, lt_op in self.ops:
if tf_op is not None:
golden_lt = core.LabeledTensor(
tf_op(self.test_lt.tensor), self.test_lt.axes)
actual_lt = lt_op(self.test_lt)
self.assertIn(op_name, actual_lt.name)
self.assertLabeledTensorsEqual(golden_lt, actual_lt)
def test_infix(self):
for op_name, infix_op, _, _ in self.ops:
if infix_op is not None:
expected_lt = core.LabeledTensor(
infix_op(self.test_lt.tensor), self.test_lt.axes)
actual_lt = infix_op(self.test_lt)
self.assertIn(op_name, actual_lt.name)
self.assertLabeledTensorsEqual(expected_lt, actual_lt)
class CoreUnaryOpsTest(Base, DocStringCheckMixin, UnaryOpsTestsMixin):
def setUp(self):
super(CoreUnaryOpsTest, self).setUp()
self.ops = [
('abs', operator.abs, math_ops.abs, core.abs_function),
('neg', operator.neg, math_ops.negative, core.neg),
# TODO(shoyer): add unary + to core TensorFlow
('pos', None, None, None),
('sign', None, math_ops.sign, core.sign),
('reciprocal', None, math_ops.reciprocal, core.reciprocal),
('square', None, math_ops.square, core.square),
('round', None, math_ops.round, core.round_function),
('sqrt', None, math_ops.sqrt, core.sqrt),
('rsqrt', None, math_ops.rsqrt, core.rsqrt),
('log', None, math_ops.log, core.log),
('exp', None, math_ops.exp, core.exp),
('ceil', None, math_ops.ceil, core.ceil),
('floor', None, math_ops.floor, core.floor),
('cos', None, math_ops.cos, core.cos),
('sin', None, math_ops.sin, core.sin),
('tan', None, math_ops.tan, core.tan),
('acos', None, math_ops.acos, core.acos),
('asin', None, math_ops.asin, core.asin),
('atan', None, math_ops.atan, core.atan),
('lgamma', None, math_ops.lgamma, core.lgamma),
('digamma', None, math_ops.digamma, core.digamma),
('erf', None, math_ops.erf, core.erf),
('erfc', None, math_ops.erfc, core.erfc),
]
total_size = np.prod([v.size for v in self.original_lt.axes.values()])
self.test_lt = core.LabeledTensor(
math_ops.cast(self.original_lt, dtypes.float32) / total_size,
self.original_lt.axes)
class LogicalNotTest(Base, DocStringCheckMixin, UnaryOpsTestsMixin):
def setUp(self):
super(LogicalNotTest, self).setUp()
self.ops = [('logical_not', operator.invert, math_ops.logical_not,
core.logical_not),]
self.test_lt = self.original_lt < 10
class BinaryOpsTestsMixin(object):
# requires self.ops, self.test_lt_1, self.test_lt_2, self.test_lt_1_broadcast
# and self.test_lt_2_broadcast to be defined
def test_core_op(self):
for op_name, _, tf_op, lt_op in self.ops:
golden_tensor = tf_op(self.test_lt_1_broadcast, self.test_lt_2_broadcast)
golden_lt = core.LabeledTensor(golden_tensor, self.broadcast_axes)
actual_lt = lt_op(self.test_lt_1, self.test_lt_2)
self.assertIn(op_name, actual_lt.name)
self.assertLabeledTensorsEqual(golden_lt, actual_lt)
def test_infix(self):
for op_name, infix_op, _, lt_op in self.ops:
if infix_op is not None:
expected_lt = lt_op(self.test_lt_1, self.test_lt_2)
actual_lt = infix_op(self.test_lt_1, self.test_lt_2)
self.assertIn(op_name, actual_lt.name)
self.assertLabeledTensorsEqual(expected_lt, actual_lt)
class CoreBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin):
def setUp(self):
super(CoreBinaryOpsTest, self).setUp()
self.x_probs_broadcast_tensor = array_ops.reshape(
self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])
self.channel_probs_broadcast_tensor = array_ops.reshape(
self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])
# == and != are not element-wise for tf.Tensor, so they shouldn't be
# elementwise for LabeledTensor, either.
self.ops = [
('add', operator.add, math_ops.add, core.add),
('sub', operator.sub, math_ops.subtract, core.sub),
('mul', operator.mul, math_ops.multiply, core.mul),
('div', operator.truediv, math_ops.div, core.div),
('mod', operator.mod, math_ops.mod, core.mod),
('pow', operator.pow, math_ops.pow, core.pow_function),
('equal', None, math_ops.equal, core.equal),
('less', operator.lt, math_ops.less, core.less),
('less_equal', operator.le, math_ops.less_equal, core.less_equal),
('not_equal', None, math_ops.not_equal, core.not_equal),
('greater', operator.gt, math_ops.greater, core.greater),
('greater_equal', operator.ge, math_ops.greater_equal,
core.greater_equal),
]
self.test_lt_1 = self.x_probs_lt
self.test_lt_2 = self.channel_probs_lt
self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
self.broadcast_axes = [self.a0, self.a1, self.a3]
def test_reflexive(self):
labeled_tensor = self.x_probs_lt + 1 # all elements must be >0 for division
for op_name, infix_op, _, lt_op in self.ops:
if infix_op is not None:
expected_lt = lt_op(2, labeled_tensor)
actual_lt = infix_op(2, labeled_tensor)
        # Python uses greater for the reflexive version of less (and vice versa)
if 'less' in op_name:
op_name = op_name.replace('less', 'greater')
elif 'greater' in op_name:
op_name = op_name.replace('greater', 'less')
self.assertIn(op_name, actual_lt.name)
self.assertLabeledTensorsEqual(expected_lt, actual_lt)
class LogicalBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin):
def setUp(self):
super(LogicalBinaryOpsTest, self).setUp()
self.ops = [
('logical_and', operator.and_, math_ops.logical_and, core.logical_and),
('logical_or', operator.or_, math_ops.logical_or, core.logical_or),
('logical_xor', operator.xor, math_ops.logical_xor, core.logical_xor),
]
self.test_lt_1 = self.original_lt < 10
self.test_lt_2 = self.original_lt < 5
self.test_lt_1_broadcast = self.test_lt_1.tensor
self.test_lt_2_broadcast = self.test_lt_2.tensor
self.broadcast_axes = self.test_lt_1.axes
class FloatBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin):
def setUp(self):
super(FloatBinaryOpsTest, self).setUp()
self.ops = [
('igamma', None, math_ops.igamma, core.igamma),
('igammac', None, math_ops.igammac, core.igammac),
('zeta', None, math_ops.zeta, core.zeta),
('polygamma', None, math_ops.polygamma, core.polygamma),
('maximum', None, math_ops.maximum, core.maximum),
('minimum', None, math_ops.minimum, core.minimum),
('squared_difference', None, math_ops.squared_difference,
core.squared_difference),
]
total_size = np.prod([v.size for v in self.original_lt.axes.values()])
test_lt = core.LabeledTensor(
math_ops.cast(self.original_lt, dtypes.float32) / total_size,
self.original_lt.axes)
self.test_lt_1 = test_lt
self.test_lt_2 = 1.0 - test_lt
self.test_lt_1_broadcast = self.test_lt_1.tensor
self.test_lt_2_broadcast = self.test_lt_2.tensor
self.broadcast_axes = self.test_lt_1.axes
if __name__ == '__main__':
test_lib.main()
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/core_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import nn
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
class NNTests(test_util.Base):
def setUp(self):
super(NNTests, self).setUp()
self.axes = ['x']
self.original_lt = core.LabeledTensor([0.0, 0.5, 1.0], self.axes)
self.other_lt = 1 - self.original_lt
def test_unary_ops(self):
ops = [
('relu', nn_ops.relu, nn.relu),
('relu6', nn_ops.relu6, nn.relu6),
('crelu', nn_ops.crelu, nn.crelu),
('elu', nn_ops.elu, nn.elu),
('softplus', nn_ops.softplus, nn.softplus),
('l2_loss', nn_ops.l2_loss, nn.l2_loss),
('softmax', nn_ops.softmax, nn.softmax),
('log_softmax', nn_ops.log_softmax, nn.log_softmax),
]
for op_name, tf_op, lt_op in ops:
golden_tensor = tf_op(self.original_lt.tensor)
golden_lt = core.LabeledTensor(golden_tensor, self.axes)
actual_lt = lt_op(self.original_lt)
self.assertIn(op_name, actual_lt.name)
self.assertLabeledTensorsEqual(golden_lt, actual_lt)
def test_binary_ops(self):
ops = [
('sigmoid_cross_entropy_with_logits',
nn_impl.sigmoid_cross_entropy_with_logits,
nn.sigmoid_cross_entropy_with_logits),
('softmax_cross_entropy_with_logits',
nn_ops.softmax_cross_entropy_with_logits,
nn.softmax_cross_entropy_with_logits),
('sparse_softmax_cross_entropy_with_logits',
nn_ops.sparse_softmax_cross_entropy_with_logits,
nn.sparse_softmax_cross_entropy_with_logits),
]
for op_name, tf_op, lt_op in ops:
golden_tensor = tf_op(self.original_lt.tensor, self.other_lt.tensor)
golden_lt = core.LabeledTensor(golden_tensor, self.axes)
actual_lt = lt_op(self.original_lt, self.other_lt)
self.assertIn(op_name, actual_lt.name)
self.assertLabeledTensorsEqual(golden_lt, actual_lt)
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/nn_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to make it a bit easier to use LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops
from tensorflow.python.framework import ops as tf_ops
class ReshapeCoder(object):
"""Utility class for mapping to and from another shape.
For example, say you have a function `crop_center` which expects a
LabeledTensor with axes named ['batch', 'row', 'column', 'depth'], and
you have a LabeledTensor `masked_image_lt` with axes ['batch', 'row',
'column', 'channel', 'mask'].
To call `crop_center` with `masked_image_lt` you'd normally have to write:
>>> reshape_lt = lt.reshape(masked_image_lt, ['channel', 'mask'], ['depth'])
>>> crop_lt = crop_center(reshape_lt)
>>> result_lt = lt.reshape(crop_lt, ['depth'],
... [masked_image_lt.axes['channel'], masked_image_lt.axes['mask']])
ReshapeCoder takes care of this renaming logic for you, allowing you to
instead write:
>>> rc = ReshapeCoder(['channel', 'mask'], ['depth'])
>>> result_lt = rc.decode(crop_center(rc.encode(masked_image_lt)))
Here, `decode` restores the original axes 'channel' and 'mask', so
`crop_center` must not have modified the size of the 'depth' axis.
"""
@tc.accepts(object, tc.Collection(str),
tc.Collection(tc.Union(str, core.AxisLike)), tc.Optional(str))
def __init__(self, existing_axis_names, new_axes, name=None):
self._name = name
self._existing_axis_names = existing_axis_names
self._new_axes = new_axes
self._existing_axes = None
@tc.returns(core.LabeledTensor)
@tc.accepts(object, core.LabeledTensorLike)
def encode(self, labeled_tensor):
"""Reshape the input to the target shape.
If called several times, the axes named in existing_axis_names must be
identical.
Args:
labeled_tensor: The input tensor.
Returns:
The input reshaped to the target shape.
Raises:
ValueError: If the axes in existing_axis_names don't match the axes of
a tensor in a previous invocation of this method.
"""
with tf_ops.name_scope(self._name, 'lt_reshape_encode',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
reshape_lt = ops.reshape(labeled_tensor,
self._existing_axis_names,
self._new_axes,
name=scope)
axes = [labeled_tensor.axes[n] for n in self._existing_axis_names]
if self._existing_axes is not None and self._existing_axes != axes:
raise ValueError(
'input axes %r do not match axes from previous method call %r' %
(axes, self._existing_axes))
else:
self._existing_axes = axes
return reshape_lt
@tc.returns(core.LabeledTensor)
@tc.accepts(object, core.LabeledTensorLike)
def decode(self, labeled_tensor):
"""Reshape the input to the original shape.
This is the inverse of encode.
Encode must have been called at least once prior to this method being
called.
Args:
labeled_tensor: The input tensor.
Returns:
The input reshaped to the original shape.
Raises:
ValueError: If this method was called before encode was called.
"""
if self._existing_axes is None:
raise ValueError('decode called before encode')
with tf_ops.name_scope(self._name, 'lt_reshape_decode',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
new_axis_names = [axis if isinstance(axis, string_types) else
core.as_axis(axis).name for axis in self._new_axes]
return ops.reshape(labeled_tensor,
new_axis_names,
self._existing_axes,
name=scope)
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/sugar.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
temp_axes = core.Axes([axis] + list(
labeled_tensor.axes.remove(axis.name).values()))
transposed = core.transpose(labeled_tensor, temp_axes.keys())
indexed = core.LabeledTensor(
array_ops.gather(transposed.tensor, indexer), temp_axes)
return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types,
tc.Union(slice, collections.Hashable, list)),
tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
Args:
labeled_tensor: The input tensor.
selection: A dictionary mapping an axis name to a scalar, slice or list of
values to select. Currently supports two types of selections:
(a) Any number of scalar and/or slice selections.
(b) Exactly one list selection, without any scalars or slices.
name: Optional op name.
Returns:
The selection as a `LabeledTensor`.
Raises:
ValueError: If the tensor doesn't have an axis in the selection or if
that axis lacks labels.
KeyError: If any labels in a selection are not found in the original axis.
NotImplementedError: If you attempt to combine a list selection with
scalar selection or another list selection.
"""
with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
slices = {}
indexers = {}
for axis_name, value in selection.items():
if axis_name not in labeled_tensor.axes:
raise ValueError(
'The tensor does not have an axis named %s. Its axes are: %r' %
(axis_name, labeled_tensor.axes.keys()))
axis = labeled_tensor.axes[axis_name]
if axis.labels is None:
raise ValueError(
'The axis named %s does not have labels. The axis is: %r' %
(axis_name, axis))
if isinstance(value, slice):
# TODO(shoyer): consider deprecating using slices in favor of lists
if value.start is None:
start = None
else:
start = axis.index(value.start)
if value.stop is None:
stop = None
else:
# For now, follow the pandas convention of making labeled slices
# inclusive of both bounds.
stop = axis.index(value.stop) + 1
if value.step is not None:
raise NotImplementedError('slicing with a step is not yet supported')
slices[axis_name] = slice(start, stop)
# Needs to be after checking for slices, since slice objects claim to be
# instances of collections.Hashable but hash() on them fails.
elif isinstance(value, collections.Hashable):
slices[axis_name] = axis.index(value)
elif isinstance(value, list):
if indexers:
raise NotImplementedError(
'select does not yet support more than one list selection at '
'the same time')
indexer = [axis.index(v) for v in value]
indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
else:
# If type checking is working properly, this shouldn't be possible.
raise TypeError('cannot handle arbitrary types')
if indexers and slices:
raise NotImplementedError(
'select does not yet support combined scalar and list selection')
# For now, handle array selection separately, because tf.gather_nd does
# not support gradients yet. Later, using gather_nd will let us combine
# these paths.
if indexers:
(axis_name, indexer), = indexers.items()
axis = core.Axis(axis_name, selection[axis_name])
return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
else:
return core.slice_function(labeled_tensor, slices, name=scope)
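# Illustrative usage sketch (not part of the original module): demonstrates the
# scalar, slice and list selection modes described above. The helper name
# `_example_select` and its toy inputs are assumptions made for this sketch.
def _example_select():
  lt = core.LabeledTensor(
      array_ops.reshape(math_ops.range(6), (2, 3)),
      [('row', ['a', 'b']), ('col', ['p', 'q', 'r'])])
  scalar_lt = select(lt, {'row': 'a'})             # drops the 'row' axis
  slice_lt = select(lt, {'col': slice('p', 'q')})  # labeled slices include both bounds
  list_lt = select(lt, {'col': ['p', 'r']})        # keeps the 'col' axis
  return scalar_lt, slice_lt, list_lt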
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike), string_types,
tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
"""Concatenate tensors along a dimension.
See tf.concat.
Args:
labeled_tensors: A list of input LabeledTensors.
axis_name: The name of the axis along which to concatenate.
name: Optional op name.
Returns:
The concatenated tensor.
The coordinate labels for the concatenation dimension are also concatenated,
if they are available for every tensor.
Raises:
    ValueError: If no input tensors are provided, if the tensors have
      incompatible axes, or if `axis_name` isn't the name of an axis.
"""
with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
raise ValueError('concat expects at least 1 tensor, but received %s' %
labeled_tensors)
# All tensors must have these axes.
axes_0 = labeled_tensors[0].axes
axis_names = list(axes_0.keys())
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
shared_axes = axes_0.remove(axis_name)
tensors = [labeled_tensors[0].tensor]
concat_axis_list = [axes_0[axis_name]]
for labeled_tensor in labeled_tensors[1:]:
current_shared_axes = labeled_tensor.axes.remove(axis_name)
if current_shared_axes != shared_axes:
# TODO(shoyer): add more specific checks about what went wrong,
# including raising AxisOrderError when appropriate
raise ValueError('Mismatched shared axes: the first tensor '
'had axes %r but this tensor has axes %r.' %
(shared_axes, current_shared_axes))
# Accumulate the axis labels, if they're available.
concat_axis_list.append(labeled_tensor.axes[axis_name])
tensors.append(labeled_tensor.tensor)
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
return core.LabeledTensor(concat_tensor, concat_axes)
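# Illustrative usage sketch (not part of the original module): concatenating
# along a labeled axis also concatenates that axis' labels. The helper name
# `_example_concat` and its toy inputs are assumptions.
def _example_concat():
  first_lt = core.LabeledTensor(
      array_ops.ones((2, 3)), [('x', ['a', 'b']), ('y', 3)])
  second_lt = core.LabeledTensor(
      array_ops.ones((1, 3)), [('x', ['c']), ('y', 3)])
  # Result axes: [('x', ['a', 'b', 'c']), ('y', 3)].
  return concat([first_lt, second_lt], 'x')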
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike),
tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
"""Pack tensors along a new axis.
See tf.pack.
Args:
labeled_tensors: The input tensors, which must have identical axes.
new_axis: The name of the new axis, or a tuple containing the name
and coordinate labels.
axis_position: Optional integer position at which to insert the new axis.
name: Optional op name.
Returns:
The packed tensors as a single LabeledTensor, with `new_axis` in the given
`axis_position`.
Raises:
    ValueError: If no input tensors are provided, or if the tensors do not
      have identical axes.
"""
with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensor, but received %s' %
labeled_tensors)
axes_0 = labeled_tensors[0].axes
for t in labeled_tensors:
if t.axes != axes_0:
raise ValueError('Non-identical axes. Expected %s but got %s' %
(axes_0, t.axes))
pack_op = array_ops.stack(
[t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
axes = list(axes_0.values())
axes.insert(axis_position, new_axis)
return core.LabeledTensor(pack_op, axes)
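# Illustrative usage sketch (not part of the original module): stacking two
# identically labeled tensors along a new leading 'batch' axis. The helper
# name `_example_pack` is an assumption.
def _example_pack():
  ones_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
  zeros_lt = core.LabeledTensor(array_ops.zeros((2, 3)), ['x', 'y'])
  # Result axes: ['batch', 'x', 'y'], with 'batch' of size 2 in position 0.
  return pack([ones_lt, zeros_lt], 'batch')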
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unpack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
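# Illustrative usage sketch (not part of the original module): unpacking along
# the leading axis yields one LabeledTensor per entry, with that axis removed.
# The helper name `_example_unpack` is an assumption.
def _example_unpack():
  lt = core.LabeledTensor(array_ops.ones((2, 3)), ['batch', 'x'])
  first_lt, second_lt = unpack(lt)  # each has axes ['x']
  return first_lt, second_lt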
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Collection(string_types),
tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
"""Reshape specific axes of a LabeledTensor.
Non-indicated axes remain in their original locations.
Args:
labeled_tensor: The input tensor.
existing_axes: List of axis names found on the input tensor. These must
appear sequentially in the list of axis names on the input. In other
words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
providing new axes with which to replace `existing_axes` in the reshaped
result. At most one element of `new_axes` may be a string, indicating an
axis with unknown size.
name: Optional op name.
Returns:
The reshaped LabeledTensor.
Raises:
ValueError: If `existing_axes` are not all axes on the input, or if more
than one of `new_axes` has unknown size.
AxisOrderError: If `existing_axes` are not a slice of axis names on the
input.
"""
with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
original_axis_names = list(labeled_tensor.axes.keys())
existing_axes = list(existing_axes)
if not set(existing_axes) <= set(original_axis_names):
raise ValueError('existing_axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_axes, original_axis_names))
start = original_axis_names.index(existing_axes[0])
stop = original_axis_names.index(existing_axes[-1]) + 1
if existing_axes != original_axis_names[start:stop]:
# We could support existing_axes that aren't a slice by using transpose,
# but that could lead to unpredictable performance consequences because
# transposes are not free in TensorFlow. If we did transpose
# automatically, the user might never realize that their data is being
      # produced with the wrong order. (The latter will occur with some
      # frequency because of how broadcasting automatically chooses axis order.)
# So for now we've taken the strict approach.
raise core.AxisOrderError(
'existing_axes %r are not a slice of axis names %r on the input '
'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
'axes on the input explicitly.' %
(existing_axes, original_axis_names))
if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
raise ValueError(
'at most one axis in new_axes can have unknown size. All other '
'axes must have an indicated integer size or labels: %r' % new_axes)
original_values = list(labeled_tensor.axes.values())
axis_size = lambda axis: -1 if axis.size is None else axis.size
shape = [axis_size(axis) for axis in original_values[:start]]
for axis_ref in new_axes:
if isinstance(axis_ref, string_types):
shape.append(-1)
else:
axis = core.as_axis(axis_ref)
shape.append(axis_size(axis))
shape.extend(axis_size(axis) for axis in original_values[stop:])
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
axes = original_values[:start] + list(new_axes) + original_values[stop:]
return core.LabeledTensor(reshaped_tensor, axes)
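# Illustrative usage sketch (not part of the original module): merging two
# adjacent axes into a single new axis of inferred size. The helper name
# `_example_reshape` is an assumption.
def _example_reshape():
  lt = core.LabeledTensor(array_ops.ones((2, 3, 4)), ['batch', 'row', 'col'])
  # 'row' and 'col' form a contiguous slice of the axes, so they may be merged
  # into an unlabeled 'pixel' axis of size 12; axes become ['batch', 'pixel'].
  return reshape(lt, ['row', 'col'], ['pixel'])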
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
"""Rename an axis of LabeledTensor.
Args:
labeled_tensor: The input tensor.
existing_name: Name for an existing axis on the input.
new_name: Desired replacement name.
name: Optional op name.
Returns:
LabeledTensor with renamed axis.
Raises:
ValueError: If `existing_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r is not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_name, labeled_tensor.axes.keys()))
new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
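# Illustrative usage sketch (not part of the original module): renaming keeps
# the axis size and labels. The helper name `_example_rename_axis` is an
# assumption.
def _example_rename_axis():
  lt = core.LabeledTensor(array_ops.ones((2, 3)), [('x', ['a', 'b']), 'y'])
  return rename_axis(lt, 'x', 'row')  # axes: [('row', ['a', 'b']), 'y']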
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
tc.Collection(core.LabeledTensorLike), bool,
tc.Optional(string_types))
def _batch_helper(default_name,
batch_fn,
batch_size,
enqueue_many,
labeled_tensors,
allow_smaller_final_batch,
name=None):
with ops.name_scope(name, default_name, labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
# TODO(shoyer): Remove this when they sanitize the TF API.
if not isinstance(batch_ops, list):
assert isinstance(batch_ops, ops.Tensor)
batch_ops = [batch_ops]
if allow_smaller_final_batch:
batch_size = None
@tc.returns(core.Axes)
@tc.accepts(core.Axes)
def output_axes(axes):
if enqueue_many:
if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
raise ValueError(
'When enqueue_many is True, input tensors must have an axis '
'called "batch" as their first dimension, '
'but axes were %s' % axes)
culled_axes = axes.remove('batch')
return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
else:
return core.Axes([('batch', batch_size)] + list(axes.values()))
output_labeled_tensors = []
for i, tensor in enumerate(batch_ops):
axes = output_axes(labeled_tensors[i].axes)
output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
tc.Optional(string_types))
def batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
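# Illustrative usage sketch (not part of the original module): grouping single
# examples (no 'batch' axis yet) into batches of four; this only builds the
# queue-backed op. The helper name `_example_batch` is an assumption.
def _example_batch():
  example_lt = core.LabeledTensor(array_ops.ones((3,)), ['x'])
  batched_lt, = batch([example_lt], batch_size=4)
  return batched_lt  # axes: [('batch', 4), 'x']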
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
min_after_dequeue=0,
seed=None,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor, with shuffling.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
min_after_dequeue: Minimum number of elements in the queue after a dequeue,
used to ensure mixing.
seed: Optional random seed.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.shuffle_batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
min_after_dequeue=min_after_dequeue,
seed=seed,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types, int),
tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
"""Randomly crops a tensor to a given size.
See tf.random_crop.
Args:
labeled_tensor: The input tensor.
shape_map: A dictionary mapping axis names to the size of the random crop
for that dimension.
seed: An optional random seed.
name: An optional op name.
Returns:
A tensor of the same rank as `labeled_tensor`, cropped randomly in the
selected dimensions.
Raises:
ValueError: If the shape map contains an axis name not in the input tensor.
"""
with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
for axis_name in shape_map:
if axis_name not in labeled_tensor.axes:
raise ValueError('Selection axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
shape = []
axes = []
for axis in labeled_tensor.axes.values():
if axis.name in shape_map:
size = shape_map[axis.name]
shape.append(size)
# We lose labels for the axes we crop, leaving just the size.
axes.append((axis.name, size))
else:
shape.append(len(axis))
axes.append(axis)
crop_op = random_ops.random_crop(
labeled_tensor.tensor, shape, seed=seed, name=scope)
return core.LabeledTensor(crop_op, axes)
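# Illustrative usage sketch (not part of the original module): cropping the
# 'x' axis down to size 2 while leaving 'y' untouched. The helper name
# `_example_random_crop` is an assumption.
def _example_random_crop():
  lt = core.LabeledTensor(array_ops.ones((5, 3)), ['x', 'y'])
  return random_crop(lt, {'x': 2})  # axes: [('x', 2), 'y']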
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
"""Map on the list of tensors unpacked from labeled_tensor.
See tf.map_fn.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type LabeledTensor -> LabeledTensor.
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
A tensor that packs the results of applying fn to the list of tensors
unpacked from labeled_tensor.
"""
with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
unpack_lts = unpack(labeled_tensor)
# TODO(ericmc): Fix this upstream.
if labeled_tensor.dtype == dtypes.string:
# We must construct the full graph here, because map_fn_lib.map_fn
# doesn't work for string-valued tensors.
# Constructing the full graph may be slow.
map_lts = [fn(t) for t in unpack_lts]
return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
else:
# Figure out what the axis labels should be, but use tf.map_fn to
# construct the graph because it's efficient.
# It may be slow to construct the full graph, so we infer the labels from
# the first element.
# TODO(ericmc): This builds a subgraph which then gets thrown away.
# Find a more elegant solution.
first_map_lt = fn(unpack_lts[0])
final_axes = list(labeled_tensor.axes.values())[:1] + list(
first_map_lt.axes.values())
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor)
def tf_fn(tensor):
original_axes = list(labeled_tensor.axes.values())[1:]
tensor_lt = core.LabeledTensor(tensor, original_axes)
return fn(tensor_lt).tensor
map_op = map_fn_lib.map_fn(
tf_fn, labeled_tensor.tensor, dtype=first_map_lt.dtype)
map_lt = core.LabeledTensor(map_op, final_axes)
return core.identity(map_lt, name=scope)
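# Illustrative usage sketch (not part of the original module): mapping a
# LabeledTensor -> LabeledTensor function over the leading axis. The helper
# name `_example_map_fn` is an assumption.
def _example_map_fn():
  lt = core.LabeledTensor(array_ops.ones((3, 4)), ['batch', 'x'])
  # Squares every element; the 'batch' axis is mapped over and re-packed.
  return map_fn(core.square, lt)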
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
"""Left fold on the list of tensors unpacked from labeled_tensor.
See tf.foldl.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
Its arguments are (accumulated_value, next_value).
labeled_tensor: The input tensor.
initial_value: The initial value of the accumulator.
name: Optional op name.
Returns:
The accumulated value.
"""
with ops.name_scope(name, 'lt_foldl',
[labeled_tensor, initial_value]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
initial_value = core.convert_to_labeled_tensor(initial_value)
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor, ops.Tensor)
def tf_fn(accumulator, next_element):
accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
next_element_lt = core.LabeledTensor(
next_element, list(labeled_tensor.axes.values())[1:])
return fn(accumulator_lt, next_element_lt).tensor
foldl_op = functional_ops.foldl(
tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
return core.identity(foldl_lt, name=scope)
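# Illustrative usage sketch (not part of the original module): summing slices
# along the leading axis by left-folding with core.add. The helper name
# `_example_foldl` is an assumption.
def _example_foldl():
  lt = core.LabeledTensor(array_ops.ones((4, 2)), ['batch', 'x'])
  initial_lt = core.LabeledTensor(array_ops.zeros((2,)), ['x'])
  return foldl(core.add, lt, initial_lt)  # axes: ['x']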
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
"""Remove size-1 dimensions.
See tf.squeeze.
Args:
labeled_tensor: The input tensor.
axis_names: The names of the dimensions to remove, or None to remove
all size-1 dimensions.
name: Optional op name.
Returns:
A tensor with the specified dimensions removed.
Raises:
ValueError: If the named axes are not in the tensor, or if they are
not size-1.
"""
with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axis_names is None:
axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
for axis_name in axis_names:
if axis_name not in labeled_tensor.axes:
raise ValueError('axis %s is not in tensor axes %s' %
(axis_name, labeled_tensor.axes))
elif len(labeled_tensor.axes[axis_name]) != 1:
raise ValueError(
'cannot squeeze axis with size greater than 1: (%s, %s)' %
(axis_name, labeled_tensor.axes[axis_name]))
squeeze_dimensions = []
axes = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in axis_names:
squeeze_dimensions.append(i)
else:
axes.append(axis)
if squeeze_dimensions:
squeeze_op = array_ops.squeeze(
labeled_tensor.tensor, squeeze_dimensions, name=scope)
else:
squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
return core.LabeledTensor(squeeze_op, axes)
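# Illustrative usage sketch (not part of the original module): removing a
# size-1 axis by name. The helper name `_example_squeeze` is an assumption.
def _example_squeeze():
  lt = core.LabeledTensor(array_ops.ones((2, 1, 3)), ['x', 'channel', 'y'])
  return squeeze(lt, ['channel'])  # axes: ['x', 'y']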
# pylint: disable=invalid-name
ReduceAxis = tc.Union(string_types,
tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def matmul(a, b, name=None):
"""Matrix multiply two tensors with rank 1 or 2.
If both tensors have rank 2, a matrix-matrix product is performed.
If one tensor has rank 1 and the other has rank 2, then a matrix-vector
product is performed.
If both tensors have rank 1, then a vector dot-product is performed.
(This behavior matches that of `numpy.dot`.)
Both tensors must share exactly one dimension in common, which is the
dimension the operation is summed along. The inputs will be automatically
transposed if necessary as part of the matmul op.
We intend to eventually support `matmul` on higher rank input, and also
eventually support summing over any number shared dimensions (via an `axis`
argument), but neither of these features has been implemented yet.
Args:
a: First LabeledTensor.
b: Second LabeledTensor.
name: Optional op name.
Returns:
LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in order of appearance on the
inputs.
Raises:
NotImplementedError: If inputs have rank >2 or share multiple axes.
ValueError: If the inputs have rank 0 or do not share any axes.
"""
with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
a = core.convert_to_labeled_tensor(a)
b = core.convert_to_labeled_tensor(b)
if len(a.axes) > 2 or len(b.axes) > 2:
# We could pass batched inputs to tf.matmul to make this work, but we
# would also need to use tf.tile and/or tf.transpose. These are more
# expensive than doing reshapes, so it's not clear if it's a good idea to
# do this automatically.
raise NotImplementedError(
'matmul currently requires inputs with rank 2 or less, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
if not a.axes or not b.axes:
raise ValueError(
'matmul currently requires inputs with at least rank 1, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
shared_axes = set(a.axes) & set(b.axes)
if len(shared_axes) > 1:
raise NotImplementedError(
'matmul does not yet support summing over multiple shared axes: %r. '
'Use transpose and reshape to create a single shared axis to sum '
'over.' % shared_axes)
if not shared_axes:
      raise ValueError('there must be exactly one axis in common between '
                       'inputs to matmul: %r, %r' %
(a.axes.keys(), b.axes.keys()))
shared_axis, = shared_axes
if a.axes[shared_axis] != b.axes[shared_axis]:
raise ValueError('axis %r does not match on input arguments: %r vs %r' %
(shared_axis, a.axes[shared_axis].value,
b.axes[shared_axis].value))
result_axes = []
for axes in [a.axes, b.axes]:
for axis in axes.values():
if axis.name != shared_axis:
result_axes.append(axis)
axis_scope_order = core.get_axis_order()
if axis_scope_order is not None:
result_axis_names = [axis.name for axis in result_axes]
new_axis_names = [
name for name in axis_scope_order if name in result_axis_names
]
if new_axis_names != result_axis_names:
# switch a and b
b, a = a, b
# result_axes is a list of length 1 or 2
result_axes = result_axes[::-1]
squeeze_dims = []
if len(a.axes) == 1:
a_tensor = array_ops.reshape(a.tensor, (1, -1))
squeeze_dims.append(0)
transpose_a = False
else:
a_tensor = a.tensor
transpose_a = list(a.axes.keys()).index(shared_axis) == 0
if len(b.axes) == 1:
b_tensor = array_ops.reshape(b.tensor, (-1, 1))
squeeze_dims.append(1)
transpose_b = False
else:
b_tensor = b.tensor
transpose_b = list(b.axes.keys()).index(shared_axis) == 1
result_op = math_ops.matmul(
a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
if squeeze_dims:
result_op = array_ops.squeeze(result_op, squeeze_dims)
result_op = array_ops.identity(result_op, name=scope)
return core.LabeledTensor(result_op, result_axes)
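# Illustrative usage sketch (not part of the original module): a matrix-vector
# product that sums over the single shared axis 'y'. The helper name
# `_example_matmul` is an assumption.
def _example_matmul():
  matrix_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
  vector_lt = core.LabeledTensor(array_ops.ones((3,)), ['y'])
  return matmul(matrix_lt, vector_lt)  # axes: ['x']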
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
"""Define a reduction op for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
reduce_fn: function to call to evaluate the op on a tf.Tensor.
Returns:
Function defining the given reduction op that acts on a LabeledTensor.
"""
default_name = 'lt_%s' % op_name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
def op(labeled_tensor, axes=None, name=None):
"""Computes the given reduction across the given axes of a LabeledTensor.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: The input tensor.
axes: A set of axes or None.
If None, all axes will be reduced.
Axes must all be strings, in which case those dimensions will be
removed, or pairs of (name, None) or (name, label), in which case those
dimensions will be kept.
name: Optional op name.
Returns:
The reduced LabeledTensor.
Raises:
ValueError: if any of the axes to reduce over are not found on
`labeled_tensor`.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axes is None:
axes = labeled_tensor.axes.keys()
if isinstance(axes, (string_types, tuple)):
axes = [axes]
reduction_axes = {}
axes_to_squeeze = []
for a in axes:
if isinstance(a, string_types):
# We squeeze out this axis.
reduction_axes[a] = a
axes_to_squeeze.append(a)
else:
# We keep this axis, with the user-provided labels.
(axis_name, label) = a
if label is not None:
# The input was a single label, so make it a list so it can be
# turned into an Axis.
label = [label]
reduction_axes[axis_name] = (axis_name, label)
for axis_name in reduction_axes:
if axis_name not in labeled_tensor.axes:
raise ValueError('Axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
intermediate_axes = []
reduction_dimensions = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in reduction_axes:
intermediate_axes.append(reduction_axes[axis.name])
reduction_dimensions.append(i)
else:
intermediate_axes.append(axis)
reduce_op = reduce_fn(
labeled_tensor.tensor, reduction_dimensions, keepdims=True)
reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
return squeeze(reduce_lt, axes_to_squeeze, name=scope)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
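# Illustrative usage sketch (not part of the original module): reducing over a
# named axis removes it, while passing a (name, label) pair such as
# ('y', 'sum') would keep a size-1 labeled 'y' axis instead. The helper name
# `_example_reduce_sum` is an assumption.
def _example_reduce_sum():
  lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
  return reduce_sum(lt, 'y')  # axes: ['x']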
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Union(int, ops.Tensor)),
tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
"""Constructs a tensor by tiling a given tensor.
Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
tensors would no longer be unique.)
  See tf.tile.
Args:
labeled_tensor: The input tensor.
multiples: A mapping where the keys are axis names and the values are the
integer number of times to tile along that axis. Only axes with a multiple
      different from 1 need be included.
name: Optional op name.
Returns:
A tensor with the indicated axes tiled.
Raises:
ValueError: If the tiled axes are not axes in the input tensor, or if any
axes in multiples have tick labels.
"""
with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('tile axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(multiples.keys(), labeled_tensor.axes))
labeled_axes = [
name for name in multiples
if labeled_tensor.axes[name].labels is not None
]
if labeled_axes:
raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
new_axes = [
axis.name if axis.labels is None else axis
for axis in labeled_tensor.axes.values()
]
return core.LabeledTensor(tile_op, new_axes)
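# Illustrative usage sketch (not part of the original module): tiling an
# unlabeled axis; labeled axes would raise ValueError. The helper name
# `_example_tile` is an assumption.
def _example_tile():
  lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
  return tile(lt, {'y': 2})  # 'y' grows from size 3 to size 6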
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
"""Pads a tensor.
See tf.pad.
Args:
labeled_tensor: The input tensor.
paddings: A mapping where the keys are axis names and the values are
tuples where the first element is the padding to insert at the beginning
of the axis and the second is the padding to insert at the end of the
axis.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: Optional op name.
Returns:
A tensor with the indicated axes padded, optionally with those axes extended
with the provided labels.
Raises:
ValueError: If the padded axes are not axes in the input tensor.
"""
with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('pad axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(paddings.keys(), labeled_tensor.axes))
new_axes = []
padding_pairs = []
for name, axis in labeled_tensor.axes.items():
if name in paddings:
padding_before, padding_after = paddings[name]
axis_before = core.Axis(name, padding_before)
axis_after = core.Axis(name, padding_after)
new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
padding_pairs.append((len(axis_before), len(axis_after)))
else:
new_axes.append(axis)
padding_pairs.append((0, 0))
pad_op = array_ops.pad(labeled_tensor.tensor,
padding_pairs,
mode,
name=scope)
return core.LabeledTensor(pad_op, new_axes)
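# Illustrative usage sketch (not part of the original module): padding the 'x'
# axis with one element before and two after. The helper name `_example_pad`
# is an assumption.
def _example_pad():
  lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
  return pad(lt, {'x': (1, 2)})  # 'x' grows from size 2 to size 5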
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Union(np.ndarray, list, tuple, core.Scalar),
tc.Optional(dtypes.DType),
tc.Optional(
tc.Union(core.Axes, tc.Collection(
tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
"""Creates a constant tensor.
If `axes` includes any strings, shape is inferred from `value`. Otherwise,
the sizes of the given `axes` are used to set `shape` for `tf.constant`.
See tf.constant for more details.
Args:
value: The input tensor.
dtype: The type of the returned tensor.
axes: Optional Axes, list of strings or list of objects coercible to Axis
objects. By default, axes are assumed to be an empty list (i.e., `value`
is treated as a scalar).
name: Optional op name.
Returns:
    The constant `value` as a `LabeledTensor` with the given `axes`.
"""
with ops.name_scope(name, 'lt_constant', [value]) as scope:
if axes is None:
axes = []
if isinstance(axes, core.Axes):
axes = axes.values()
if any(isinstance(ax, string_types) for ax in axes):
# need to infer shape
shape = None
else:
# axes already indicate shape
axes = [core.as_axis(a) for a in axes]
shape = [a.size for a in axes]
op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
return core.LabeledTensor(op, axes)
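# Illustrative usage sketch (not part of the original module): a labeled
# constant whose shape is taken from the given axes. The helper name
# `_example_constant` is an assumption.
def _example_constant():
  return constant([[1, 2, 3], [4, 5, 6]],
                  axes=[('row', ['a', 'b']), ('col', 3)])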
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to zero.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to zero.
"""
with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to one.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to one.
"""
with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
"""Casts a labeled tensor to a new type.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
A labeled tensor with the new dtype.
"""
with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
"""Asserts a tensor doesn't contain NaNs or Infs.
See tf.verify_tensor_all_finite.
Args:
labeled_tensor: The input tensor.
message: Message to log on failure.
name: Optional op name.
Returns:
The input tensor.
"""
with ops.name_scope(name, 'lt_verify_tensor_all_finite',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = numerics.verify_tensor_all_finite(
labeled_tensor.tensor, msg=message, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
"""Apply a boolean mask to a labeled tensor.
Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
The mask is applied to the first axis of `labeled_tensor`. Labels on the first
  axis are removed, because the positions of True entries in `mask` are not
  known statically.
Args:
labeled_tensor: The input tensor.
    mask: A boolean `LabeledTensor` with a single axis that matches the first
      axis of `labeled_tensor`.
name: Optional op name.
Returns:
The masked labeled tensor.
Raises:
    ValueError: if the axis of `mask` does not match the first axis of
      `labeled_tensor`.
    NotImplementedError: if `mask` has more than one axis.
"""
with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
mask = core.convert_to_labeled_tensor(mask)
if len(mask.axes) > 1:
raise NotImplementedError(
"LabeledTensor's boolean_mask currently only supports 1D masks")
mask_axis = list(mask.axes.values())[0]
lt_axis = list(labeled_tensor.axes.values())[0]
if mask_axis != lt_axis:
raise ValueError('the first axis of the labeled tensor and the mask '
'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
# TODO(shoyer): attempt to infer labels for the masked values, by calling
# tf.get_static_value on the mask?
axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
return core.LabeledTensor(op, axes)
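# Illustrative usage sketch (not part of the original module): masking along
# the leading axis; the result keeps the axis name but drops its labels. The
# helper name `_example_boolean_mask` is an assumption.
def _example_boolean_mask():
  lt = core.LabeledTensor(array_ops.ones((3, 2)), ['batch', 'x'])
  mask_lt = core.LabeledTensor(
      ops.convert_to_tensor([True, False, True]), ['batch'])
  return boolean_mask(lt, mask_lt)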
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
"""Return elements from x or y depending on condition.
See `tf.where` for more details. This function currently only implements the
three argument version of where.
Args:
condition: LabeledTensor of type `bool`.
x: LabeledTensor for values where condition is true.
y: LabeledTensor for values where condition is false.
name: Optional op name.
Returns:
The labeled tensor with values according to condition.
Raises:
ValueError: if `x` and `y` have different axes, or if the axes of `x` do not
start with the axes of `condition`.
"""
with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
condition = core.convert_to_labeled_tensor(condition)
x = core.convert_to_labeled_tensor(x)
y = core.convert_to_labeled_tensor(y)
if not condition.axes == x.axes == y.axes:
raise ValueError('all inputs to `where` must have equal axes')
op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
return core.LabeledTensor(op, x.axes)
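# Illustrative usage sketch (not part of the original module): an elementwise
# choice between two tensors that share the same axes. The helper name
# `_example_where` is an assumption.
def _example_where():
  x_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
  y_lt = core.LabeledTensor(array_ops.zeros((2, 3)), ['x', 'y'])
  condition_lt = core.greater(x_lt, y_lt)  # boolean LabeledTensor, same axes
  return where(condition_lt, x_lt, y_lt)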
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test as test_lib
class Base(test_util.Base):
def setUp(self):
super(Base, self).setUp()
self.x_size = 7
self.channel_size = 3
self.z_size = 4
self.probs_size = 11
tensor = math_ops.range(0, self.x_size * self.channel_size * self.z_size *
self.probs_size)
tensor = array_ops.reshape(
tensor, [self.x_size, self.channel_size, self.z_size, self.probs_size])
a0 = ('x', range(self.x_size))
a1 = ('channel', ['red', 'green', 'blue'])
a2 = 'z'
a3 = ('probs', np.linspace(0.0, 1.0, self.probs_size))
self.tensor = tensor
self.a0 = a0
self.a1 = a1
self.a2 = a2
self.a2_resolved = ('z', self.z_size)
self.a3 = a3
self.original_lt = core.LabeledTensor(tensor, [a0, a1, a2, a3])
self.x_probs_lt = core.slice_function(self.original_lt, {'z': 0})
self.x_probs_lt = ops.select(self.x_probs_lt, {'channel': 'red'})
self.channel_probs_lt = core.slice_function(self.original_lt,
{'x': 3,
'z': 0})
class SelectTest(Base):
def test_name(self):
select_lt = ops.select(self.original_lt, {'channel': 'green'})
self.assertIn('lt_select', select_lt.name)
def test_scalar(self):
select_lt = ops.select(self.original_lt, {'channel': 'green'})
golden_lt = core.LabeledTensor(self.tensor[:, 1, :, :],
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_slice(self):
select_lt = ops.select(self.original_lt, {'channel': slice('red', 'green')})
a1_sliced = ('channel', ['red', 'green'])
golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :],
[self.a0, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_slices(self):
select_lt = ops.select(self.original_lt,
{'x': slice(1, 4),
'channel': slice('green', None)})
a0_sliced = ('x', range(1, 5))
a1_sliced = ('channel', ['green', 'blue'])
golden_lt = core.LabeledTensor(self.tensor[1:5, 1:, :, :],
[a0_sliced, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_list(self):
select_lt = ops.select(self.original_lt, {'channel': ['red', 'green']})
a1_sliced = ('channel', ['red', 'green'])
golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :],
[self.a0, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_list_one_item(self):
select_lt = ops.select(self.original_lt, {'channel': ['red']})
a1_sliced = ('channel', ['red'])
golden_lt = core.LabeledTensor(self.tensor[:, :1, :, :],
[self.a0, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_list_zero_items(self):
select_lt = ops.select(self.original_lt, {'channel': []})
golden_lt = core.LabeledTensor(self.tensor[:, :0, :, :],
[self.a0, 'channel', self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_scalars(self):
select_lt = ops.select(self.original_lt, {'x': 1, 'channel': 'green'})
golden_lt = core.LabeledTensor(self.tensor[1, 1, :, :], [self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_tuple(self):
original_lt = core.LabeledTensor(constant_op.constant([5, 6]),
[('x', [(1, 2), (3, 4)])])
select_lt = ops.select(original_lt, {'x': (1, 2)})
golden_lt = core.LabeledTensor(constant_op.constant(5), [])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.select(self.original_lt, {'foo': 1})
with self.assertRaises(ValueError):
ops.select(self.original_lt, {'z': 1})
with self.assertRaises(KeyError):
ops.select(self.original_lt, {'channel': 'purple'})
with self.assertRaises(KeyError):
ops.select(self.original_lt, {'channel': ['red', 'purple']})
with self.assertRaises(NotImplementedError):
ops.select(self.original_lt, {'channel': ['red'], 'x': [1]})
with self.assertRaises(NotImplementedError):
ops.select(self.original_lt, {'channel': ['red'], 'x': 1})
with self.assertRaises(NotImplementedError):
ops.select(self.original_lt, {'channel': slice('red', 'green', 2)})
class ConcatTest(Base):
def setUp(self):
super(ConcatTest, self).setUp()
self.red_lt = ops.select(self.original_lt, {'channel': ['red']})
self.green_lt = ops.select(self.original_lt, {'channel': ['green']})
self.blue_lt = ops.select(self.original_lt, {'channel': ['blue']})
def test_name(self):
concat_lt = ops.concat([self.red_lt, self.blue_lt], 'channel')
self.assertIn('lt_concat', concat_lt.name)
def test(self):
concat_lt = ops.concat([self.red_lt, self.green_lt], 'channel')
golden_lt = ops.select(self.original_lt, {'channel': ['red', 'green']})
self.assertLabeledTensorsEqual(concat_lt, golden_lt)
def test_transposed(self):
green_transposed = core.transpose(self.green_lt,
['probs', 'channel', 'z', 'x'])
with self.assertRaises(ValueError):
ops.concat([self.red_lt, green_transposed], 'channel')
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.concat([], 'channel')
with self.assertRaises(ValueError):
ops.concat([self.red_lt, self.red_lt], 'channel')
with self.assertRaises(ValueError):
ops.concat([self.red_lt, self.red_lt], 'foo')
class PackTest(Base):
def test_name(self):
pack_lt = ops.pack([self.original_lt, self.original_lt], 'batch')
self.assertIn('lt_pack', pack_lt.name)
def test(self):
pack_lt = ops.pack([self.original_lt, self.original_lt], 'batch')
golden_lt = core.LabeledTensor(
array_ops.stack([self.original_lt.tensor, self.original_lt.tensor]),
['batch', self.a0, self.a1, self.a2, self.a3])
self.assertLabeledTensorsEqual(pack_lt, golden_lt)
def test_axis(self):
pack_lt = ops.pack(
[self.original_lt, self.original_lt], new_axis='batch', axis_position=4)
golden_lt = core.LabeledTensor(
array_ops.stack(
[self.original_lt.tensor, self.original_lt.tensor], axis=4),
[self.a0, self.a1, self.a2, self.a3, 'batch'])
self.assertLabeledTensorsEqual(pack_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.pack([self.original_lt, self.original_lt], 'channel')
class UnpackTest(Base):
def test_name(self):
unpack_lts = ops.unpack(self.original_lt)
for t in unpack_lts:
self.assertIn('lt_unpack', t.name)
def test(self):
unpack_lt = ops.unpack(self.original_lt)[0]
golden_lt = core.LabeledTensor(
array_ops.unstack(self.original_lt.tensor)[0],
[self.a1, self.a2, self.a3])
self.assertLabeledTensorsEqual(unpack_lt, golden_lt)
def test_axis(self):
unpack_lt = ops.unpack(self.original_lt, axis_name='z')[0]
golden_lt = core.LabeledTensor(
array_ops.unstack(
self.original_lt.tensor, axis=2)[0], [self.a0, self.a1, self.a3])
self.assertLabeledTensorsEqual(unpack_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.unpack(self.original_lt, axis_name='not_found')
class ReshapeTest(Base):
def test_name(self):
reshape_lt = ops.reshape(self.original_lt, ['channel'], ['foo'])
self.assertIn('lt_reshape', reshape_lt.name)
def test_identity(self):
reshape_lt = ops.reshape(self.original_lt,
self.original_lt.axes.keys(),
self.original_lt.axes.values())
self.assertLabeledTensorsEqual(reshape_lt, self.original_lt)
def test_known_size(self):
new_dim_size = self.channel_size * self.z_size * self.probs_size
reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
[('new_dim', new_dim_size)])
golden_lt = core.LabeledTensor(
array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]),
[self.original_lt.axes['x'], 'new_dim'])
self.assertLabeledTensorsEqual(reshape_lt, golden_lt)
def test_unknown_size(self):
reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
['new_dim'])
golden_lt = core.LabeledTensor(
array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]),
[self.original_lt.axes['x'], 'new_dim'])
self.assertLabeledTensorsEqual(reshape_lt, golden_lt)
def test_unknown_dimension(self):
orig_lt = core.LabeledTensor(
array_ops.placeholder(dtypes.float32, [None]), ['x'])
reshape_lt = ops.reshape(orig_lt, ['x'], ['y', ('z', 1)])
self.assertEqual(reshape_lt.axes, core.Axes([('y', None), ('z', 1)]))
with self.cached_session() as sess:
result = sess.run(reshape_lt, feed_dict={orig_lt.tensor: [1, 2]})
np.testing.assert_array_equal(result, [[1], [2]])
def test_with_labels(self):
new_dim_size = self.channel_size * self.z_size * self.probs_size
reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
[('new_dim', range(new_dim_size))])
golden_lt = core.LabeledTensor(
array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]),
[self.original_lt.axes['x'], ('new_dim', range(new_dim_size))])
self.assertLabeledTensorsEqual(reshape_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaisesRegexp(ValueError, 'not contained in the set'):
ops.reshape(self.original_lt, ['foo'], ['bar'])
with self.assertRaisesRegexp(core.AxisOrderError,
'not a slice of axis names'):
ops.reshape(self.original_lt, ['probs', 'z'], ['bar'])
with self.assertRaisesRegexp(ValueError, 'at most one axis in new_axes'):
ops.reshape(self.original_lt, ['probs'], ['foo', 'bar'])
class RenameAxisTest(Base):
def test_name(self):
rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'foo')
self.assertIn('lt_rename_axis', rename_axis_lt.name)
def test_identity(self):
rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'channel')
self.assertLabeledTensorsEqual(rename_axis_lt, self.original_lt)
def test_new_name(self):
rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'foo')
expected_axes = [(name if name != 'channel' else 'foo', axis.value)
for name, axis in self.original_lt.axes.items()]
expected_lt = core.LabeledTensor(self.original_lt.tensor, expected_axes)
self.assertLabeledTensorsEqual(rename_axis_lt, expected_lt)
def test_invalid_input(self):
with self.assertRaisesRegexp(ValueError, 'not contained in the set'):
ops.rename_axis(self.original_lt, 'foo', 'bar')
class BatchTest(Base):
def setUp(self):
super(BatchTest, self).setUp()
tensors = []
for i in range(10):
offset_lt = core.LabeledTensor(constant_op.constant(i), [])
tensors.append(core.add(self.original_lt, offset_lt))
self.pack_lt = ops.pack(tensors, 'batch')
def test_name(self):
batch_ops = ops.batch(
[self.pack_lt, self.pack_lt], batch_size=2, enqueue_many=True)
for bo in batch_ops:
self.assertIn('lt_batch', bo.name)
def test_enqueue_many(self):
[batch_2_op] = ops.batch([self.pack_lt], batch_size=2, enqueue_many=True)
self.assertEqual(len(batch_2_op.axes['batch']), 2)
[batch_10_op] = ops.batch([batch_2_op], batch_size=10, enqueue_many=True)
self.assertLabeledTensorsEqual(self.pack_lt, batch_10_op)
def test_no_enqueue_many(self):
[batch_2_op] = ops.batch([self.original_lt], batch_size=2)
self.assertEqual(len(batch_2_op.axes['batch']), 2)
[batch_10_op] = ops.batch([batch_2_op], batch_size=10, enqueue_many=True)
self.assertLabeledTensorsEqual(
ops.pack(10 * [self.original_lt], 'batch'), batch_10_op)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.batch([self.original_lt], 3, enqueue_many=True)
def test_allow_smaller_final_batch(self):
[batch_2_op] = ops.batch(
[self.original_lt], batch_size=2, allow_smaller_final_batch=True)
self.assertEqual(batch_2_op.axes['batch'].size, None)
class ShuffleBatchTest(Base):
def setUp(self):
super(ShuffleBatchTest, self).setUp()
tensors = []
for i in range(10):
offset_lt = core.LabeledTensor(constant_op.constant(i), [])
tensors.append(core.add(self.original_lt, offset_lt))
self.pack_lt = ops.pack(tensors, 'batch')
def test_name(self):
batch_lts = ops.shuffle_batch(
[self.pack_lt, self.pack_lt], batch_size=2, enqueue_many=True)
for blt in batch_lts:
self.assertIn('lt_shuffle_batch', blt.name)
def test_enqueue_many(self):
[batch_2_lt] = ops.shuffle_batch(
[self.pack_lt],
batch_size=2,
enqueue_many=True,
min_after_dequeue=8,
seed=0)
self.assertEqual(len(batch_2_lt.axes['batch']), 2)
[batch_10_lt] = ops.batch([batch_2_lt], batch_size=10, enqueue_many=True)
self.assertEqual(batch_10_lt.axes, self.pack_lt.axes)
[batch_10, pack] = self.eval([batch_10_lt.tensor, self.pack_lt.tensor])
self.assertFalse((batch_10 == pack).all())
def test_allow_smaller_final_batch(self):
[batch_2_op] = ops.shuffle_batch(
[self.original_lt], batch_size=2, allow_smaller_final_batch=True)
self.assertEqual(batch_2_op.axes['batch'].size, None)
class RandomCropTest(Base):
def test_name(self):
crop_lt = ops.random_crop(self.original_lt, {'probs': 3})
self.assertIn('lt_random_crop', crop_lt.name)
def test_single(self):
crop_lt = ops.random_crop(self.original_lt, {'probs': 3})
self.assertEqual(
core.Axes([self.a0, self.a1, self.a2_resolved, ('probs', 3)]),
crop_lt.axes)
def test_double(self):
crop_lt = ops.random_crop(self.original_lt, {'probs': 3, 'channel': 2})
self.assertEqual(
core.Axes([self.a0, ('channel', 2), self.a2_resolved, ('probs', 3)]),
crop_lt.axes)
def test_size1(self):
crop_lt = ops.random_crop(self.original_lt, {'probs': 1})
self.assertEqual(
core.Axes([self.a0, self.a1, self.a2_resolved, ('probs', 1)]),
crop_lt.axes)
def test_different_seeds(self):
crop_0_lt = ops.random_crop(
self.original_lt, {'probs': 3,
'channel': 2}, seed=0)
crop_1_lt = ops.random_crop(
self.original_lt, {'probs': 3,
'channel': 2}, seed=1)
self.assertEqual(crop_0_lt.axes, crop_1_lt.axes)
[crop_0, crop_1] = self.eval([crop_0_lt.tensor, crop_1_lt.tensor])
self.assertFalse((crop_0 == crop_1).all())
def test_identical_seeds(self):
crop_0_lt = ops.random_crop(
self.original_lt, {'probs': 3,
'channel': 2}, seed=0)
crop_1_lt = ops.random_crop(
self.original_lt, {'probs': 3,
'channel': 2}, seed=0)
self.assertLabeledTensorsEqual(crop_0_lt, crop_1_lt)
def test_crop_idempotent(self):
crop_0_lt = ops.random_crop(
self.original_lt, {'probs': 3,
'channel': 2}, seed=0)
crop_1_lt = ops.random_crop(crop_0_lt, {'probs': 3, 'channel': 2}, seed=1)
self.assertLabeledTensorsEqual(crop_0_lt, crop_1_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.random_crop(self.original_lt, {'foobar': 2})
class MapFnTest(Base):
def test_name(self):
map_lt = ops.map_fn(core.identity, self.original_lt)
self.assertIn('lt_map_fn', map_lt.name)
def test_identity(self):
map_lt = ops.map_fn(core.identity, self.original_lt)
self.assertLabeledTensorsEqual(map_lt, self.original_lt)
def test_callable_object(self):
class Identity(object):
def __call__(self, other):
return other
map_lt = ops.map_fn(Identity(), self.original_lt)
self.assertLabeledTensorsEqual(map_lt, self.original_lt)
def test_slice(self):
map_lt = ops.map_fn(lambda t: core.slice_function(t, {'channel': 1}),
self.original_lt)
slice_lt = core.slice_function(self.original_lt, {'channel': 1})
self.assertLabeledTensorsEqual(map_lt, slice_lt)
def test_string(self):
def fn(entry_lt):
op = string_ops.string_join([entry_lt, 'world'])
return core.LabeledTensor(op, [])
tensor_lt = ops.constant(['hi', 'bye'], axes=['batch'])
map_lt = ops.map_fn(fn, tensor_lt)
golden_lt = ops.constant(['hiworld', 'byeworld'], axes=['batch'])
self.assertLabeledTensorsEqual(map_lt, golden_lt)
class FoldlTest(Base):
def test_name(self):
foldl_lt = ops.foldl(core.add, self.original_lt,
core.slice_function(self.original_lt, {'x': 0}))
self.assertIn('lt_foldl', foldl_lt.name)
def test_sum(self):
initializer_lt = ops.constant([0, 10], axes=['y'])
tensor_lt = ops.constant([[1, 2], [3, 4], [5, 6]], axes=['x', 'y'])
foldl_lt = ops.foldl(core.add, tensor_lt, initializer_lt)
golden_lt = ops.constant([9, 22], axes=['y'])
self.assertLabeledTensorsEqual(foldl_lt, golden_lt)
class SqueezeTest(Base):
def setUp(self):
super(SqueezeTest, self).setUp()
self.squeezable_lt = core.slice_function(
self.original_lt, {'channel': slice(0, 1),
'probs': slice(0, 1)})
def test_name(self):
squeeze_lt = ops.squeeze(self.squeezable_lt)
self.assertIn('lt_squeeze', squeeze_lt.name)
def test_none(self):
none_lt = ops.squeeze(self.squeezable_lt, None)
axes_lt = ops.squeeze(self.squeezable_lt, ['channel', 'probs'])
self.assertLabeledTensorsEqual(none_lt, axes_lt)
def test(self):
squeeze_lt = ops.squeeze(self.squeezable_lt, ['probs'])
golden_lt = core.slice_function(self.squeezable_lt, {'probs': 0})
self.assertLabeledTensorsEqual(squeeze_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.squeeze(self.original_lt, ['channel'])
with self.assertRaises(ValueError):
ops.squeeze(self.squeezable_lt, ['foo'])
class MatMulTest(Base):
def test_name(self):
x_lt = core.LabeledTensor(array_ops.ones((3,)), ['x'])
matmul_lt = ops.matmul(x_lt, x_lt)
self.assertIn('lt_matmul', matmul_lt.name)
def test_vector_vector(self):
x_lt = core.LabeledTensor(math_ops.range(3), ['x'])
matmul_lt = ops.matmul(x_lt, x_lt)
golden_lt = core.convert_to_labeled_tensor(5)
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
def test_matrix_vector(self):
xy_lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y'])
y_lt = core.LabeledTensor(math_ops.range(3), ['y'])
matmul_lt = ops.matmul(xy_lt, y_lt)
golden_lt = core.LabeledTensor(
math_ops.matmul(xy_lt.tensor, array_ops.reshape(y_lt.tensor,
(-1, 1)))[:, 0], ['x'])
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
matmul_lt = ops.matmul(y_lt, xy_lt)
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
def test_matrix_matrix(self):
xy_lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y'])
yz_lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(12), (3, 4)), ['y', 'z'])
matmul_lt = ops.matmul(xy_lt, yz_lt)
golden_lt = core.LabeledTensor(
math_ops.matmul(xy_lt.tensor, yz_lt.tensor), ['x', 'z'])
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
transpose = lambda x: core.transpose(x, list(x.axes.keys())[::-1])
matmul_lt = ops.matmul(xy_lt, transpose(yz_lt))
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
matmul_lt = ops.matmul(transpose(xy_lt), yz_lt)
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
matmul_lt = ops.matmul(transpose(xy_lt), transpose(yz_lt))
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
matmul_lt = ops.matmul(yz_lt, xy_lt)
self.assertLabeledTensorsEqual(matmul_lt, transpose(golden_lt))
def test_matrix_matrix_axis_order(self):
xy_lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y'])
yz_lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(12), (3, 4)), ['y', 'z'])
golden_lt = core.LabeledTensor(
math_ops.matmul(xy_lt.tensor, yz_lt.tensor), ['x', 'z'])
with core.axis_order_scope(['x', 'y', 'z']):
matmul_lt = ops.matmul(xy_lt, yz_lt)
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
matmul_lt = ops.matmul(yz_lt, xy_lt)
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
def test_invalid(self):
scalar_lt = core.LabeledTensor(array_ops.ones(()), [])
x_lt = core.LabeledTensor(array_ops.ones((2,)), ['x'])
x2_lt = core.LabeledTensor(array_ops.ones((3,)), ['x'])
y_lt = core.LabeledTensor(array_ops.ones((3,)), ['y'])
xy_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
xyz_lt = core.LabeledTensor(array_ops.ones((2, 3, 1)), ['x', 'y', 'z'])
with self.assertRaisesRegexp(ValueError, 'inputs with at least rank'):
ops.matmul(x_lt, scalar_lt)
with self.assertRaises(NotImplementedError):
ops.matmul(x_lt, xyz_lt)
with self.assertRaisesRegexp(ValueError, 'exactly one axis in common'):
ops.matmul(x_lt, y_lt)
with self.assertRaises(NotImplementedError):
ops.matmul(xy_lt, xy_lt)
with self.assertRaisesRegexp(ValueError, 'does not match'):
ops.matmul(x_lt, x2_lt)
class ReduceSumTest(Base):
def test_name(self):
sum_lt = ops.reduce_sum(self.original_lt, {'channel'})
self.assertIn('lt_reduce_sum', sum_lt.name)
def test_drop_axis(self):
sum_lt = ops.reduce_sum(self.original_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_sum(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(sum_lt, golden_lt)
def test_drop_scalar_axis(self):
sum_lt = ops.reduce_sum(self.original_lt, 'channel')
golden_lt = core.LabeledTensor(
math_ops.reduce_sum(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(sum_lt, golden_lt)
def test_keep_axis(self):
sum_lt = ops.reduce_sum(self.original_lt, {('channel', 'hihowareyou')})
golden_lt = core.LabeledTensor(
math_ops.reduce_sum(
self.original_lt.tensor, 1, keepdims=True),
[self.a0, ('channel', ['hihowareyou']), self.a2, self.a3])
self.assertLabeledTensorsEqual(sum_lt, golden_lt)
def test_keep_scalar_axis(self):
sum_lt = ops.reduce_sum(self.original_lt, ('channel', 'hihowareyou'))
golden_lt = core.LabeledTensor(
math_ops.reduce_sum(
self.original_lt.tensor, 1, keepdims=True),
[self.a0, ('channel', ['hihowareyou']), self.a2, self.a3])
self.assertLabeledTensorsEqual(sum_lt, golden_lt)
def test_scalar(self):
scalar_lt = core.LabeledTensor(constant_op.constant(42), [])
reduce_lt = ops.reduce_sum(scalar_lt, [])
self.assertLabeledTensorsEqual(reduce_lt, scalar_lt)
def test_empty_list(self):
reduce_lt = ops.reduce_sum(self.original_lt, [])
self.assertLabeledTensorsEqual(reduce_lt, self.original_lt)
def test_none(self):
sum_lt = ops.reduce_sum(self.original_lt)
golden_lt = core.LabeledTensor(
math_ops.reduce_sum(self.original_lt.tensor), [])
self.assertLabeledTensorsEqual(sum_lt, golden_lt)
def test_function_docstring_and_name(self):
self.assertIn('tf.reduce_sum', ops.reduce_sum.__doc__)
self.assertEqual('reduce_sum', ops.reduce_sum.__name__)
class ReduceMeanTest(Base):
def test_name(self):
actual_lt = ops.reduce_mean(self.original_lt, {'channel'})
self.assertIn('lt_reduce_mean', actual_lt.name)
def test(self):
actual_lt = ops.reduce_mean(self.original_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_mean(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(actual_lt, golden_lt)
class ReduceProdTest(Base):
def test_name(self):
result_lt = ops.reduce_prod(self.original_lt, {'channel'})
self.assertIn('lt_reduce_prod', result_lt.name)
def test(self):
result_lt = ops.reduce_prod(self.original_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_prod(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(result_lt, golden_lt)
class ReduceMinTest(Base):
def test_name(self):
result_lt = ops.reduce_min(self.original_lt, {'channel'})
self.assertIn('lt_reduce_min', result_lt.name)
def test(self):
result_lt = ops.reduce_min(self.original_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_min(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(result_lt, golden_lt)
class ReduceMaxTest(Base):
def test_name(self):
result_lt = ops.reduce_max(self.original_lt, {'channel'})
self.assertIn('lt_reduce_max', result_lt.name)
def test(self):
result_lt = ops.reduce_max(self.original_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_max(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(result_lt, golden_lt)
class BaseReduceBoolean(Base):
def setUp(self):
super(BaseReduceBoolean, self).setUp()
self.bool_tensor = math_ops.cast(self.original_lt.tensor > 5, dtypes.bool)
self.bool_lt = core.LabeledTensor(self.bool_tensor, self.original_lt.axes)
class ReduceAllTest(BaseReduceBoolean):
def test_name(self):
result_lt = ops.reduce_all(self.bool_lt, {'channel'})
self.assertIn('lt_reduce_all', result_lt.name)
def test(self):
result_lt = ops.reduce_all(self.bool_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_all(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(result_lt, golden_lt)
class ReduceAnyTest(BaseReduceBoolean):
def test_name(self):
result_lt = ops.reduce_any(self.bool_lt, {'channel'})
self.assertIn('lt_reduce_any', result_lt.name)
def test(self):
result_lt = ops.reduce_any(self.bool_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_any(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(result_lt, golden_lt)
class TileTest(Base):
def test_name(self):
tile_lt = ops.tile(self.original_lt, {'z': 2})
self.assertIn('lt_tile', tile_lt.name)
def test(self):
for multiple in [2, constant_op.constant(2)]:
tile_lt = ops.tile(self.original_lt, {'z': multiple})
golden_op = array_ops.tile(self.original_lt.tensor, [1, 1, multiple, 1])
golden_axes = [
'z' if axis.name == 'z' else axis
for axis in self.original_lt.axes.values()
]
golden_lt = core.LabeledTensor(golden_op, golden_axes)
self.assertLabeledTensorsEqual(tile_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaisesRegexp(ValueError, 'are not contained in the set'):
ops.tile(self.original_lt, {'foo': 5})
with self.assertRaisesRegexp(ValueError, 'axes with tick labels'):
ops.tile(self.original_lt, {'x': 5})
class PadTest(Base):
def test_name(self):
pad_lt = ops.pad(self.original_lt,
{'x': (1, 1),
'channel': ([], ['alpha'])})
self.assertIn('lt_pad', pad_lt.name)
def test(self):
pad_lt = ops.pad(self.original_lt,
{'x': (1, 1),
'channel': ([], ['alpha'])})
golden_op = array_ops.pad(self.original_lt.tensor, [[1, 1], [0, 1], [0, 0],
[0, 0]])
golden_axes = [('x', self.x_size + 2),
('channel', ['red', 'green', 'blue', 'alpha']), self.a2,
self.a3]
golden_lt = core.LabeledTensor(golden_op, golden_axes)
self.assertLabeledTensorsEqual(pad_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaisesRegexp(ValueError, 'are not contained in the set'):
ops.pad(self.original_lt, {'foo': (1, 1), 'channel': ([], ['alpha'])})
class ConstantTest(Base):
def test_name(self):
constant_lt = ops.constant(1)
self.assertIn('lt_constant', constant_lt.name)
def test_scalar(self):
constant_lt = ops.constant(1)
golden_lt = core.LabeledTensor(constant_op.constant(1), [])
self.assertLabeledTensorsEqual(constant_lt, golden_lt)
def test_infer_shape(self):
constant_lt = ops.constant([1, 2], axes=['x'])
golden_lt = core.LabeledTensor(constant_op.constant([1, 2]), ['x'])
self.assertLabeledTensorsEqual(constant_lt, golden_lt)
def test_specify_shape(self):
constant_lt = ops.constant(1, axes=[('x', 3)])
golden_lt = core.LabeledTensor(constant_op.constant(1, shape=(3,)), ['x'])
self.assertLabeledTensorsEqual(constant_lt, golden_lt)
def test_existing_axes(self):
golden_lt = core.LabeledTensor(constant_op.constant([1, 2]), ['x'])
constant_lt = ops.constant([1, 2], axes=golden_lt.axes)
self.assertLabeledTensorsEqual(constant_lt, golden_lt)
class ZerosLikeTest(Base):
def test_name(self):
like_lt = ops.zeros_like(self.original_lt)
self.assertIn('lt_zeros_like', like_lt.name)
def test(self):
like_lt = ops.zeros_like(self.original_lt)
golden_lt = core.LabeledTensor(
array_ops.zeros_like(self.original_lt.tensor), self.original_lt.axes)
self.assertLabeledTensorsEqual(like_lt, golden_lt)
class OnesLikeTest(Base):
def test_name(self):
like_lt = ops.ones_like(self.original_lt)
self.assertIn('lt_ones_like', like_lt.name)
def test(self):
like_lt = ops.ones_like(self.original_lt)
golden_lt = core.LabeledTensor(
array_ops.ones_like(self.original_lt.tensor), self.original_lt.axes)
self.assertLabeledTensorsEqual(like_lt, golden_lt)
class CastTest(Base):
def test_name(self):
cast_lt = ops.cast(self.original_lt, dtypes.float16)
self.assertIn('lt_cast', cast_lt.name)
def test(self):
cast_lt = ops.cast(self.original_lt, dtypes.float16)
golden_lt = core.LabeledTensor(
math_ops.cast(self.original_lt.tensor, dtypes.float16),
self.original_lt.axes)
self.assertLabeledTensorsEqual(cast_lt, golden_lt)
class VerifyTensorAllFiniteTest(Base):
def setUp(self):
super(VerifyTensorAllFiniteTest, self).setUp()
self.finite_lt = core.LabeledTensor(constant_op.constant(42.0), [])
self.nan_lt = core.LabeledTensor(constant_op.constant(np.nan), [])
self.checked_finite_lt = ops.verify_tensor_all_finite(self.finite_lt, '')
self.checked_nan_lt = ops.verify_tensor_all_finite(self.nan_lt, '')
def test_name(self):
self.assertIn('lt_verify_tensor_all_finite', self.checked_finite_lt.name)
self.assertIn('lt_verify_tensor_all_finite', self.checked_nan_lt.name)
def test_finite(self):
self.assertLabeledTensorsEqual(self.finite_lt, self.checked_finite_lt)
def test_nan(self):
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'Tensor had NaN values'):
self.eval([self.checked_nan_lt])
class BooleanMaskTest(Base):
def test_name(self):
mask = core.LabeledTensor(math_ops.range(7) > 3, [self.a0])
masked_lt = ops.boolean_mask(self.original_lt, mask)
self.assertIn('lt_boolean_mask', masked_lt.name)
def test(self):
mask = core.LabeledTensor(math_ops.range(7) > 3, [self.a0])
masked_lt = ops.boolean_mask(self.original_lt, mask)
golden_lt = core.LabeledTensor(
array_ops.boolean_mask(self.original_lt.tensor, mask.tensor),
['x', self.a1, self.a2, self.a3])
self.assertLabeledTensorsEqual(masked_lt, golden_lt)
def test_invalid_rank(self):
mask = core.LabeledTensor(array_ops.ones((7, 3)) > 3, [self.a0, self.a1])
with self.assertRaises(NotImplementedError):
ops.boolean_mask(self.original_lt, mask)
def test_mismatched_axis(self):
mask = core.LabeledTensor(math_ops.range(7) > 3, ['foo'])
with self.assertRaisesRegexp(ValueError, 'not equal'):
ops.boolean_mask(self.original_lt, mask)
class WhereTest(Base):
def test_name(self):
condition = core.LabeledTensor(math_ops.range(5) < 3, ['x'])
where_lt = ops.where(condition, condition, condition)
self.assertIn('lt_where', where_lt.name)
def test(self):
condition = core.LabeledTensor(math_ops.range(5) < 3, ['x'])
x = core.LabeledTensor(array_ops.ones(5), ['x'])
y = core.LabeledTensor(array_ops.zeros(5), ['x'])
where_lt = ops.where(condition, x, y)
golden_lt = core.LabeledTensor(
array_ops.concat([array_ops.ones(3), array_ops.zeros(2)], 0), ['x'])
self.assertLabeledTensorsEqual(where_lt, golden_lt)
def test_mismatched_axes(self):
condition = core.LabeledTensor(math_ops.range(5) < 3, ['x'])
with self.assertRaisesRegexp(ValueError, 'equal axes'):
ops.where(condition, condition[:3], condition)
with self.assertRaisesRegexp(ValueError, 'equal axes'):
ops.where(condition, condition, condition[:3])
if __name__ == '__main__':
test_lib.main()
|
tensorflow-master
|
tensorflow/contrib/labeled_tensor/python/ops/ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN Cells and additional RNN operations.
<!--From core-->
@@RNNCell
@@LayerRNNCell
@@BasicRNNCell
@@BasicLSTMCell
@@GRUCell
@@LSTMCell
@@LSTMStateTuple
@@DropoutWrapper
@@MultiRNNCell
@@DeviceWrapper
@@ResidualWrapper
<!--Used to be in core, but kept in contrib.-->
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
<!--Created in contrib, eventual plans to move to core.-->
@@LayerNormBasicLSTMCell
@@LSTMBlockWrapper
@@LSTMBlockCell
@@GRUBlockCell
@@GRUBlockCellV2
@@FusedRNNCell
@@FusedRNNCellAdaptor
@@TimeReversedFusedRNN
@@LSTMBlockFusedCell
@@CoupledInputForgetGateLSTMCell
@@TimeFreqLSTMCell
@@GridLSTMCell
@@BidirectionalGridLSTMCell
@@NASCell
@@UGRNNCell
@@IntersectionRNNCell
@@PhasedLSTMCell
@@ConvLSTMCell
@@Conv1DLSTMCell
@@Conv2DLSTMCell
@@Conv3DLSTMCell
@@HighwayWrapper
@@GLSTMCell
@@SRUCell
@@IndRNNCell
@@IndyGRUCell
@@IndyLSTMCell
<!--RNNCell wrappers-->
@@AttentionCellWrapper
@@CompiledWrapper
<!--RNN functions-->
@@static_rnn
@@static_state_saving_rnn
@@static_bidirectional_rnn
@@stack_bidirectional_dynamic_rnn
@@stack_bidirectional_rnn
<!--RNN utilities-->
@@transpose_batch_time
@@best_effort_input_batch_size
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import EmbeddingWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import InputProjectionWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import OutputProjectionWrapper
from tensorflow.contrib.rnn.python.ops.fused_rnn_cell import *
from tensorflow.contrib.rnn.python.ops.gru_ops import *
from tensorflow.contrib.rnn.python.ops.lstm_ops import *
from tensorflow.contrib.rnn.python.ops.rnn import *
from tensorflow.contrib.rnn.python.ops.rnn_cell import *
from tensorflow.python.ops.rnn import _best_effort_input_batch_size as best_effort_input_batch_size
from tensorflow.python.ops.rnn import _transpose_batch_time as transpose_batch_time
from tensorflow.python.ops.rnn import static_bidirectional_rnn
from tensorflow.python.ops.rnn import static_rnn
from tensorflow.python.ops.rnn import static_state_saving_rnn
from tensorflow.python.ops.rnn_cell import *
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
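# Hedged sketch (not part of this module's exports): a minimal, hypothetical
# example of composing the cells re-exported above with `tf.nn.dynamic_rnn`.
# The layer count, cell size, and input shape are illustrative assumptions.
def _example_stacked_lstm_block():  # defined for documentation only, never called
  import tensorflow as tf
  cell = tf.contrib.rnn.MultiRNNCell(
      [tf.contrib.rnn.LSTMBlockCell(64) for _ in range(2)])
  inputs = tf.placeholder(tf.float32, [None, 20, 32])  # [batch, time, depth]
  outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
  return outputs, final_state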
|
tensorflow-master
|
tensorflow/contrib/rnn/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/rnn/python/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert checkpoints using RNNCells to new name convention.
Usage:
python checkpoint_convert.py [--write_v1_checkpoint] \
'/path/to/checkpoint' '/path/to/new_checkpoint'
For example, if there is a V2 checkpoint to be converted and the files include:
/tmp/my_checkpoint/model.ckpt.data-00000-of-00001
/tmp/my_checkpoint/model.ckpt.index
/tmp/my_checkpoint/model.ckpt.meta
use the following command:
mkdir /tmp/my_converted_checkpoint &&
python checkpoint_convert.py \
/tmp/my_checkpoint/model.ckpt /tmp/my_converted_checkpoint/model.ckpt
This will generate three converted checkpoint files corresponding to the three
old ones in the new directory:
/tmp/my_converted_checkpoint/model.ckpt.data-00000-of-00001
/tmp/my_converted_checkpoint/model.ckpt.index
/tmp/my_converted_checkpoint/model.ckpt.meta
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import re
import sys
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
# Mapping between old <=> new names. Externalized so that user scripts that
# may need to consume multiple checkpoint formats can use this metadata.
RNN_NAME_REPLACEMENTS = collections.OrderedDict([
############################################################################
# contrib/rnn/python/ops/core_rnn_cell_impl.py
# BasicRNNCell
('basic_rnn_cell/weights', 'basic_rnn_cell/kernel'),
('basic_rnn_cell/biases', 'basic_rnn_cell/bias'),
# GRUCell
('gru_cell/weights', 'gru_cell/kernel'),
('gru_cell/biases', 'gru_cell/bias'),
('gru_cell/gates/weights', 'gru_cell/gates/kernel'),
('gru_cell/gates/biases', 'gru_cell/gates/bias'),
('gru_cell/candidate/weights', 'gru_cell/candidate/kernel'),
('gru_cell/candidate/biases', 'gru_cell/candidate/bias'),
# BasicLSTMCell
('basic_lstm_cell/weights', 'basic_lstm_cell/kernel'),
('basic_lstm_cell/biases', 'basic_lstm_cell/bias'),
# LSTMCell
('lstm_cell/weights', 'lstm_cell/kernel'),
('lstm_cell/biases', 'lstm_cell/bias'),
('lstm_cell/projection/weights', 'lstm_cell/projection/kernel'),
('lstm_cell/projection/biases', 'lstm_cell/projection/bias'),
# OutputProjectionWrapper
('output_projection_wrapper/weights', 'output_projection_wrapper/kernel'),
('output_projection_wrapper/biases', 'output_projection_wrapper/bias'),
# InputProjectionWrapper
('input_projection_wrapper/weights', 'input_projection_wrapper/kernel'),
('input_projection_wrapper/biases', 'input_projection_wrapper/bias'),
############################################################################
# contrib/rnn/python/ops/lstm_ops.py
# LSTMBlockFusedCell ??
('lstm_block_wrapper/weights', 'lstm_block_wrapper/kernel'),
('lstm_block_wrapper/biases', 'lstm_block_wrapper/bias'),
############################################################################
# contrib/rnn/python/ops/rnn_cell.py
# LayerNormBasicLSTMCell
('layer_norm_basic_lstm_cell/weights', 'layer_norm_basic_lstm_cell/kernel'),
('layer_norm_basic_lstm_cell/biases', 'layer_norm_basic_lstm_cell/bias'),
# UGRNNCell, not found in g3, but still need it?
('ugrnn_cell/weights', 'ugrnn_cell/kernel'),
('ugrnn_cell/biases', 'ugrnn_cell/bias'),
# NASCell
('nas_rnn/weights', 'nas_rnn/kernel'),
('nas_rnn/recurrent_weights', 'nas_rnn/recurrent_kernel'),
# IntersectionRNNCell
('intersection_rnn_cell/weights', 'intersection_rnn_cell/kernel'),
('intersection_rnn_cell/biases', 'intersection_rnn_cell/bias'),
('intersection_rnn_cell/in_projection/weights',
'intersection_rnn_cell/in_projection/kernel'),
('intersection_rnn_cell/in_projection/biases',
'intersection_rnn_cell/in_projection/bias'),
# PhasedLSTMCell
('phased_lstm_cell/mask_gates/weights',
'phased_lstm_cell/mask_gates/kernel'),
('phased_lstm_cell/mask_gates/biases', 'phased_lstm_cell/mask_gates/bias'),
('phased_lstm_cell/new_input/weights', 'phased_lstm_cell/new_input/kernel'),
('phased_lstm_cell/new_input/biases', 'phased_lstm_cell/new_input/bias'),
('phased_lstm_cell/output_gate/weights',
'phased_lstm_cell/output_gate/kernel'),
('phased_lstm_cell/output_gate/biases',
'phased_lstm_cell/output_gate/bias'),
# AttentionCellWrapper
('attention_cell_wrapper/weights', 'attention_cell_wrapper/kernel'),
('attention_cell_wrapper/biases', 'attention_cell_wrapper/bias'),
('attention_cell_wrapper/attn_output_projection/weights',
'attention_cell_wrapper/attn_output_projection/kernel'),
('attention_cell_wrapper/attn_output_projection/biases',
'attention_cell_wrapper/attn_output_projection/bias'),
('attention_cell_wrapper/attention/weights',
'attention_cell_wrapper/attention/kernel'),
('attention_cell_wrapper/attention/biases',
'attention_cell_wrapper/attention/bias'),
############################################################################
# contrib/legacy_seq2seq/python/ops/seq2seq.py
('attention_decoder/weights', 'attention_decoder/kernel'),
('attention_decoder/biases', 'attention_decoder/bias'),
('attention_decoder/Attention_0/weights',
'attention_decoder/Attention_0/kernel'),
('attention_decoder/Attention_0/biases',
'attention_decoder/Attention_0/bias'),
('attention_decoder/AttnOutputProjection/weights',
'attention_decoder/AttnOutputProjection/kernel'),
('attention_decoder/AttnOutputProjection/biases',
'attention_decoder/AttnOutputProjection/bias'),
# contrib/legacy_seq2seq/python/ops/seq2seq.py before cl/140060366
('attention_decoder/Attention_0/Linear/Bias',
'attention_decoder/Attention_0/bias'),
('attention_decoder/Attention_0/Linear/Matrix',
'attention_decoder/Attention_0/kernel'),
('attention_decoder/AttnOutputProjection/Linear/Bias',
'attention_decoder/AttnOutputProjection/bias'),
('attention_decoder/AttnOutputProjection/Linear/Matrix',
'attention_decoder/AttnOutputProjection/kernel'),
('attention_decoder/LSTMCell/B', 'attention_decoder/lstm_cell/bias'),
('attention_decoder/LSTMCell/W_0', 'attention_decoder/lstm_cell/kernel'),
('attention_decoder/Linear/Bias', 'attention_decoder/bias'),
('attention_decoder/Linear/Matrix', 'attention_decoder/kernel')
])
_RNN_SHARDED_NAME_REPLACEMENTS = collections.OrderedDict([
('LSTMCell/W_', 'lstm_cell/weights/part_'),
('BasicLSTMCell/Linear/Matrix_', 'basic_lstm_cell/weights/part_'),
('GRUCell/W_', 'gru_cell/weights/part_'),
('MultiRNNCell/Cell', 'multi_rnn_cell/cell_'),
])
def _rnn_name_replacement(var_name):
for pattern in RNN_NAME_REPLACEMENTS:
if pattern in var_name:
old_var_name = var_name
var_name = var_name.replace(pattern, RNN_NAME_REPLACEMENTS[pattern])
logging.info('Converted: %s --> %s' % (old_var_name, var_name))
break
return var_name
def _rnn_name_replacement_sharded(var_name):
for pattern in _RNN_SHARDED_NAME_REPLACEMENTS:
if pattern in var_name:
old_var_name = var_name
var_name = var_name.replace(pattern,
_RNN_SHARDED_NAME_REPLACEMENTS[pattern])
logging.info('Converted: %s --> %s' % (old_var_name, var_name))
return var_name
def _split_sharded_vars(name_shape_map):
"""Split shareded variables.
Args:
name_shape_map: A dict from variable name to variable shape.
Returns:
not_sharded: Names of the non-sharded variables.
sharded: Names of the sharded variables.
"""
sharded = []
not_sharded = []
for name in name_shape_map:
    if re.search('_[0-9]+$', name):
if re.sub('_[0-9]+$', '_1', name) in name_shape_map:
sharded.append(name)
else:
not_sharded.append(name)
else:
not_sharded.append(name)
return not_sharded, sharded
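# Hedged illustration (hypothetical variable names): for a shape map with keys
# {'lstm_cell/W_0', 'lstm_cell/W_1', 'global_step'}, the intent is that the two
# 'W_<n>' entries land in `sharded` and 'global_step' lands in `not_sharded`.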
def convert_names(checkpoint_from_path,
checkpoint_to_path,
write_v1_checkpoint=False):
"""Migrates the names of variables within a checkpoint.
Args:
checkpoint_from_path: Path to source checkpoint to be read in.
checkpoint_to_path: Path to checkpoint to be written out.
write_v1_checkpoint: Whether the output checkpoint will be in V1 format.
Returns:
A dictionary that maps the new variable names to the Variable objects.
A dictionary that maps the old variable names to the new variable names.
"""
with ops.Graph().as_default():
logging.info('Reading checkpoint_from_path %s' % checkpoint_from_path)
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_from_path)
name_shape_map = reader.get_variable_to_shape_map()
not_sharded, sharded = _split_sharded_vars(name_shape_map)
new_variable_map = {}
conversion_map = {}
for var_name in not_sharded:
new_var_name = _rnn_name_replacement(var_name)
tensor = reader.get_tensor(var_name)
var = variables.Variable(tensor, name=var_name)
new_variable_map[new_var_name] = var
if new_var_name != var_name:
conversion_map[var_name] = new_var_name
for var_name in sharded:
      new_var_name = _rnn_name_replacement_sharded(var_name)
      tensor = reader.get_tensor(var_name)
      var = variables.Variable(tensor, name=var_name)
new_variable_map[new_var_name] = var
if new_var_name != var_name:
conversion_map[var_name] = new_var_name
write_version = (saver_pb2.SaverDef.V1
if write_v1_checkpoint else saver_pb2.SaverDef.V2)
saver = saver_lib.Saver(new_variable_map, write_version=write_version)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
logging.info('Writing checkpoint_to_path %s' % checkpoint_to_path)
saver.save(sess, checkpoint_to_path)
logging.info('Summary:')
logging.info(' Converted %d variable name(s).' % len(new_variable_map))
return new_variable_map, conversion_map
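# Hedged usage sketch: a hypothetical, never-called helper showing how
# `convert_names` can be driven programmatically from another script. The
# checkpoint paths below reuse the illustrative ones from the module docstring.
def _example_programmatic_conversion():
  new_variable_map, conversion_map = convert_names(
      '/tmp/my_checkpoint/model.ckpt',
      '/tmp/my_converted_checkpoint/model.ckpt',
      write_v1_checkpoint=False)
  for old_name, new_name in conversion_map.items():
    logging.info('Renamed %s -> %s', old_name, new_name)
  return new_variable_map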
def main(_):
convert_names(
FLAGS.checkpoint_from_path,
FLAGS.checkpoint_to_path,
write_v1_checkpoint=FLAGS.write_v1_checkpoint)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument('checkpoint_from_path', type=str,
help='Path to source checkpoint to be read in.')
parser.add_argument('checkpoint_to_path', type=str,
help='Path to checkpoint to be written out.')
parser.add_argument('--write_v1_checkpoint', action='store_true',
help='Write v1 checkpoint')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/rnn/python/tools/checkpoint_convert.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for checkpoint converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import tempfile
from tensorflow.contrib.rnn.python.tools import checkpoint_convert
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
class CheckpointConvertTest(test.TestCase):
def setUp(self):
self._old_ckpt_path = tempfile.mktemp()
self._new_ckpt_path = tempfile.mktemp()
ops.reset_default_graph()
def tearDown(self):
for file_name in glob.glob(self._old_ckpt_path + "*"):
os.remove(file_name)
for file_name in glob.glob(self._new_ckpt_path + "*"):
os.remove(file_name)
def testReplacementDictsContainUniqueAndNonEmptyVariableNames(self):
for old_name in checkpoint_convert.RNN_NAME_REPLACEMENTS:
new_name = checkpoint_convert.RNN_NAME_REPLACEMENTS[old_name]
self.assertTrue(old_name)
self.assertTrue(new_name)
self.assertNotEqual(old_name, new_name)
for old_name in checkpoint_convert._RNN_SHARDED_NAME_REPLACEMENTS:
new_name = checkpoint_convert._RNN_SHARDED_NAME_REPLACEMENTS[old_name]
self.assertTrue(old_name)
self.assertTrue(new_name)
self.assertNotEqual(old_name, new_name)
def testConversionFromV2WithConvertedVariableNamesSucceeds(self):
variables.Variable(10.0, name="a")
for old_name in checkpoint_convert.RNN_NAME_REPLACEMENTS:
variables.Variable(20.0, name=old_name)
with session.Session() as sess:
saver = saver_lib.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, self._old_ckpt_path)
new_var_map, conversion_map = checkpoint_convert.convert_names(
self._old_ckpt_path, self._new_ckpt_path)
self.assertTrue(glob.glob(self._new_ckpt_path + "*"))
self.assertItemsEqual(
set(checkpoint_convert.RNN_NAME_REPLACEMENTS.values()).union(["a"]),
new_var_map.keys())
self.assertEqual(checkpoint_convert.RNN_NAME_REPLACEMENTS, conversion_map)
def testConversionFromV2WithoutConvertedVariableNamesSucceeds(self):
variables.Variable(10.0, name="a")
with session.Session() as sess:
saver = saver_lib.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, self._old_ckpt_path)
new_var_map, conversion_map = checkpoint_convert.convert_names(
self._old_ckpt_path, self._new_ckpt_path)
self.assertItemsEqual(["a"], new_var_map.keys())
self.assertFalse(conversion_map)
def testConversionToV1Succeeds(self):
variables.Variable(10.0, name="a")
variables.Variable(
20.0, name=list(checkpoint_convert.RNN_NAME_REPLACEMENTS.keys())[-1])
with session.Session() as sess:
saver = saver_lib.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, self._old_ckpt_path)
new_var_map, conversion_map = checkpoint_convert.convert_names(
self._old_ckpt_path, self._new_ckpt_path, write_v1_checkpoint=True)
self.assertItemsEqual(
["a", list(checkpoint_convert.RNN_NAME_REPLACEMENTS.values())[-1]],
new_var_map.keys())
self.assertEqual(
{list(checkpoint_convert.RNN_NAME_REPLACEMENTS.keys())[-1]:
list(checkpoint_convert.RNN_NAME_REPLACEMENTS.values())[-1]},
conversion_map)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/rnn/python/tools/checkpoint_convert_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Block GRU module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.rnn.python.kernel_tests import benchmarking
from tensorflow.contrib.rnn.python.ops import gru_ops
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class GRUBlockCellTest(test.TestCase):
def testNoneDimsWithDynamicRNN(self):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 4
cell_size = 5
input_size = 6
num_steps = 7
cell = gru_ops.GRUBlockCell(cell_size)
x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_size))
_, output = rnn.dynamic_rnn(
cell, x, time_major=True, dtype=dtypes.float32)
sess.run(variables.global_variables_initializer())
feed = {}
feed[x] = np.random.randn(num_steps, batch_size, input_size)
sess.run(output, feed)
def testBlockGRUToGRUCellSingleStep(self):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 4
cell_size = 5
input_size = 6
seed = 1994
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
# Inputs
x = array_ops.zeros([batch_size, input_size])
h = array_ops.zeros([batch_size, cell_size])
# Values for the inputs.
x_value = np.random.rand(batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
# Output from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
output = rnn_cell.GRUCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([output], {x: x_value, h: h_value})
# Output from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
output = gru_ops.GRUBlockCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
block_res = sess.run([output], {x: x_value, h: h_value})
self.assertEqual(len(block_res), len(basic_res))
for block, basic in zip(block_res, basic_res):
self.assertAllClose(block, basic)
def testBlockGRUToGRUCellMultiStep(self):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 2
cell_size = 3
input_size = 3
time_steps = 4
# Random initializers.
seed = 1994
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
h = array_ops.zeros([batch_size, cell_size])
# Values for the inputs.
x_values = np.random.rand(time_steps, batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
# Output from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
feeds = {concat_x: x_values, h: h_value}
sess.run([variables.global_variables_initializer()])
block_res = sess.run([outputs_dynamic, state_dynamic], feeds)
# Output from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
cell = rnn_cell.GRUCell(cell_size)
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
feeds = {concat_x: x_values, h: h_value}
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([outputs_dynamic, state_dynamic], feeds)
# Check the lengths of the outputs_dynamic, and states.
self.assertEqual(len(block_res), len(basic_res))
self.assertEqual(len(block_res[0]), len(basic_res[0]))
self.assertEqual(len(block_res[1]), len(basic_res[1]))
# Check the outputs_dynamic values.
for block_output, basic_output in zip(block_res[0], basic_res[0]):
self.assertAllClose(block_output, basic_output)
# Check the state_dynamic value.
      self.assertAllClose(block_res[1], basic_res[1])
def testDerivativeOfBlockGRUToGRUCellSingleStep(self):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 2
cell_size = 3
input_size = 4
seed = 1994
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
x = array_ops.zeros([batch_size, input_size])
h = array_ops.zeros([batch_size, cell_size])
# Values for the inputs.
x_value = np.random.rand(batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
# Gradients from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
output = gru_ops.GRUBlockCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
all_variables = variables.global_variables()[0:4]
[w_ru, b_ru, w_c, b_c] = all_variables
d_new_h_wrt_x = gradients_impl.gradients([output], x)
d_new_h_wrt_h = gradients_impl.gradients([output], h)
d_new_h_wrt_w_ru = gradients_impl.gradients([output], w_ru)
d_new_h_wrt_w_c = gradients_impl.gradients([output], w_c)
d_new_h_wrt_b_ru = gradients_impl.gradients([output], b_ru)
d_new_h_wrt_b_c = gradients_impl.gradients([output], b_c)
d_block_res = sess.run([
d_new_h_wrt_x, d_new_h_wrt_h, d_new_h_wrt_w_ru, d_new_h_wrt_w_c,
d_new_h_wrt_b_ru, d_new_h_wrt_b_c
], {x: x_value,
h: h_value})
# Gradients from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
output = rnn_cell.GRUCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
all_variables = variables.global_variables()[4:8]
[w_ru, b_ru, w_c, b_c] = all_variables
d_new_h_wrt_x = gradients_impl.gradients([output], x)
d_new_h_wrt_h = gradients_impl.gradients([output], h)
d_new_h_wrt_w_ru = gradients_impl.gradients([output], w_ru)
d_new_h_wrt_w_c = gradients_impl.gradients([output], w_c)
d_new_h_wrt_b_ru = gradients_impl.gradients([output], b_ru)
d_new_h_wrt_b_c = gradients_impl.gradients([output], b_c)
d_basic_res = sess.run([
d_new_h_wrt_x, d_new_h_wrt_h, d_new_h_wrt_w_ru, d_new_h_wrt_w_c,
d_new_h_wrt_b_ru, d_new_h_wrt_b_c
], {x: x_value,
h: h_value})
# Check lengths of derivative results.
self.assertEqual(len(d_block_res), len(d_basic_res))
# Check the value of every derivative result.
for block, basic in zip(d_block_res, d_basic_res):
self.assertAllClose(block, basic)
def testDerivativeOfBlockGRUToGRUCellMultiSteps(self):
batch_size = 2
cell_size = 3
input_size = 4
time_steps = 2
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
# Random initializers.
seed = 1994
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
h = array_ops.zeros([batch_size, cell_size])
# Values for the inputs.
x_values = np.random.rand(time_steps, batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
feeds = {concat_x: x_values, h: h_value}
# Gradients from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
grad_output_wrt_x = gradients_impl.gradients([outputs_dynamic[0]],
concat_x)
grad_output_wrt_h = gradients_impl.gradients([outputs_dynamic[0]], h)
sess.run([variables.global_variables_initializer()])
block_grad_res_x, block_grad_res_h = sess.run(
[grad_output_wrt_x, grad_output_wrt_h], feeds)
# Gradients from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
cell = rnn_cell.GRUCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
grad_output_wrt_x = gradients_impl.gradients([outputs_dynamic[0]],
concat_x)
grad_output_wrt_h = gradients_impl.gradients([outputs_dynamic[0]], h)
sess.run([variables.global_variables_initializer()])
basic_grad_res_x, basic_grad_res_h = sess.run(
[grad_output_wrt_x, grad_output_wrt_h], feeds)
      # Check the number of gradients of the outputs w.r.t. x.
      self.assertEqual(len(block_grad_res_x), len(basic_grad_res_x))
      # Check the gradient values of the outputs w.r.t. x.
      for block, basic in zip(block_grad_res_x, basic_grad_res_x):
        self.assertAllClose(block, basic)
      # Check the number of gradients of the outputs w.r.t. h.
      self.assertEqual(len(block_grad_res_h), len(basic_grad_res_h))
      # Check the gradient values of the outputs w.r.t. h.
      for block, basic in zip(block_grad_res_h, basic_grad_res_h):
        self.assertAllClose(block, basic)
def testGradient(self):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 1
cell_size = 3
input_size = 2
# Inputs
x = array_ops.zeros([batch_size, input_size])
h = array_ops.zeros([batch_size, cell_size])
output = gru_ops.GRUBlockCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
all_variables = variables.global_variables()
[w_ru, b_ru, w_c, b_c] = all_variables[:4]
error_x = gradient_checker.compute_gradient_error(
x, (batch_size, input_size), output[0], (batch_size, cell_size))
error_h = gradient_checker.compute_gradient_error(h,
(batch_size, cell_size),
output[0],
(batch_size, cell_size))
error_w_ru = gradient_checker.compute_gradient_error(
w_ru, (input_size + cell_size, 2 * cell_size), output[0],
(batch_size, cell_size))
error_w_c = gradient_checker.compute_gradient_error(
w_c, (input_size + cell_size, cell_size), output[0],
(batch_size, cell_size))
error_b_ru = gradient_checker.compute_gradient_error(
b_ru, (2 * cell_size,), output[0], (batch_size, cell_size))
error_b_c = gradient_checker.compute_gradient_error(
b_c, (cell_size,), output[0], (batch_size, cell_size))
eps = 1e-4
self.assertLess(error_x, eps)
self.assertLess(error_h, eps)
self.assertLess(error_w_ru, eps)
self.assertLess(error_w_c, eps)
self.assertLess(error_b_ru, eps)
self.assertLess(error_b_c, eps)
#### Benchmarking GRUBlockCell vs GRUCell.
def training_gru_block_vs_gru_cell(batch_size,
cell_size,
input_size,
time_steps,
use_gpu=False,
iters=30):
"""Benchmark training speed between GRUBlockCell vs GRUCell."""
ops.reset_default_graph()
with session.Session(graph=ops.Graph()) as sess:
    # Specify the device to be used.
with benchmarking.device(use_gpu):
# Random initializers.
seed = 1994
initializer = init_ops.random_uniform_initializer(-1, 1, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = vs.get_variable("concat_x",
[time_steps, batch_size, input_size])
h = vs.get_variable("h", [batch_size, cell_size])
y = vs.get_variable("y", [time_steps, batch_size, cell_size])
# Output from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
cell = rnn_cell.GRUCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
sess.run([variables.global_variables_initializer()])
cost = math_ops.reduce_mean(math_ops.square(outputs_dynamic - y))
learning_rate = 0.01
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate).minimize(cost)
# time for a training step.
basic_time_training = benchmarking.seconds_per_run(
optimizer, sess, iters)
      # Output from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
sess.run([variables.global_variables_initializer()])
cost = math_ops.reduce_mean(math_ops.square(outputs_dynamic - y))
learning_rate = 0.01
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate).minimize(cost)
# time for a training step.
block_time_training = benchmarking.seconds_per_run(
optimizer, sess, iters)
performance_training = (
basic_time_training - block_time_training) * 100 / basic_time_training
print(",".join([
str(batch_size), str(cell_size), str(input_size), str(time_steps), str(
use_gpu), str(basic_time_training), str(block_time_training), str(
performance_training)
]))
return basic_time_training, block_time_training
def inference_gru_block_vs_gru_cell(batch_size,
cell_size,
input_size,
time_steps,
use_gpu=False,
iters=30):
"""Benchmark inference speed between GRUBlockCell vs GRUCell."""
ops.reset_default_graph()
with session.Session(graph=ops.Graph()) as sess:
with benchmarking.device(use_gpu):
# Random initializers.
seed = 1994
initializer = init_ops.random_uniform_initializer(-1, 1, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = vs.get_variable("concat_x",
[time_steps, batch_size, input_size])
h = vs.get_variable("h", [batch_size, cell_size])
# Output from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
cell = rnn_cell.GRUCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
sess.run([variables.global_variables_initializer()])
basic_time_inference = benchmarking.seconds_per_run(
outputs_dynamic, sess, iters)
# Output from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
sess.run([variables.global_variables_initializer()])
block_time_inference = benchmarking.seconds_per_run(
outputs_dynamic, sess, iters)
performance_inference = (basic_time_inference - block_time_inference
) * 100 / basic_time_inference
print(",".join([
str(batch_size), str(cell_size), str(input_size), str(time_steps), str(
use_gpu), str(basic_time_inference), str(block_time_inference), str(
performance_inference)
]))
return basic_time_inference, block_time_inference
def single_bprop_step_gru_block_vs_gru_cell(batch_size,
cell_size,
input_size,
use_gpu=False,
iters=30):
"""Benchmark single bprop step speed between GRUBlockCell vs GRUCell."""
ops.reset_default_graph()
with session.Session(graph=ops.Graph()) as sess:
with benchmarking.device(use_gpu):
initializer = init_ops.random_uniform_initializer(-1, 1, seed=1989)
# Inputs
x = vs.get_variable("x", [batch_size, input_size])
h = vs.get_variable("h", [batch_size, cell_size])
# Output from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
output = rnn_cell.GRUCell(cell_size)(array_ops.identity(x),
array_ops.identity(h))
sess.run([variables.global_variables_initializer()])
grad_output_wrt_input = gradients_impl.gradients([output], h)
basic_time_bprop = benchmarking.seconds_per_run(grad_output_wrt_input,
sess, iters)
# Output from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
output = gru_ops.GRUBlockCell(cell_size)(array_ops.identity(x),
array_ops.identity(h))
sess.run([variables.global_variables_initializer()])
grad_output_wrt_input = gradients_impl.gradients([output], h)
block_time_bprop = benchmarking.seconds_per_run(grad_output_wrt_input,
sess, iters)
performance_inference = (
basic_time_bprop - block_time_bprop) * 100 / basic_time_bprop
print(",".join([
str(batch_size), str(cell_size), str(input_size), str(use_gpu), str(
basic_time_bprop), str(block_time_bprop), str(performance_inference)
]))
return basic_time_bprop, block_time_bprop
class BenchmarkGRUBlock(test.Benchmark):
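  """Benchmarks comparing GRUBlockCell with the basic GRUCell."""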
def benchmarkTrainingBlockGRUVsGRUCell(self):
print("Comparison GRUBlockCell vs GRUCell")
print("--------------------------------------------------------------")
print("Training speed GRUBlockCell vs GRUCell")
print("batch_size, cell_size, input_size, time_steps, GPU, "
"basic_time_training, block_time_training, performance_training[%]")
iters = 10
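    # Benchmark every combination of the parameter values below.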
for config in benchmarking.dict_product({
"use_gpu": [True, False],
"batch_size": [1, 32, 128],
"cell_size": [128, 512],
"input_size": [128, 512],
"time_steps": [50]
}):
basic_time, block_time = training_gru_block_vs_gru_cell(
config["batch_size"], config["cell_size"], config["input_size"],
config["time_steps"], config["use_gpu"], iters)
self.report_benchmark(
name="GRUCell_training_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" %
(config["batch_size"], config["cell_size"], config["input_size"],
config["time_steps"], config["use_gpu"]),
iters=iters,
wall_time=basic_time)
self.report_benchmark(
name="GRUBlockCell_training_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" %
(config["batch_size"], config["cell_size"], config["input_size"],
config["time_steps"], config["use_gpu"]),
iters=iters,
wall_time=block_time)
def benchmarkInferenceBlockGRUVsGRUCell(self):
print("--------------------------------------------------------------")
print("Inference speed GRUBlockCell vs GRUCell")
print(
"batch_size, cell_size, input_size, time_steps, GPU, "
"basic_time_inference, block_time_inference, performance_inference[%]")
iters = 10
for config in benchmarking.dict_product({
"use_gpu": [True, False],
"batch_size": [1, 32, 128],
"cell_size": [128, 512],
"input_size": [128, 512],
"time_steps": [50]
}):
basic_time, block_time = inference_gru_block_vs_gru_cell(
config["batch_size"], config["cell_size"], config["input_size"],
config["time_steps"], config["use_gpu"], iters)
self.report_benchmark(
name="GRUCell_inference_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" %
(config["batch_size"], config["cell_size"], config["input_size"],
config["time_steps"], config["use_gpu"]),
iters=iters,
wall_time=basic_time)
self.report_benchmark(
name="GRUBlockCell_inference_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" %
(config["batch_size"], config["cell_size"], config["input_size"],
config["time_steps"], config["use_gpu"]),
iters=iters,
wall_time=block_time)
def benchmarkSingleBpropStepBlockGRUVsGRUCell(self):
print("--------------------------------------------------------------")
print("Single bprop step speed GRUBlockCell vs GRUCell")
print("batch_size, cell_size, input_size, GPU, basic_time, "
"block_time, performance_inference[%]")
iters = 10
for config in benchmarking.dict_product({
"use_gpu": [True, False],
"batch_size": [1, 32, 128],
"cell_size": [128, 512],
"input_size": [128, 512]
}):
basic_time, block_time = single_bprop_step_gru_block_vs_gru_cell(
config["batch_size"], config["cell_size"], config["input_size"],
config["use_gpu"], iters)
self.report_benchmark(
name="GRUCell_Bprop_single_step_time_BS%i_CS%i_IS%i_gpu_%s" %
(config["batch_size"], config["cell_size"], config["input_size"],
config["use_gpu"]),
iters=iters,
wall_time=basic_time)
self.report_benchmark(
name="GRUBlockCell_Bprop_single_step_time_BS%i_CS%i_IS%i_gpu_%s" %
(config["batch_size"], config["cell_size"], config["input_size"],
config["use_gpu"]),
iters=iters,
wall_time=block_time)
print("--------------------------------------------------------------")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/rnn/python/kernel_tests/gru_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.rnn.python.kernel_tests import benchmarking
from tensorflow.contrib.rnn.python.ops import lstm_ops
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
block_lstm = lstm_ops._block_lstm # pylint: disable=protected-access
class _MaskedRandomUniformInitializer(init_ops.RandomUniform):
"""Initializer for uniform dist tensors with trailing bits zeroed-out.
  Allows returning tensors with the last few mantissa bits set to 0. This
  potentially helps avoid precision issues when testing low-precision
  (float16) computation.
"""
def __init__(self,
minval=0,
maxval=None,
seed=None,
dtype=dtypes.float16,
num_valid_mantissa_bits=4):
"""Constructor.
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range of
random values to generate.
maxval: A python scalar or a scalar tensor. Upper bound of the range of
random values to generate. Defaults to 1 for float types.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
dtype: The data type. Only supports tf.float16 for now.
      num_valid_mantissa_bits: Number of non-zero mantissa bits. Defaults to 4.
Raises:
ValueError: An error if `dtype` is not tf.float16.
"""
if dtype not in (dtypes.float16,):
raise ValueError("dtype: %s not supported" % dtype.name)
super(_MaskedRandomUniformInitializer, self).__init__(
minval=minval, maxval=maxval, seed=seed, dtype=dtype)
self._num_mantissa_bits = 10
self._num_valid_mantissa_bits = num_valid_mantissa_bits
def __call__(self, shape, dtype=dtypes.float16, partition_info=None):
if dtype and dtype != dtypes.float16:
raise ValueError("dtype: %s not supported" % dtype.name)
res = super(_MaskedRandomUniformInitializer, self).__call__(
shape, dtype, partition_info)
# get uint16 view of the underlying buffer.
res = gen_array_ops.bitcast(res, dtypes.uint16)
# mask the last `shift` mantissa bits.
shift = self._num_mantissa_bits - self._num_valid_mantissa_bits
mask = (0xffff >> shift) << shift
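    # With the default num_valid_mantissa_bits=4, shift is 6 and mask is
    # 0xffc0, which keeps the sign, exponent, and top 4 mantissa bits of a
    # float16 value while zeroing its lowest 6 mantissa bits.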
res = gen_bitwise_ops.bitwise_and(res, mask)
# restore float16 view.
return gen_array_ops.bitcast(res, dtype)
def _get_initializer(init_bound, dtype, seed):
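  """Returns a uniform initializer; float16 uses the masked variant above."""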
if dtype == dtypes.float16:
return _MaskedRandomUniformInitializer(
-init_bound, init_bound, dtype=dtype, seed=seed)
else:
return init_ops.random_uniform_initializer(
-init_bound, init_bound, dtype=dtype, seed=seed)
def blocks_match(sess, use_peephole, dtype=dtypes.float32, cell_clip=None):
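  """Runs the basic, block, and fused LSTM implementations on shared weights.
  Returns states (basic and fused), outputs, input gradients, and weight
  gradients of the implementations so callers can check that they agree.
  """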
batch_size = 2
input_size = 3
cell_size = 4
sequence_length = 4
inputs = []
for _ in range(sequence_length):
inp = ops.convert_to_tensor(
np.random.randn(batch_size, input_size), dtype=dtype)
inputs.append(inp)
stacked_inputs = array_ops.stack(inputs)
init_bound = 1e-1 if dtype == dtypes.float16 else 1e-2
initializer = _get_initializer(init_bound, dtype=dtype, seed=19890212)
with variable_scope.variable_scope("test", initializer=initializer):
# magic naming so that the cells pick up these variables and reuse them
if use_peephole:
wci = variable_scope.get_variable(
"rnn/lstm_cell/w_i_diag", shape=[cell_size], dtype=dtype)
wcf = variable_scope.get_variable(
"rnn/lstm_cell/w_f_diag", shape=[cell_size], dtype=dtype)
wco = variable_scope.get_variable(
"rnn/lstm_cell/w_o_diag", shape=[cell_size], dtype=dtype)
w = variable_scope.get_variable(
"rnn/lstm_cell/kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtype)
b = variable_scope.get_variable(
"rnn/lstm_cell/bias",
shape=[cell_size * 4],
dtype=dtype,
initializer=init_ops.zeros_initializer())
basic_cell = rnn_cell.LSTMCell(
cell_size,
use_peepholes=use_peephole,
cell_clip=cell_clip,
dtype=dtype,
state_is_tuple=True,
reuse=True)
basic_outputs_op, basic_state_op = rnn.static_rnn(
basic_cell, inputs, dtype=dtype)
if use_peephole:
_, _, _, _, _, _, block_outputs_op = block_lstm(
ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),
inputs,
w,
b,
wci=wci,
wcf=wcf,
wco=wco,
cell_clip=cell_clip,
use_peephole=True)
else:
_, _, _, _, _, _, block_outputs_op = block_lstm(
ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),
inputs,
w,
b,
cell_clip=cell_clip)
fused_cell = lstm_ops.LSTMBlockFusedCell(
cell_size,
cell_clip=cell_clip,
use_peephole=use_peephole,
reuse=True,
name="rnn/lstm_cell")
fused_outputs_op, fused_state_op = fused_cell(stacked_inputs, dtype=dtype)
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_state = sess.run([basic_outputs_op, basic_state_op[0]])
basic_grads = sess.run(gradients_impl.gradients(basic_outputs_op, inputs))
xs = [w, b]
if use_peephole:
xs += [wci, wcf, wco]
basic_wgrads = sess.run(gradients_impl.gradients(basic_outputs_op, xs))
block_outputs = sess.run(block_outputs_op)
block_grads = sess.run(gradients_impl.gradients(block_outputs_op, inputs))
block_wgrads = sess.run(gradients_impl.gradients(block_outputs_op, xs))
xs = [w, b]
if use_peephole:
xs += [wci, wcf, wco]
fused_outputs, fused_state = sess.run([fused_outputs_op, fused_state_op[0]])
fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))
fused_wgrads = sess.run(gradients_impl.gradients(fused_outputs_op, xs))
return (basic_state, fused_state, basic_outputs, block_outputs,
fused_outputs, basic_grads, block_grads, fused_grads, basic_wgrads,
block_wgrads, fused_wgrads)
class LSTMBlockCellTest(test.TestCase, parameterized.TestCase):
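  """Tests LSTMBlockCell and LSTMBlockFusedCell against basic LSTM cells."""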
TEST_CASES = ({
"testcase_name": "Fp32",
"dtype": dtypes.float32,
"rtol": 1e-6,
"atol": 1e-6
}, {
"testcase_name": "Fp16",
"dtype": dtypes.float16,
"rtol": 8e-3,
"atol": 8e-4
})
def testNoneDimsWithDynamicRNN(self):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 4
num_steps = 5
input_dim = 6
cell_size = 7
cell = lstm_ops.LSTMBlockCell(cell_size)
x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_dim))
output, _ = rnn.dynamic_rnn(
cell, x, time_major=True, dtype=dtypes.float32)
sess.run(variables.global_variables_initializer())
feed = {}
feed[x] = np.random.randn(num_steps, batch_size, input_dim)
sess.run(output, feed)
def testLSTMBlockCell(self):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2)
for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 2]),
m1.name: 0.1 * np.ones([1, 2]),
m2.name: 0.1 * np.ones([1, 2]),
m3.name: 0.1 * np.ones([1, 2])
})
self.assertEqual(len(res), 5)
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
# These numbers are from testBasicLSTMCell and only test c/h.
self.assertAllClose(res[1], [[0.68967271, 0.68967271]])
self.assertAllClose(res[2], [[0.44848421, 0.44848421]])
self.assertAllClose(res[3], [[0.39897051, 0.39897051]])
self.assertAllClose(res[4], [[0.24024698, 0.24024698]])
def testCompatibleNames(self):
with self.session(use_gpu=True, graph=ops.Graph()):
cell = rnn_cell.LSTMCell(10)
pcell = rnn_cell.LSTMCell(10, use_peepholes=True)
inputs = [array_ops.zeros([4, 5])] * 6
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
basic_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
with self.session(use_gpu=True, graph=ops.Graph()):
cell = lstm_ops.LSTMBlockCell(10)
pcell = lstm_ops.LSTMBlockCell(10, use_peephole=True)
inputs = [array_ops.zeros([4, 5])] * 6
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
block_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
with self.session(use_gpu=True, graph=ops.Graph()):
cell = lstm_ops.LSTMBlockFusedCell(10)
pcell = lstm_ops.LSTMBlockFusedCell(10, use_peephole=True)
inputs = array_ops.stack([array_ops.zeros([4, 5])] * 6)
cell(inputs, dtype=dtypes.float32, scope="basic/lstm_cell")
pcell(inputs, dtype=dtypes.float32, scope="peephole/lstm_cell")
fused_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
self.assertEqual(basic_names, block_names)
self.assertEqual(basic_names, fused_names)
def testLSTMBasicToBlockCell(self):
with self.session(use_gpu=True) as sess:
x = array_ops.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
with variable_scope.variable_scope("basic", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[rnn_cell.BasicLSTMCell(2, state_is_tuple=True) for _ in range(2)],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
with variable_scope.variable_scope("block", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2)
for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def testLSTMBasicToBlockCellPeeping(self):
with self.session(use_gpu=True) as sess:
x = array_ops.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
with variable_scope.variable_scope("basic", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[
rnn_cell.LSTMCell(2, use_peepholes=True, state_is_tuple=True)
for _ in range(2)
],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
with variable_scope.variable_scope("block", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2, use_peephole=True) for _ in range(2)],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def LSTMBasicToBlockTestHelper(self,
dtype=dtypes.float32,
use_peephole=False,
cell_clip=None,
rtol=1e-6,
atol=1e-6):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
(basic_state, fused_state, basic_outputs, block_outputs, fused_outputs,
basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads,
fused_wgrads) = blocks_match(
sess, use_peephole=use_peephole, dtype=dtype, cell_clip=cell_clip)
self.assertAllClose(basic_outputs, block_outputs, rtol=rtol, atol=atol)
self.assertAllClose(basic_grads, block_grads, rtol=rtol, atol=atol)
for basic, block in zip(basic_wgrads, block_wgrads):
self.assertAllClose(basic, block, rtol=rtol, atol=atol)
self.assertAllClose(basic_outputs, fused_outputs, rtol=rtol, atol=atol)
self.assertAllClose(basic_state, fused_state, rtol=rtol, atol=atol)
self.assertAllClose(basic_grads, fused_grads, rtol=rtol, atol=atol)
for basic, fused in zip(basic_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=rtol, atol=atol)
@parameterized.named_parameters(*TEST_CASES)
def testLSTMBasicToBlock(self, dtype, rtol, atol):
self.LSTMBasicToBlockTestHelper(
dtype, use_peephole=False, rtol=rtol, atol=atol)
@parameterized.named_parameters(*TEST_CASES)
def testLSTMBasicToBlockPeeping(self, dtype, rtol, atol):
self.LSTMBasicToBlockTestHelper(
dtype, use_peephole=True, rtol=rtol, atol=atol)
@parameterized.named_parameters(*TEST_CASES)
def testLSTMBasicToBlockCellClip(self, dtype, rtol, atol):
self.LSTMBasicToBlockTestHelper(
dtype, use_peephole=True, cell_clip=0.5, rtol=rtol, atol=atol)
def testLSTMFusedSequenceLengths(self):
"""Verify proper support for sequence lengths in LSTMBlockFusedCell."""
with self.session(use_gpu=True) as sess:
batch_size = 3
input_size = 4
cell_size = 5
max_sequence_length = 6
inputs = []
for _ in range(max_sequence_length):
inp = ops.convert_to_tensor(
np.random.randn(batch_size, input_size), dtype=dtypes.float32)
inputs.append(inp)
seq_lengths = constant_op.constant([3, 4, 5])
cell_inputs = array_ops.stack(inputs)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890213)
with variable_scope.variable_scope("lstm_cell", initializer=initializer):
# magic naming so that the cells pick up these variables and reuse them
variable_scope.get_variable(
"kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtypes.float32)
variable_scope.get_variable(
"bias",
shape=[cell_size * 4],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer())
cell = lstm_ops.LSTMBlockFusedCell(
cell_size, cell_clip=0, use_peephole=False, reuse=True,
name="lstm_cell")
fused_outputs_op, fused_state_op = cell(
cell_inputs, dtype=dtypes.float32, sequence_length=seq_lengths)
cell_vars = [
v for v in variables.trainable_variables()
if v.name.endswith("kernel") or v.name.endswith("bias")
]
# Verify that state propagation works if we turn our sequence into
# tiny (single-time) subsequences, i.e. unfuse the cell
unfused_outputs_op = []
state = None
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True):
for i, inp in enumerate(inputs):
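          # At step i, feed a length of 1 for batch elements whose sequence is
          # still active and 0 otherwise.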
lengths = [int(i < l) for l in seq_lengths.eval()]
output, state = cell(
array_ops.expand_dims(inp, 0),
initial_state=state,
dtype=dtypes.float32,
sequence_length=lengths)
unfused_outputs_op.append(output[0])
unfused_outputs_op = array_ops.stack(unfused_outputs_op)
sess.run([variables.global_variables_initializer()])
unfused_outputs, unfused_state = sess.run([unfused_outputs_op, state[0]])
unfused_grads = sess.run(
gradients_impl.gradients(unfused_outputs_op, inputs))
unfused_wgrads = sess.run(
gradients_impl.gradients(unfused_outputs_op, cell_vars))
fused_outputs, fused_state = sess.run(
[fused_outputs_op, fused_state_op[0]])
fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))
fused_wgrads = sess.run(
gradients_impl.gradients(fused_outputs_op, cell_vars))
self.assertAllClose(fused_outputs, unfused_outputs)
self.assertAllClose(fused_state, unfused_state)
self.assertAllClose(fused_grads, unfused_grads)
for fused, unfused in zip(fused_wgrads, unfused_wgrads):
self.assertAllClose(fused, unfused, rtol=1e-6, atol=1e-6)
#### Benchmarking.
class BenchmarkLSTMBlock(test.Benchmark):
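  """Benchmarks LSTMBlockCell fprop and bprop via dynamic_rnn()."""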
def benchmarkLSTMBlockCellFpropWithDynamicRNN(self):
print("BlockLSTMCell forward propagation via dynamic_rnn().")
print("--------------------------------------------------------------")
print("LSTMBlockCell Seconds per inference.")
print("batch_size,cell_size,input_size,time_steps,use_gpu,wall_time")
iters = 10
for config in benchmarking.dict_product({
"batch_size": [1, 8, 13, 32, 67, 128],
"cell_size": [128, 250, 512, 650, 1024, 1350],
"time_steps": [40],
"use_gpu": [True, False],
"dtype": ["float32", "float16"],
}):
dtype = dtypes.float32 if config["dtype"] == "float32" else dtypes.float16
with ops.Graph().as_default():
with benchmarking.device(use_gpu=config["use_gpu"]):
inputs = variable_scope.get_variable(
"x",
dtype=dtype,
shape=[
config["time_steps"], config["batch_size"],
config["cell_size"]
])
cell = lstm_ops.LSTMBlockCell(config["cell_size"], dtype=dtype)
outputs = rnn.dynamic_rnn(cell, inputs, time_major=True, dtype=dtype)
init_op = variables.global_variables_initializer()
with session.Session() as sess:
sess.run(init_op)
wall_time = benchmarking.seconds_per_run(outputs, sess, iters)
# Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable
# is set, this will produce a copy-paste-able CSV file.
print(",".join(
map(str, [
config["dtype"], config["batch_size"], config["cell_size"],
config["cell_size"], config["time_steps"], config["use_gpu"],
wall_time
])))
benchmark_name_template = "_".join([
"LSTMBlockCell_fprop", "DT_%(dtype)s", "BS%(batch_size)i",
"CS%(cell_size)i", "IS%(cell_size)i", "TS%(time_steps)i",
"gpu_%(use_gpu)s"
])
self.report_benchmark(
name=benchmark_name_template % config,
iters=iters,
wall_time=wall_time,
extras=config)
def benchmarkLSTMBlockCellBpropWithDynamicRNN(self):
print("BlockLSTMCell backward propagation via dynamic_rnn().")
print("--------------------------------------------------------------")
print("LSTMBlockCell Seconds per inference.")
print("batch_size,cell_size,input_size,time_steps,use_gpu,wall_time")
iters = 10
for config in benchmarking.dict_product({
"batch_size": [1, 8, 13, 32, 67, 128],
"cell_size": [128, 250, 512, 650, 1024, 1350],
"time_steps": [40],
"use_gpu": [True, False],
"dtype": ["float32", "float16"],
}):
dtype = dtypes.float32 if config["dtype"] == "float32" else dtypes.float16
with ops.Graph().as_default():
with benchmarking.device(use_gpu=config["use_gpu"]):
time_steps = config["time_steps"]
batch_size = config["batch_size"]
cell_size = input_size = config["cell_size"]
inputs = variable_scope.get_variable(
"x", [time_steps, batch_size, cell_size],
trainable=False,
dtype=dtype)
with variable_scope.variable_scope(
"rnn", reuse=variable_scope.AUTO_REUSE):
w = variable_scope.get_variable(
"rnn/lstm_cell/kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtype)
b = variable_scope.get_variable(
"rnn/lstm_cell/bias",
shape=[cell_size * 4],
dtype=dtype,
initializer=init_ops.zeros_initializer())
cell = lstm_ops.LSTMBlockCell(cell_size, dtype=dtype)
outputs = rnn.dynamic_rnn(
cell, inputs, time_major=True, dtype=dtype)
grads = gradients_impl.gradients(outputs, [inputs, w, b])
init_op = variables.global_variables_initializer()
with session.Session() as sess:
sess.run(init_op)
wall_time = benchmarking.seconds_per_run(grads, sess, iters)
# Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable
# is set, this will produce a copy-paste-able CSV file.
print(",".join(
map(str, [
config["dtype"], batch_size, cell_size, cell_size, time_steps,
config["use_gpu"], wall_time
])))
benchmark_name_template = "_".join([
"LSTMBlockCell_bprop", "DT_%(dtype)s", "BS%(batch_size)i",
"CS%(cell_size)i", "IS%(cell_size)i", "TS%(time_steps)i",
"gpu_%(use_gpu)s"
])
self.report_benchmark(
name=benchmark_name_template % config,
iters=iters,
wall_time=wall_time,
extras=config)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.rnn.python.ops import rnn as contrib_rnn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class StackBidirectionalRNNTest(test.TestCase):
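  """Tests for stacked bidirectional RNNs."""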
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _createStackBidirectionalRNN(self,
use_gpu,
use_shape,
use_sequence_length,
initial_states_fw=None,
initial_states_bw=None,
scope=None):
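    """Builds a two-layer stacked bidirectional RNN of LSTM cells.
    Returns the input value, input placeholders, stacked outputs, forward and
    backward states, and the sequence-length placeholder (None if unused).
    """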
self.layers = [2, 3]
input_size = 5
batch_size = 2
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
sequence_length = array_ops.placeholder(
dtypes.int64) if use_sequence_length else None
self.cells_fw = [
rnn_cell.LSTMCell(
num_units,
input_size,
initializer=initializer,
state_is_tuple=False) for num_units in self.layers
]
self.cells_bw = [
rnn_cell.LSTMCell(
num_units,
input_size,
initializer=initializer,
state_is_tuple=False) for num_units in self.layers
]
inputs = max_length * [
array_ops.placeholder(
dtypes.float32,
shape=(batch_size, input_size) if use_shape else (None, input_size))
]
outputs, state_fw, state_bw = contrib_rnn.stack_bidirectional_rnn(
self.cells_fw,
self.cells_bw,
inputs,
initial_states_fw,
initial_states_bw,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertAlmostEqual(
out.get_shape().as_list(),
[batch_size if use_shape else None, 2 * self.layers[-1]])
input_value = np.random.randn(batch_size, input_size)
outputs = array_ops.stack(outputs)
return input_value, inputs, outputs, state_fw, state_bw, sequence_length
def _testStackBidirectionalRNN(self, use_gpu, use_shape):
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createStackBidirectionalRNN(use_gpu, use_shape, True))
variables.global_variables_initializer().run()
# Run with pre-specified sequence lengths of 2, 3.
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw],
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward states of the first layer
# must be the same.
      # For the next layers, since the input is a concat of the forward and
      # backward outputs of the previous layers, the symmetry is broken and
      # the following states and outputs differ.
# We cannot access the intermediate values between layers but we can
# check that the forward and backward states of the first layer match.
self.assertAllClose(s_fw[0], s_bw[0])
      # If the outputs were not concatenated between layers, the forward and
      # backward outputs would be the same but symmetric.
      # Check that this is not the case.
# Due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 4 <= depth < 6
# First sequence in batch is length=2
# Check that the time=0 forward output is not equal to time=1 backward.
self.assertNotEqual(out[0][0][0], out[1][0][3])
self.assertNotEqual(out[0][0][1], out[1][0][4])
self.assertNotEqual(out[0][0][2], out[1][0][5])
# Check that the time=1 forward output is not equal to time=0 backward.
self.assertNotEqual(out[1][0][0], out[0][0][3])
self.assertNotEqual(out[1][0][1], out[0][0][4])
self.assertNotEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the time=0 forward output is not equal to time=2 backward.
self.assertNotEqual(out[0][1][0], out[2][1][3])
self.assertNotEqual(out[0][1][1], out[2][1][4])
self.assertNotEqual(out[0][1][2], out[2][1][5])
# Check that the time=1 forward output is not equal to time=1 backward.
self.assertNotEqual(out[1][1][0], out[1][1][3])
self.assertNotEqual(out[1][1][1], out[1][1][4])
self.assertNotEqual(out[1][1][2], out[1][1][5])
# Check that the time=2 forward output is not equal to time=0 backward.
self.assertNotEqual(out[2][1][0], out[0][1][3])
self.assertNotEqual(out[2][1][1], out[0][1][4])
self.assertNotEqual(out[2][1][2], out[0][1][5])
def _testStackBidirectionalRNNStates(self, use_gpu):
# Check that the states are correctly initialized.
    # - Create a net and iterate for 3 steps. Keep the state (state_3).
    # - Reset states, and iterate for 5 steps. Last state is state_5.
    # - Reset the states to state_3 and iterate for 2 more steps,
# last state will be state_5'.
# - Check that the state_5 and state_5' (forward and backward) are the
# same for the first layer (it does not apply for the second layer since
# it has forward-backward dependencies).
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
batch_size = 2
# Create states placeholders.
initial_states_fw = [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, layer * 2))
for layer in self.layers
]
initial_states_bw = [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, layer * 2))
for layer in self.layers
]
# Create the net
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createStackBidirectionalRNN(use_gpu, True, True,
initial_states_fw,
initial_states_bw))
variables.global_variables_initializer().run()
# Run 3 steps.
feed_dict = {inputs[0]: input_value, sequence_length: [3, 2]}
# Initialize to empty state.
for i, layer in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
feed_dict[initial_states_bw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
_, st_3_fw, st_3_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net and run 5 steps.
feed_dict = {inputs[0]: input_value, sequence_length: [5, 3]}
for i, layer in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
feed_dict[initial_states_bw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
_, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net to state_3 and run 2 more steps.
feed_dict = {inputs[0]: input_value, sequence_length: [2, 1]}
for i, _ in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = st_3_fw[i]
feed_dict[initial_states_bw[i]] = st_3_bw[i]
out_5p, st_5p_fw, st_5p_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
      # Check that the 3+2-step and 5-step first-layer states match.
self.assertAllEqual(st_5_fw[0], st_5p_fw[0])
self.assertAllEqual(st_5_bw[0], st_5p_bw[0])
def testStackBidirectionalRNN(self):
self._testStackBidirectionalRNN(use_gpu=False, use_shape=False)
self._testStackBidirectionalRNN(use_gpu=True, use_shape=False)
self._testStackBidirectionalRNN(use_gpu=False, use_shape=True)
self._testStackBidirectionalRNN(use_gpu=True, use_shape=True)
self._testStackBidirectionalRNNStates(use_gpu=False)
self._testStackBidirectionalRNNStates(use_gpu=True)
def _createStackBidirectionalDynamicRNN(self,
use_gpu,
use_shape,
use_state_tuple,
initial_states_fw=None,
initial_states_bw=None,
scope=None):
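    """Builds a two-layer stacked bidirectional dynamic RNN of LSTM cells.
    Returns the input value, input placeholders, outputs, forward and backward
    states, and the sequence-length placeholder.
    """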
self.layers = [2, 3]
input_size = 5
batch_size = 2
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
sequence_length = array_ops.placeholder(dtypes.int64)
self.cells_fw = [
rnn_cell.LSTMCell(
num_units,
input_size,
initializer=initializer,
state_is_tuple=False) for num_units in self.layers
]
self.cells_bw = [
rnn_cell.LSTMCell(
num_units,
input_size,
initializer=initializer,
state_is_tuple=False) for num_units in self.layers
]
inputs = max_length * [
array_ops.placeholder(
dtypes.float32,
shape=(batch_size, input_size) if use_shape else (None, input_size))
]
inputs_c = array_ops.stack(inputs)
inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])
outputs, st_fw, st_bw = contrib_rnn.stack_bidirectional_dynamic_rnn(
self.cells_fw,
self.cells_bw,
inputs_c,
initial_states_fw=initial_states_fw,
initial_states_bw=initial_states_bw,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
    # The outputs tensor has shape (batch_size, max_length, 2 * layers[-1]).
output_shape = [None, max_length, 2 * self.layers[-1]]
if use_shape:
output_shape[0] = batch_size
self.assertAllEqual(outputs.get_shape().as_list(), output_shape)
input_value = np.random.randn(batch_size, input_size)
return input_value, inputs, outputs, st_fw, st_bw, sequence_length
def _testStackBidirectionalDynamicRNN(self, use_gpu, use_shape,
use_state_tuple):
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createStackBidirectionalDynamicRNN(use_gpu, use_shape,
use_state_tuple))
variables.global_variables_initializer().run()
      # Run with pre-specified sequence lengths of 2, 3.
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw],
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward states of the first layer
      # have to be the same.
      # For the next layers, since the input is a concat of the forward and
      # backward outputs of the previous layers, the symmetry is broken and
      # the following states and outputs differ.
# We cannot access the intermediate values between layers but we can
# check that the forward and backward states of the first layer match.
self.assertAllClose(s_fw[0], s_bw[0])
out = np.swapaxes(out, 0, 1)
      # If the outputs were not concatenated between layers, the forward and
      # backward outputs would be the same but symmetric.
      # Check that this is not the case.
# Due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 4 <= depth < 6
# First sequence in batch is length=2
# Check that the time=0 forward output is not equal to time=1 backward.
self.assertNotEqual(out[0][0][0], out[1][0][3])
self.assertNotEqual(out[0][0][1], out[1][0][4])
self.assertNotEqual(out[0][0][2], out[1][0][5])
# Check that the time=1 forward output is not equal to time=0 backward.
self.assertNotEqual(out[1][0][0], out[0][0][3])
self.assertNotEqual(out[1][0][1], out[0][0][4])
self.assertNotEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the time=0 forward output is not equal to time=2 backward.
self.assertNotEqual(out[0][1][0], out[2][1][3])
self.assertNotEqual(out[0][1][1], out[2][1][4])
self.assertNotEqual(out[0][1][2], out[2][1][5])
# Check that the time=1 forward output is not equal to time=1 backward.
self.assertNotEqual(out[1][1][0], out[1][1][3])
self.assertNotEqual(out[1][1][1], out[1][1][4])
self.assertNotEqual(out[1][1][2], out[1][1][5])
# Check that the time=2 forward output is not equal to time=0 backward.
self.assertNotEqual(out[2][1][0], out[0][1][3])
self.assertNotEqual(out[2][1][1], out[0][1][4])
self.assertNotEqual(out[2][1][2], out[0][1][5])
def _testStackBidirectionalDynamicRNNStates(self, use_gpu):
# Check that the states are correctly initialized.
    # - Create a net and iterate for 3 steps. Keep the state (state_3).
    # - Reset states, and iterate for 5 steps. Last state is state_5.
    # - Reset the states to state_3 and iterate for 2 more steps,
# last state will be state_5'.
# - Check that the state_5 and state_5' (forward and backward) are the
# same for the first layer (it does not apply for the second layer since
# it has forward-backward dependencies).
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
batch_size = 2
# Create states placeholders.
initial_states_fw = [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, layer * 2))
for layer in self.layers
]
initial_states_bw = [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, layer * 2))
for layer in self.layers
]
# Create the net
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createStackBidirectionalDynamicRNN(
use_gpu,
use_shape=True,
use_state_tuple=False,
initial_states_fw=initial_states_fw,
initial_states_bw=initial_states_bw))
variables.global_variables_initializer().run()
# Run 3 steps.
feed_dict = {inputs[0]: input_value, sequence_length: [3, 2]}
# Initialize to empty state.
for i, layer in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
feed_dict[initial_states_bw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
_, st_3_fw, st_3_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net and run 5 steps.
feed_dict = {inputs[0]: input_value, sequence_length: [5, 3]}
for i, layer in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
feed_dict[initial_states_bw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
_, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net to state_3 and run 2 more steps.
feed_dict = {inputs[0]: input_value, sequence_length: [2, 1]}
for i, _ in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = st_3_fw[i]
feed_dict[initial_states_bw[i]] = st_3_bw[i]
out_5p, st_5p_fw, st_5p_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
      # Check that the 3+2-step and 5-step first-layer states match.
self.assertAllEqual(st_5_fw[0], st_5p_fw[0])
self.assertAllEqual(st_5_bw[0], st_5p_bw[0])
def testBidirectionalRNN(self):
# Generate 2^3 option values
# from [True, True, True] to [False, False, False]
options = itertools.product([True, False], repeat=3)
for option in options:
self._testStackBidirectionalDynamicRNN(
use_gpu=option[0], use_shape=option[1], use_state_tuple=option[2])
# Check States.
self._testStackBidirectionalDynamicRNNStates(use_gpu=False)
self._testStackBidirectionalDynamicRNNStates(use_gpu=True)
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    # REMARKS: factory(scope) is a function accepting a scope argument,
    # which can be None, a string, or a VariableScope instance.
with self.session(use_gpu=True, graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
      # Check that all variable names start with the proper scope.
variables.global_variables_initializer()
all_vars = variables.global_variables()
prefix = prefix or "stack_bidirectional_rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("StackRNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testStackBidirectionalRNNScope(self):
def factory(scope):
return self._createStackBidirectionalRNN(
use_gpu=True, use_shape=True, use_sequence_length=True, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
def testBidirectionalDynamicRNNScope(self):
def factory(scope):
return self._createStackBidirectionalDynamicRNN(
use_gpu=True, use_shape=True, use_state_tuple=True, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/rnn/python/kernel_tests/rnn_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.rnn.python.ops import core_rnn_cell as legacy_rnn_cell
from tensorflow.contrib.rnn.python.ops import rnn_cell as contrib_rnn_cell
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import initializers
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import training
from tensorflow.python.util import nest
class RNNCellTest(test.TestCase):
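  """Tests for contrib RNN cells and cell wrappers."""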
def _assert_cell_builds(self, cell_class, dtype, batch_size, in_size,
out_size):
cell = cell_class(out_size, dtype=dtype)
in_shape = tensor_shape.TensorShape((batch_size, in_size))
cell.build(in_shape)
state_output = cell.get_initial_state(
inputs=None, batch_size=batch_size, dtype=dtype)
cell_output, _ = cell(array_ops.zeros(in_shape, dtype), state_output)
self.assertAllEqual([batch_size, out_size], cell_output.shape.as_list())
def testCellsBuild(self):
f32 = dtypes.float32
f64 = dtypes.float64
self._assert_cell_builds(contrib_rnn_cell.IndRNNCell, f32, 5, 7, 3)
self._assert_cell_builds(contrib_rnn_cell.IndRNNCell, f64, 5, 7, 3)
self._assert_cell_builds(contrib_rnn_cell.IndyGRUCell, f32, 5, 7, 3)
self._assert_cell_builds(contrib_rnn_cell.IndyGRUCell, f64, 5, 7, 3)
self._assert_cell_builds(contrib_rnn_cell.IndyLSTMCell, f32, 5, 7, 3)
self._assert_cell_builds(contrib_rnn_cell.IndyLSTMCell, f64, 5, 7, 3)
def testIndRNNCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = contrib_rnn_cell.IndRNNCell(2)
g, _ = cell(x, m)
self.assertEqual([
"root/ind_rnn_cell/%s_w:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/ind_rnn_cell/%s_u:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/ind_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
def testIndyGRUCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.185265, 0.17704]])
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test IndyGRUCell with input_size != num_units.
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.155127, 0.157328]])
def testIndyLSTMCell(self):
for dtype in [dtypes.float16, dtypes.float32]:
np_dtype = dtype.as_numpy_dtype
with self.session(graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2], dtype=dtype)
state_0 = (array_ops.zeros([1, 2], dtype=dtype),) * 2
state_1 = (array_ops.zeros([1, 2], dtype=dtype),) * 2
cell = rnn_cell_impl.MultiRNNCell(
[contrib_rnn_cell.IndyLSTMCell(2) for _ in range(2)])
self.assertEqual(cell.dtype, None)
self.assertEqual("cell-0", cell._checkpoint_dependencies[0].name)
self.assertEqual("cell-1", cell._checkpoint_dependencies[1].name)
cell.get_config() # Should not throw an error
g, (out_state_0, out_state_1) = cell(x, (state_0, state_1))
# Layer infers the input type.
self.assertEqual(cell.dtype, dtype.name)
expected_variable_names = [
"root/multi_rnn_cell/cell_0/indy_lstm_cell/%s_w:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/indy_lstm_cell/%s_u:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/indy_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/indy_lstm_cell/%s_w:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/indy_lstm_cell/%s_u:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/indy_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME
]
self.assertEqual(expected_variable_names,
[v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_state_0, out_state_1], {
x.name: np.array([[1., 1.]]),
state_0[0].name: 0.1 * np.ones([1, 2]),
state_0[1].name: 0.1 * np.ones([1, 2]),
state_1[0].name: 0.1 * np.ones([1, 2]),
state_1[1].name: 0.1 * np.ones([1, 2]),
})
self.assertEqual(len(res), 3)
global_variables = variables.global_variables()
self.assertEqual(expected_variable_names,
[v.name for v in global_variables])
# Only check the range of outputs as this is just a smoke test.
self.assertAllInRange(res[0], -1.0, 1.0)
self.assertAllInRange(res[1], -1.0, 1.0)
self.assertAllInRange(res[2], -1.0, 1.0)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test IndyLSTMCell with input_size != num_units.
x = array_ops.zeros([1, 3], dtype=dtype)
state = (array_ops.zeros([1, 2], dtype=dtype),) * 2
g, out_state = contrib_rnn_cell.IndyLSTMCell(2)(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_state], {
x.name: np.array([[1., 1., 1.]], dtype=np_dtype),
state[0].name: 0.1 * np.ones([1, 2], dtype=np_dtype),
state[1].name: 0.1 * np.ones([1, 2], dtype=np_dtype),
})
self.assertEqual(len(res), 2)
def testLSTMCellLayerNorm(self):
with self.cached_session() as sess:
num_units = 2
num_proj = 3
batch_size = 1
input_size = 4
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
c = array_ops.zeros([batch_size, num_units])
h = array_ops.zeros([batch_size, num_proj])
state = rnn_cell_impl.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormLSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
layer_norm=True,
norm_gain=1.0,
norm_shift=0.0)
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.ones((batch_size, input_size)),
c.name: 0.1 * np.ones((batch_size, num_units)),
h.name: 0.1 * np.ones((batch_size, num_proj))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1][0].shape, (batch_size, num_units))
self.assertEqual(res[1][1].shape, (batch_size, num_proj))
        # batch_size is 1 here, so this loop never runs; if it did, it would
        # check that all batch rows produce (nearly) identical outputs and
        # states.
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) < 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) < 1e-6)
def testOutputProjectionWrapper(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = legacy_rnn_cell.OutputProjectionWrapper(
rnn_cell_impl.GRUCell(3), 2)
g, new_m = cell(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.231907, 0.231907]])
def testInputProjectionWrapper(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 3])
cell = legacy_rnn_cell.InputProjectionWrapper(
rnn_cell_impl.GRUCell(3), num_proj=3)
g, new_m = cell(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
def testEmbeddingWrapper(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1], dtype=dtypes.int32)
m = array_ops.zeros([1, 2])
embedding_cell = legacy_rnn_cell.EmbeddingWrapper(
rnn_cell_impl.GRUCell(2), embedding_classes=3, embedding_size=2)
self.assertEqual(embedding_cell.output_size, 2)
g, new_m = embedding_cell(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 2))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.17139, 0.17139]])
def testEmbeddingWrapperWithDynamicRnn(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root"):
inputs = ops.convert_to_tensor([[[0], [0]]], dtype=dtypes.int64)
input_lengths = ops.convert_to_tensor([2], dtype=dtypes.int64)
embedding_cell = legacy_rnn_cell.EmbeddingWrapper(
rnn_cell_impl.BasicLSTMCell(1, state_is_tuple=True),
embedding_classes=1,
embedding_size=2)
outputs, _ = rnn.dynamic_rnn(
cell=embedding_cell,
inputs=inputs,
sequence_length=input_lengths,
dtype=dtypes.float32)
sess.run([variables.global_variables_initializer()])
# This will fail if output's dtype is inferred from input's.
sess.run(outputs)
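        # The ids above are int64, but EmbeddingWrapper looks them up in a
        # float32 embedding, so the float32 dtype passed to dynamic_rnn is
        # the right output dtype rather than the dtype of the ids.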
def testSRUCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.SRUCell(2)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.509682, 0.509682]])
def testSRUCellKerasRNN(self):
"""Tests that SRUCell works with keras RNN layer."""
cell = contrib_rnn_cell.SRUCell(10)
seq_input = ops.convert_to_tensor(
np.random.rand(2, 3, 5), name="seq_input", dtype=dtypes.float32)
rnn_layer = keras_layers.RNN(cell=cell)
rnn_outputs_keras = rnn_layer(seq_input)
with self.cached_session() as sess:
sess.run([variables.global_variables_initializer()])
self.assertEqual(sess.run(rnn_outputs_keras).shape, (2, 10))
def testSRUCellBiasType(self):
"""Tests that the bias' dtype is properly set."""
cell = contrib_rnn_cell.SRUCell(10)
cell.build((2, 3, 5))
self.assertEqual(cell._bias.dtype, dtypes.float32_ref)
cell = contrib_rnn_cell.SRUCell(10, dtype=dtypes.int32)
cell.build((2, 3, 5))
self.assertEqual(cell._bias.dtype, dtypes.int32_ref)
cell_input = ops.convert_to_tensor(
np.random.rand(2, 5), name="cell_input", dtype=dtypes.float16)
cell_state = ops.convert_to_tensor(
np.random.rand(2, 10), name="cell_state", dtype=dtypes.float16)
cell = contrib_rnn_cell.SRUCell(10)
cell(cell_input, [cell_state])
self.assertEqual(cell._bias.dtype, dtypes.float16_ref)
def testSRUCellWithDiffSize(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.SRUCell(2)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.55255556, 0.55255556]])
def testCoupledInputForgetGateLSTMCell(self):
with self.cached_session() as sess:
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
expected_output = np.array(
[[0.121753, 0.121753], [0.103349, 0.103349], [0.100178, 0.100178]],
dtype=np.float32)
expected_state = np.array(
[[0.137523, 0.137523, 0.121753, 0.121753], [
0.105450, 0.105450, 0.103349, 0.103349
], [0.100742, 0.100742, 0.100178, 0.100178]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
output, state = contrib_rnn_cell.CoupledInputForgetGateLSTMCell(
num_units=num_units, forget_bias=1.0, state_is_tuple=False)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[output, state], {
x.name:
np.array([[1., 1., 1., 1.], [2., 2., 2., 2.],
[3., 3., 3., 3.]]),
m.name:
0.1 * np.ones((batch_size, state_size))
})
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
self.assertAllClose(res[1], expected_state)
def testTimeFreqLSTMCell(self):
with self.cached_session() as sess:
num_units = 8
state_size = num_units * 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = (input_size - feature_size) // frequency_skip + 1
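      # Reading of the formula above: a window of `feature_size` features
      # slid over the `input_size` input bins with stride `frequency_skip`
      # gives (4 - 2) // 1 + 1 = 3 frequency blocks.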
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size * num_shifts])
output, state = contrib_rnn_cell.TimeFreqLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[output, state], {
x.name:
np.array([[1., 1., 1., 1.], [2., 2., 2., 2.],
[3., 3., 3., 3.]]),
m.name:
0.1 * np.ones((batch_size, int(state_size * (num_shifts))))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts))
self.assertEqual(res[1].shape, (batch_size, state_size * num_shifts))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testGridLSTMCell(self):
with self.cached_session() as sess:
num_units = 8
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.GridLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts],
couple_input_forget_gates=True,
state_is_tuple=True)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(*(
[state_value, state_value] * num_shifts))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 2))
for ss in res[1]:
self.assertEqual(ss.shape, (batch_size, num_units))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(
np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1]
.state_f00_b00_c[i, :]))) > 1e-6)
def testGridLSTMCellWithFrequencyBlocks(self):
with self.cached_session() as sess:
num_units = 8
batch_size = 3
feature_size = 2
frequency_skip = 1
num_frequency_blocks = [1, 1]
total_blocks = num_frequency_blocks[0] + num_frequency_blocks[1]
start_freqindex_list = [0, 2]
end_freqindex_list = [2, 4]
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.GridLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=num_frequency_blocks,
start_freqindex_list=start_freqindex_list,
end_freqindex_list=end_freqindex_list,
couple_input_forget_gates=True,
state_is_tuple=True)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(*(
[state_value, state_value] * total_blocks))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape,
(batch_size, num_units * total_blocks * 2))
for ss in res[1]:
self.assertEqual(ss.shape, (batch_size, num_units))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(
np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1]
.state_f00_b00_c[i, :]))) > 1e-6)
def testGridLstmCellWithCoupledInputForgetGates(self):
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[
0.416383, 0.416383, 0.403238, 0.403238, 0.524020, 0.524020,
0.565425, 0.565425, 0.557865, 0.557865, 0.609699, 0.609699
], [
0.627331, 0.627331, 0.622393, 0.622393, 0.688342, 0.688342,
0.708078, 0.708078, 0.694245, 0.694245, 0.715171, 0.715171
], [
0.711050, 0.711050, 0.709197, 0.709197, 0.736533, 0.736533,
0.744264, 0.744264, 0.737390, 0.737390, 0.745250, 0.745250
]],
dtype=np.float32)
expected_state = np.array(
[[
0.625556, 0.625556, 0.416383, 0.416383, 0.759134, 0.759134,
0.524020, 0.524020, 0.798795, 0.798795, 0.557865, 0.557865
], [
0.875488, 0.875488, 0.627331, 0.627331, 0.936432, 0.936432,
0.688342, 0.688342, 0.941961, 0.941961, 0.694245, 0.694245
], [
0.957327, 0.957327, 0.711050, 0.711050, 0.979522, 0.979522,
0.736533, 0.736533, 0.980245, 0.980245, 0.737390, 0.737390
]],
dtype=np.float32)
for state_is_tuple in [False, True]:
with self.cached_session() as sess:
with variable_scope.variable_scope(
"state_is_tuple" + str(state_is_tuple),
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.GridLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts],
couple_input_forget_gates=True,
state_is_tuple=state_is_tuple)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
if state_is_tuple:
state_value = constant_op.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(*(
[state_value, state_value] * num_shifts))
else:
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units * num_shifts * 2), dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
          # Smoke test: only check that the expected values did not change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
if not state_is_tuple:
self.assertAllClose(res[1], expected_state)
else:
# There should be num_shifts * 2 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 2)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testBidirectionGridLSTMCell(self):
with self.cached_session() as sess:
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[
0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
0.520789, 0.520789, 0.476968, 0.476968, 0.604341, 0.604341,
0.760207, 0.760207, 0.635773, 0.635773, 0.850218, 0.850218
], [
0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
0.692621, 0.692621, 0.652363, 0.652363, 0.737517, 0.737517,
0.899558, 0.899558, 0.745984, 0.745984, 0.946840, 0.946840
], [
0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
0.759940, 0.759940, 0.720652, 0.720652, 0.778552, 0.778552,
0.941606, 0.941606, 0.781035, 0.781035, 0.977731, 0.977731
]],
dtype=np.float32)
expected_state = np.array(
[[
0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
0.785405, 0.785405, 0.520789, 0.520789, 0.890836, 0.890836,
0.604341, 0.604341, 0.928512, 0.928512, 0.635773, 0.635773
], [
0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
0.993088, 0.993088, 0.692621, 0.692621, 1.040288, 1.040288,
0.737517, 0.737517, 1.048773, 1.048773, 0.745984, 0.745984
], [
1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
1.062455, 1.062455, 0.759940, 0.759940, 1.080101, 1.080101,
0.778552, 0.778552, 1.082402, 1.082402, 0.781035, 0.781035
]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.BidirectionalGridLSTMCell(
num_units=num_units,
feature_size=feature_size,
share_time_frequency_weights=True,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts])
inputs = constant_op.constant(
np.array(
[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3],
[3.0, 3.1, 3.2, 3.3]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(*(
[state_value, state_value] * num_shifts * 2))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4))
self.assertAllClose(res[0], expected_output)
# There should be num_shifts * 4 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 4)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testBidirectionGridLSTMCellWithSliceOffset(self):
with self.cached_session() as sess:
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[
0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
0.322645, 0.322645, 0.276068, 0.276068, 0.584654, 0.584654,
0.690292, 0.690292, 0.640446, 0.640446, 0.840071, 0.840071
], [
0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
0.493625, 0.493625, 0.449236, 0.449236, 0.730828, 0.730828,
0.865996, 0.865996, 0.749429, 0.749429, 0.944958, 0.944958
], [
0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
0.608587, 0.608587, 0.566683, 0.566683, 0.777345, 0.777345,
0.925820, 0.925820, 0.782597, 0.782597, 0.976858, 0.976858
]],
dtype=np.float32)
expected_state = np.array(
[[
0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
0.516575, 0.516575, 0.322645, 0.322645, 0.866628, 0.866628,
0.584654, 0.584654, 0.934002, 0.934002, 0.640446, 0.640446
], [
0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
0.749836, 0.749836, 0.493625, 0.493625, 1.033488, 1.033488,
0.730828, 0.730828, 1.052186, 1.052186, 0.749429, 0.749429
], [
1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
0.895999, 0.895999, 0.608587, 0.608587, 1.078978, 1.078978,
0.777345, 0.777345, 1.083843, 1.083843, 0.782597, 0.782597
]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.BidirectionalGridLSTMCell(
num_units=num_units,
feature_size=feature_size,
share_time_frequency_weights=True,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts],
backward_slice_offset=1)
inputs = constant_op.constant(
np.array(
[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3],
[3.0, 3.1, 3.2, 3.3]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(*(
[state_value, state_value] * num_shifts * 2))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4))
self.assertAllClose(res[0], expected_output)
# There should be num_shifts * 4 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 4)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testAttentionCellWrapperFailures(self):
with self.assertRaisesRegexp(
TypeError, rnn_cell_impl.ASSERT_LIKE_RNNCELL_ERROR_REGEXP):
contrib_rnn_cell.AttentionCellWrapper(None, 0)
num_units = 8
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got 0"):
contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, 0, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got -1"):
contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, -1, state_is_tuple=state_is_tuple)
with ops.Graph().as_default():
lstm_cell = rnn_cell.BasicLSTMCell(num_units, state_is_tuple=True)
with self.assertRaisesRegexp(
ValueError, "Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: *"):
contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, 4, state_is_tuple=False)
def testAttentionCellWrapperZeros(self):
num_units = 8
attn_length = 16
batch_size = 3
input_size = 4
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
with self.cached_session() as sess:
with variable_scope.variable_scope(
"state_is_tuple_" + str(state_is_tuple)):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = array_ops.zeros([batch_size, num_units], dtype=np.float32)
attn_state_zeros = array_ops.zeros(
[batch_size, attn_length * num_units], dtype=np.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = array_ops.zeros(
[
batch_size,
num_units * 2 + attn_length * num_units + num_units
],
dtype=np.float32)
inputs = array_ops.zeros(
[batch_size, input_size], dtype=dtypes.float32)
output, state = cell(inputs, zero_state)
            self.assertEqual(output.get_shape(), [batch_size, num_units])
if state_is_tuple:
              self.assertEqual(len(state), 3)
              self.assertEqual(len(state[0]), 2)
              self.assertEqual(state[0][0].get_shape(),
[batch_size, num_units])
              self.assertEqual(state[0][1].get_shape(),
[batch_size, num_units])
              self.assertEqual(state[1].get_shape(), [batch_size, num_units])
              self.assertEqual(state[2].get_shape(),
[batch_size, attn_length * num_units])
tensors = [output] + list(state)
else:
              self.assertEqual(state.get_shape(), [
batch_size,
num_units * 2 + num_units + attn_length * num_units
])
tensors = [output, state]
zero_result = sum(
[math_ops.reduce_sum(math_ops.abs(x)) for x in tensors])
sess.run(variables.global_variables_initializer())
self.assertTrue(sess.run(zero_result) < 1e-6)
def testAttentionCellWrapperValues(self):
num_units = 8
attn_length = 16
batch_size = 3
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
with self.cached_session() as sess:
with variable_scope.variable_scope(
"state_is_tuple_" + str(state_is_tuple)):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = constant_op.constant(
0.1 * np.ones([batch_size, num_units], dtype=np.float32),
dtype=dtypes.float32)
attn_state_zeros = constant_op.constant(
0.1 * np.ones(
[batch_size, attn_length * num_units], dtype=np.float32),
dtype=dtypes.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = constant_op.constant(
0.1 * np.ones(
[
batch_size,
num_units * 2 + num_units + attn_length * num_units
],
dtype=np.float32),
dtype=dtypes.float32)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, zero_state)
if state_is_tuple:
concat_state = array_ops.concat(
[state[0][0], state[0][1], state[1], state[2]], 1)
else:
concat_state = state
sess.run(variables.global_variables_initializer())
output, state = sess.run([output, concat_state])
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((output[0, :] - output[i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((state[0, :] - state[i, :]))) > 1e-6)
def _testAttentionCellWrapperCorrectResult(self):
num_units = 4
attn_length = 6
batch_size = 2
expected_output = np.array(
[[1.068372, 0.45496, -0.678277, 0.340538],
[1.018088, 0.378983, -0.572179, 0.268591]],
dtype=np.float32)
expected_state = np.array(
[[
0.74946702, 0.34681597, 0.26474735, 1.06485605, 0.38465962,
0.11420801, 0.10272158, 0.30925757, 0.63899988, 0.7181077,
0.47534478, 0.33715725, 0.58086717, 0.49446869, 0.7641536,
0.12814975, 0.92231739, 0.89857256, 0.21889746, 0.38442063,
0.53481543, 0.8876909, 0.45823169, 0.5905602, 0.78038228,
0.56501579, 0.03971386, 0.09870267, 0.8074435, 0.66821432,
0.99211812, 0.12295902, 1.14606023, 0.34370938, -0.79251152,
0.51843399
], [
0.5179342, 0.48682183, -0.25426468, 0.96810579, 0.28809637,
0.13607743, -0.11446252, 0.26792109, 0.78047138, 0.63460857,
0.49122369, 0.52007174, 0.73000264, 0.66986895, 0.73576689,
0.86301267, 0.87887371, 0.35185754, 0.93417215, 0.64732957,
0.63173044, 0.66627824, 0.53644657, 0.20477486, 0.98458421,
0.38277245, 0.03746676, 0.92510188, 0.57714164, 0.84932971,
0.36127412, 0.12125921, 1.1362772, 0.34361625, -0.78150457,
0.70582712
]],
dtype=np.float32)
seed = 12345
random_seed.set_random_seed(seed)
rnn_scope = None
for state_is_tuple in [False, True]:
with session.Session() as sess:
with variable_scope.variable_scope(
"state_is_tuple",
reuse=state_is_tuple,
initializer=init_ops.glorot_uniform_initializer()):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
# This is legacy behavior to preserve the test. Weight
# sharing no longer works by creating a new RNNCell in the
# same variable scope; so here we restore the scope of the
# RNNCells after the first use below.
if rnn_scope is not None:
(cell._scope, lstm_cell._scope) = rnn_scope # pylint: disable=protected-access,unpacking-non-sequence
zeros1 = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 1)
zeros2 = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 2)
zeros3 = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 3)
attn_state_zeros = random_ops.random_uniform(
(batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4)
zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
if not state_is_tuple:
zero_state = array_ops.concat([
zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
], 1)
inputs = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 5)
output, state = cell(inputs, zero_state)
# This is legacy behavior to preserve the test. Weight
# sharing no longer works by creating a new RNNCell in the
# same variable scope; so here we store the scope of the
# first RNNCell for reuse above.
if rnn_scope is None:
rnn_scope = (cell._scope, lstm_cell._scope) # pylint: disable=protected-access
if state_is_tuple:
state = array_ops.concat(
[state[0][0], state[0][1], state[1], state[2]], 1)
sess.run(variables.global_variables_initializer())
self.assertAllClose(sess.run(output), expected_output)
self.assertAllClose(sess.run(state), expected_state)
def testNASCell(self):
num_units = 6
batch_size = 3
expected_output = np.array(
[[0.576751, 0.576751, 0.576751, 0.576751, 0.576751, 0.576751],
[0.618936, 0.618936, 0.618936, 0.618936, 0.618936, 0.618936],
[0.627393, 0.627393, 0.627393, 0.627393, 0.627393, 0.627393]])
expected_state = np.array([[
0.71579772, 0.71579772, 0.71579772, 0.71579772, 0.71579772, 0.71579772,
0.57675087, 0.57675087, 0.57675087, 0.57675087, 0.57675087, 0.57675087
], [
0.78041625, 0.78041625, 0.78041625, 0.78041625, 0.78041625, 0.78041625,
0.6189357, 0.6189357, 0.61893570, 0.6189357, 0.6189357, 0.6189357
], [
0.79457647, 0.79457647, 0.79457647, 0.79457647, 0.79457653, 0.79457653,
0.62739348, 0.62739348, 0.62739348, 0.62739348, 0.62739348, 0.62739348
]])
with self.cached_session() as sess:
with variable_scope.variable_scope(
"nas_test", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.NASCell(num_units=num_units)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = rnn_cell.LSTMStateTuple(state_value, state_value)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
        # Smoke test: only check that the expected values did not change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
# There should be 2 states in the tuple.
self.assertEqual(len(res[1]), 2)
# Checking the shape of each state to be batch_size * num_units
new_c, new_h = res[1]
self.assertEqual(new_c.shape[0], batch_size)
self.assertEqual(new_c.shape[1], num_units)
self.assertEqual(new_h.shape[0], batch_size)
self.assertEqual(new_h.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testNASCellProj(self):
num_units = 6
batch_size = 3
num_proj = 5
expected_output = np.array(
[[1.697418, 1.697418, 1.697418, 1.697418,
1.697418], [1.840037, 1.840037, 1.840037, 1.840037, 1.840037],
[1.873985, 1.873985, 1.873985, 1.873985, 1.873985]])
expected_state = np.array([[
0.69855207, 0.69855207, 0.69855207, 0.69855207, 0.69855207, 0.69855207,
1.69741797, 1.69741797, 1.69741797, 1.69741797, 1.69741797
], [
0.77073824, 0.77073824, 0.77073824, 0.77073824, 0.77073824, 0.77073824,
1.84003687, 1.84003687, 1.84003687, 1.84003687, 1.84003687
], [
0.78973997, 0.78973997, 0.78973997, 0.78973997, 0.78973997, 0.78973997,
1.87398517, 1.87398517, 1.87398517, 1.87398517, 1.87398517
]])
with self.cached_session() as sess:
with variable_scope.variable_scope(
"nas_proj_test", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.NASCell(num_units=num_units, num_proj=num_proj)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value_c = constant_op.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
state_value_h = constant_op.constant(
0.1 * np.ones((batch_size, num_proj), dtype=np.float32),
dtype=dtypes.float32)
init_state = rnn_cell.LSTMStateTuple(state_value_c, state_value_h)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
        # Smoke test: only check that the expected values did not change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
# There should be 2 states in the tuple.
self.assertEqual(len(res[1]), 2)
# Checking the shape of each state to be batch_size * num_units
new_c, new_h = res[1]
self.assertEqual(new_c.shape[0], batch_size)
self.assertEqual(new_c.shape[1], num_units)
self.assertEqual(new_h.shape[0], batch_size)
self.assertEqual(new_h.shape[1], num_proj)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
@test_util.run_in_graph_and_eager_modes
def testNASCellKerasRNN(self):
"""Tests that NASCell works with keras RNN layer."""
cell = contrib_rnn_cell.NASCell(10)
seq_input = ops.convert_to_tensor(
np.random.rand(2, 3, 5), name="seq_input", dtype=dtypes.float32)
rnn_layer = keras_layers.RNN(cell=cell)
rnn_outputs = rnn_layer(seq_input)
self.evaluate([variables.global_variables_initializer()])
self.assertEqual(self.evaluate(rnn_outputs).shape, (2, 10))
def testUGRNNCell(self):
num_units = 2
batch_size = 3
expected_state_and_output = np.array(
[[0.13752282, 0.13752282], [0.10545051, 0.10545051],
[0.10074195, 0.10074195]],
dtype=np.float32)
with self.cached_session() as sess:
with variable_scope.variable_scope(
"ugrnn_cell_test", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.UGRNNCell(num_units=num_units)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_state_and_output)
self.assertAllClose(res[1], expected_state_and_output)
def testIntersectionRNNCell(self):
num_units = 2
batch_size = 3
expected_state = np.array(
[[0.13752282, 0.13752282], [0.10545051, 0.10545051],
[0.10074195, 0.10074195]],
dtype=np.float32)
expected_output = np.array(
[[2.00431061, 2.00431061], [4.00060606, 4.00060606],
[6.00008249, 6.00008249]],
dtype=np.float32)
with self.cached_session() as sess:
with variable_scope.variable_scope(
"intersection_rnn_cell_test",
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.IntersectionRNNCell(
num_units=num_units, num_in_proj=num_units)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
self.assertAllClose(res[1], expected_state)
def testIntersectionRNNCellFailure(self):
num_units = 2
batch_size = 3
cell = contrib_rnn_cell.IntersectionRNNCell(num_units=num_units)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError,
"Must have input size == output size for "
"Intersection RNN. To fix, num_in_proj should "
"be set to num_units at cell init."):
cell(inputs, init_state)
def testPhasedLSTMCell(self):
with self.cached_session() as sess:
num_units = 2
batch_size = 3
input_size = 4
expected_state_c = np.array(
[[0.00072015, 0.00036633], [0.00083481, 0.00047266],
[0.00085111, 0.00053054]],
dtype=np.float32)
expected_state_h = np.array(
[[0.0005159, 0.00026243], [0.00062958, 0.00035646],
[0.00064732, 0.00040351]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
t = array_ops.zeros([batch_size, 1], dtype=dtypes.float64)
x = array_ops.zeros([batch_size, input_size])
c0 = array_ops.zeros([batch_size, 2])
h0 = array_ops.zeros([batch_size, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
output, state = contrib_rnn_cell.PhasedLSTMCell(num_units=num_units)(
(t, x), state0)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[output, state], {
t.name:
np.array([[1.], [2.], [3.]]),
x.name:
np.array([[1., 1., 1., 1.], [2., 2., 2., 2.],
[3., 3., 3., 3.]]),
})
# This is a smoke test, making sure expected values are unchanged.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], res[1].h)
self.assertAllClose(res[1].c, expected_state_c)
self.assertAllClose(res[1].h, expected_state_h)
def testConv1DLSTMCell(self):
with self.cached_session() as sess:
shape = [2, 1]
filter_size = [3]
num_features = 1
expected_state_c = np.array(
[[[1.4375670191], [1.4375670191]], [[2.7542609292], [2.7542609292]]],
dtype=np.float32)
expected_state_h = np.array(
[[[0.6529865603], [0.6529865603]], [[0.8736877431], [0.8736877431]]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(1.0 / 2.0)):
x = array_ops.placeholder(dtypes.float32, [None, None, 1])
cell = contrib_rnn_cell.Conv1DLSTMCell(
input_shape=shape,
kernel_shape=filter_size,
output_channels=num_features)
hidden = cell.zero_state(array_ops.shape(x)[0], dtypes.float32)
output, state = cell(x, hidden)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[output, state], {
hidden[0].name: np.array([[[1.], [1.]], [[2.], [2.]]]),
x.name: np.array([[[1.], [1.]], [[2.], [2.]]]),
})
# This is a smoke test, making sure expected values are unchanged.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], res[1].h)
self.assertAllClose(res[1].c, expected_state_c)
self.assertAllClose(res[1].h, expected_state_h)
def testConv2DLSTMCell(self):
with self.cached_session() as sess:
shape = [2, 2, 1]
filter_size = [3, 3]
num_features = 1
expected_state_c = np.array(
[[[[1.4375670191], [1.4375670191]], [[1.4375670191], [1.4375670191]]],
[[[2.7542609292], [2.7542609292]], [[2.7542609292], [2.7542609292]]
]],
dtype=np.float32)
expected_state_h = np.array(
[[[[0.6529865603], [0.6529865603]], [[0.6529865603], [0.6529865603]]],
[[[0.8736877431], [0.8736877431]], [[0.8736877431], [0.8736877431]]
]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(1.0 / 4.0)):
x = array_ops.placeholder(dtypes.float32, [None, None, None, 1])
cell = contrib_rnn_cell.Conv2DLSTMCell(
input_shape=shape,
kernel_shape=filter_size,
output_channels=num_features)
hidden = cell.zero_state(array_ops.shape(x)[0], dtypes.float32)
output, state = cell(x, hidden)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[output, state], {
hidden[0].name:
np.array([[[[1.], [1.]], [[1.], [1.]]], [[[2.], [2.]],
[[2.], [2.]]]]),
x.name:
np.array([[[[1.], [1.]], [[1.], [1.]]], [[[2.], [2.]],
[[2.], [2.]]]]),
})
# This is a smoke test, making sure expected values are unchanged.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], res[1].h)
self.assertAllClose(res[1].c, expected_state_c)
self.assertAllClose(res[1].h, expected_state_h)
def testConv3DLSTMCell(self):
with self.cached_session() as sess:
shape = [2, 2, 2, 1]
filter_size = [3, 3, 3]
num_features = 1
expected_state_c = np.array(
[[[[[1.4375670191], [1.4375670191]], [[1.4375670191], [1.4375670191]]
], [[[1.4375670191], [1.4375670191]], [[1.4375670191],
[1.4375670191]]]],
[[[[2.7542609292], [2.7542609292]], [[2.7542609292], [2.7542609292]]
], [[[2.7542609292], [2.7542609292]], [[2.7542609292],
[2.7542609292]]]]],
dtype=np.float32)
expected_state_h = np.array(
[[[[[0.6529865603], [0.6529865603]], [[0.6529865603], [0.6529865603]]
], [[[0.6529865603], [0.6529865603]], [[0.6529865603],
[0.6529865603]]]],
[[[[0.8736877431], [0.8736877431]], [[0.8736877431], [0.8736877431]]
], [[[0.8736877431], [0.8736877431]], [[0.8736877431],
[0.8736877431]]]]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(1.0 / 8.0)):
x = array_ops.placeholder(dtypes.float32, [None, None, None, None, 1])
cell = contrib_rnn_cell.Conv3DLSTMCell(
input_shape=shape,
kernel_shape=filter_size,
output_channels=num_features)
hidden = cell.zero_state(array_ops.shape(x)[0], dtypes.float32)
output, state = cell(x, hidden)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[output, state], {
hidden[0].name:
np.array([[[[[1.], [1.]], [[1.], [1.]]], [[[1.], [1.]], [[
1.
], [1.]]]], [[[[2.], [2.]], [[2.], [2.]]],
[[[2.], [2.]], [[2.], [2.]]]]]),
x.name:
np.array([[[[[1.], [1.]], [[1.], [1.]]], [[[1.], [1.]], [[
1.
], [1.]]]], [[[[2.], [2.]], [[2.], [2.]]], [[[2.], [2.]],
[[2.], [2.]]]]])
})
# This is a smoke test, making sure expected values are unchanged.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], res[1].h)
self.assertAllClose(res[1].c, expected_state_c)
self.assertAllClose(res[1].h, expected_state_h)
def testHighwayWrapper(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"base_cell", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
base_cell = rnn_cell.GRUCell(3)
g, m_new = base_cell(x, m)
with variable_scope.variable_scope(
"hw_cell", initializer=init_ops.constant_initializer(0.5)):
hw_cell = contrib_rnn_cell.HighwayWrapper(
rnn_cell.GRUCell(3), carry_bias_init=-100.0)
g_res, m_new_res = hw_cell(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
      # carry_bias_init is very negative, so the gate that carries the raw
      # input saturates towards zero and the wrapper's output reduces to the
      # wrapped GRU's output, matching the identically initialized base cell.
self.assertAllClose(res[1], res[0])
      # The wrapper does not modify the cell state, so both states match.
self.assertAllClose(res[2], res[3])
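  # The assertions above are consistent with a coupled highway combination
  #   y = c * x + (1 - c) * cell_output,  c = sigmoid(x . W_carry + b_carry),
  # with b_carry initialized to carry_bias_init, so that b_carry = -100
  # drives c to ~0. The sketch below is illustrative only (the exact wrapper
  # internals are an assumption) and is not used by any test.
  def _coupled_highway_reference(self, x, cell_out, carry_w, carry_b):
    carry = 1.0 / (1.0 + np.exp(-(np.dot(x, carry_w) + carry_b)))
    return carry * x + (1.0 - carry) * cell_out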
def testGLSTMCell(self):
# Ensure that G-LSTM matches LSTM when number_of_groups = 1
batch_size = 2
num_units = 4
number_of_groups = 1
# Try with input dimension equal to num_units or not.
for num_inputs in [num_units, num_units + number_of_groups]:
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root1_%d" % num_inputs,
initializer=init_ops.constant_initializer(0.5)):
x = array_ops.ones([batch_size, num_inputs])
# When number_of_groups = 1, G-LSTM is equivalent to regular LSTM
gcell = contrib_rnn_cell.GLSTMCell(
num_units=num_units, number_of_groups=number_of_groups)
cell = rnn_cell.LSTMCell(num_units=num_units)
self.assertTrue(isinstance(gcell.state_size, tuple))
zero_state = gcell.zero_state(
batch_size=batch_size, dtype=dtypes.float32)
gh, gs = gcell(x, zero_state)
h, g = cell(x, zero_state)
sess.run([variables.global_variables_initializer()])
glstm_result = sess.run([gh, gs])
lstm_result = sess.run([h, g])
self.assertAllClose(glstm_result[0], lstm_result[0], 1e-5)
self.assertAllClose(glstm_result[1], lstm_result[1], 1e-5)
    # Test that G-LSTM subgroups act like the corresponding sub-LSTMs.
batch_size = 2
num_units = 4
number_of_groups = 2
# Try with num_inputs equal to or not equal to num_units.
for num_inputs in [num_units, num_units + number_of_groups]:
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root2_%d" % num_inputs,
initializer=init_ops.constant_initializer(0.5)):
# input for G-LSTM with 2 groups
glstm_input = array_ops.ones([batch_size, num_inputs])
gcell = contrib_rnn_cell.GLSTMCell(
num_units=num_units, number_of_groups=number_of_groups)
gcell_zero_state = gcell.zero_state(
batch_size=batch_size, dtype=dtypes.float32)
gh, gs = gcell(glstm_input, gcell_zero_state)
# input for LSTM cell simulating single G-LSTM group
lstm_input = array_ops.ones(
              [batch_size, num_inputs // number_of_groups])
          # Note the integer division by number_of_groups: this LSTM cell
          # simulates a single G-LSTM group.
          cell = rnn_cell.LSTMCell(num_units=num_units // number_of_groups)
cell_zero_state = cell.zero_state(
batch_size=batch_size, dtype=dtypes.float32)
h, g = cell(lstm_input, cell_zero_state)
sess.run([variables.global_variables_initializer()])
[gh_res, h_res] = sess.run([gh, h])
self.assertAllClose(gh_res[:, 0:int(num_units / number_of_groups)],
h_res, 1e-5)
self.assertAllClose(gh_res[:, int(num_units / number_of_groups):],
h_res, 1e-5)
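  # Each group of a G-LSTM acts like an independent LSTM of size
  # num_units // number_of_groups on the matching slice of the input, which
  # is what the column slicing in the assertions above relies on. A small
  # illustrative helper (not used by the tests):
  def _glstm_output_group(self, outputs, num_units, number_of_groups, group):
    group_size = num_units // number_of_groups
    return outputs[:, group * group_size:(group + 1) * group_size]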
def testGLSTMCellFailure(self):
batch_size = 2
num_units = 4
number_of_groups = 2
with self.cached_session():
with variable_scope.variable_scope(
"glstm_failure", initializer=init_ops.constant_initializer(0.5)):
gcell = contrib_rnn_cell.GLSTMCell(
num_units=num_units, number_of_groups=number_of_groups)
gcell_zero_state = gcell.zero_state(
batch_size=batch_size, dtype=dtypes.float32)
# Try an input with statically-unknown innermost dimension.
glstm_input = array_ops.placeholder(
dtypes.float32, shape=[batch_size, None])
with self.assertRaisesRegexp(ValueError,
"input size must be statically known"):
gcell(glstm_input, gcell_zero_state)
# Try an input whose innermost dimension isn't divisible into groups.
glstm_input = array_ops.placeholder(
dtypes.float32, shape=[batch_size, 3])
with self.assertRaisesRegexp(
ValueError,
r"input size \(3\) must be divisible by number_of_groups \(2\)"):
gcell(glstm_input, gcell_zero_state)
def testCFNCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root"):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = contrib_rnn_cell.CFNCell(
units=2,
kernel_initializer=initializers.Constant(0.5))
g, _ = cell(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.17188203, 0.17188203]])
with variable_scope.variable_scope("other"):
# Test CFN with input_size != num_units.
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
cell = contrib_rnn_cell.CFNCell(
units=2,
kernel_initializer=initializers.Constant(0.5))
g, _ = cell(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.15535763, 0.15535763]])
def testCFNCellEndToEnd(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = utils.to_categorical(y_train)
cell = contrib_rnn_cell.CFNCell(output_shape)
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
self.assertEqual(state.shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state)
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables.global_variables_initializer()])
_, outputs, state = sess.run(
[train_op, outputs, state], {inputs: x_train, predict: y_train})
self.assertEqual(len(outputs), batch)
self.assertEqual(len(state), batch)
def testMinimalRNNCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root"):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = contrib_rnn_cell.MinimalRNNCell(
units=2,
kernel_initializer=initializers.Constant(0.5))
g, _ = cell(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.18899589, 0.18899589]])
with variable_scope.variable_scope(
"other"):
# Test MinimalRNN with input_size != num_units.
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
cell = contrib_rnn_cell.MinimalRNNCell(
units=2,
kernel_initializer=initializers.Constant(0.5))
g, _ = cell(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.19554167, 0.19554167]])
def testMinimalRNNCellEndToEnd(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = utils.to_categorical(y_train)
cell = contrib_rnn_cell.MinimalRNNCell(output_shape)
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
self.assertEqual(state.shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state)
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables.global_variables_initializer()])
_, outputs, state = sess.run(
[train_op, outputs, state], {inputs: x_train, predict: y_train})
self.assertEqual(len(outputs), batch)
self.assertEqual(len(state), batch)
def testNTMCell(self):
expected_output = np.array(
[[-0.04973561, -0.00020032, -0.09586009, -0.05049511],
[-0.02199885, 0.02302885, -0.05558189, -0.02051288],
[-0.01399924, 0.02543444, -0.06975862, -0.03782758],
[-0.02238393, 0.0135776, -0.09102941, -0.05594013]],
dtype=np.float32)
expected_read_vector_list = np.array(
[[1e-6, 1e-6, 1e-6, 1e-6], [1e-6, 1e-6, 1e-6, 1e-6],
[1e-6, 1e-6, 1e-6, 1e-6], [1e-6, 1e-6, 1e-6, 1e-6]],
dtype=np.float32)
expected_w_list = np.array(
[[[0.15837428, 0.21354634, 0.22115856, 0.21117255, 0.19574821],
[0.15826838, 0.2150458, 0.2228198, 0.20747298, 0.19639312],
[0.15750293, 0.21550071, 0.22280747, 0.20737495, 0.19681393],
[0.15763053, 0.21473582, 0.22187267, 0.20920397, 0.19655706]],
[[0.21703579, 0.19425659, 0.22143759, 0.18024713, 0.18702294],
[0.2164267, 0.19451937, 0.22112325, 0.18051708, 0.18741359],
[0.21567065, 0.1947548, 0.22107735, 0.18058982, 0.18790732],
[0.2163743, 0.194361, 0.22131558, 0.18042919, 0.1875199]]],
dtype=np.float32)
expected_M_0 = np.array(
[[-0.00553495, -0.01089884, 0.00683121, -0.00273276],
[-0.00495392, -0.00975483, 0.00611433, -0.00244583],
[-0.00564722, -0.0111199, 0.00696973, -0.0027882],
[-0.00459658, -0.00905126, 0.00567345, -0.00226937],
[-0.00476941, -0.00939155, 0.00588669, -0.00235472]],
dtype=np.float32)
with session.Session() as sess:
with variable_scope.variable_scope("root"):
seed = 1234
random_seed.set_random_seed(seed)
batch_size = 4
inputs = random_ops.random_uniform((batch_size, 4),
0.0,
1.0,
seed=seed + 1)
cell = contrib_rnn_cell.NTMCell(
controller=rnn_cell_impl.LSTMCell(num_units=4),
memory_size=5,
memory_vector_dim=4,
read_head_num=1,
write_head_num=1)
output, state = cell(inputs, cell.zero_state(batch_size,
dtypes.float32))
sess.run([variables.global_variables_initializer()])
res, read_vector_list, w_list, M = sess.run(
[output, state.read_vector_list, state.w_list, state.M])
# Smoke test
self.assertAllClose(res, expected_output)
self.assertAllClose(read_vector_list[0], expected_read_vector_list)
self.assertAllClose(w_list, expected_w_list)
self.assertAllClose(M[0], expected_M_0)
class LayerNormBasicLSTMCellTest(test.TestCase):
  # NOTE: unlike the smoke tests above, the expected values in this test
  # case have been calculated.
def testBasicLSTMCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
c0 = array_ops.zeros([1, 2])
h0 = array_ops.zeros([1, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
c1 = array_ops.zeros([1, 2])
h1 = array_ops.zeros([1, 2])
state1 = rnn_cell.LSTMStateTuple(c1, h1)
state = (state0, state1)
single_cell = lambda: contrib_rnn_cell.LayerNormBasicLSTMCell(2)
cell = rnn_cell.MultiRNNCell([single_cell() for _ in range(2)])
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_state0_c = np.array([[-1.0, 1.0]])
expected_state0_h = np.array([[-0.38079708, 0.38079708]])
expected_state1_c = np.array([[-1.0, 1.0]])
expected_state1_h = np.array([[-0.38079708, 0.38079708]])
actual_h = res[0]
actual_state0_c = res[1][0].c
actual_state0_h = res[1][0].h
actual_state1_c = res[1][1].c
actual_state1_h = res[1][1].h
self.assertAllClose(actual_h, expected_h, 1e-5)
self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5)
self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5)
self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5)
self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5)
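        # An informal reading of these constants (assuming the usual
        # layer-norm LSTM formulation): with only two units, layer
        # normalization maps any non-constant pair to (-1, +1), which is why
        # the new cell state is exactly (-1, 1); the constant output-gate
        # pre-activation normalizes to zero, giving
        # h = sigmoid(0) * tanh(+-1) ~= +-0.38079708.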
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test BasicLSTMCell with input_size != num_units.
c = array_ops.zeros([1, 2])
h = array_ops.zeros([1, 2])
state = rnn_cell.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormBasicLSTMCell(2)
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.array([[1., 1., 1.]]),
c.name: 0.1 * np.asarray([[0, 1]]),
h.name: 0.1 * np.asarray([[2, 3]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_c = np.array([[-1.0, 1.0]])
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c, 1e-5)
self.assertAllClose(res[1].h, expected_h, 1e-5)
def testBasicLSTMCellWithoutNorm(self):
"""Tests that BasicLSTMCell with layer_norm=False."""
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
c0 = array_ops.zeros([1, 2])
h0 = array_ops.zeros([1, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
c1 = array_ops.zeros([1, 2])
h1 = array_ops.zeros([1, 2])
state1 = rnn_cell.LSTMStateTuple(c1, h1)
state = (state0, state1)
single_cell = lambda: contrib_rnn_cell.LayerNormBasicLSTMCell(2, layer_norm=False) # pylint: disable=line-too-long
cell = rnn_cell.MultiRNNCell([single_cell() for _ in range(2)])
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[0.70230919, 0.72581059]])
expected_state0_c = np.array([[0.8020075, 0.89599884]])
expected_state0_h = np.array([[0.56668288, 0.60858738]])
expected_state1_c = np.array([[1.17500675, 1.26892781]])
expected_state1_h = np.array([[0.70230919, 0.72581059]])
actual_h = res[0]
actual_state0_c = res[1][0].c
actual_state0_h = res[1][0].h
actual_state1_c = res[1][1].c
actual_state1_h = res[1][1].h
self.assertAllClose(actual_h, expected_h, 1e-5)
self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5)
self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5)
self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5)
self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test BasicLSTMCell with input_size != num_units.
c = array_ops.zeros([1, 2])
h = array_ops.zeros([1, 2])
state = rnn_cell.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormBasicLSTMCell(2, layer_norm=False)
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.array([[1., 1., 1.]]),
c.name: 0.1 * np.asarray([[0, 1]]),
h.name: 0.1 * np.asarray([[2, 3]]),
})
expected_h = np.array([[0.64121795, 0.68166804]])
expected_c = np.array([[0.88477188, 0.98103917]])
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c, 1e-5)
self.assertAllClose(res[1].h, expected_h, 1e-5)
def testBasicLSTMCellWithStateTuple(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
c0 = array_ops.zeros([1, 2])
h0 = array_ops.zeros([1, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
c1 = array_ops.zeros([1, 2])
h1 = array_ops.zeros([1, 2])
state1 = rnn_cell.LSTMStateTuple(c1, h1)
cell = rnn_cell.MultiRNNCell(
[contrib_rnn_cell.LayerNormBasicLSTMCell(2) for _ in range(2)])
h, (s0, s1) = cell(x, (state0, state1))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[h, s0, s1], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_h0 = np.array([[-0.38079708, 0.38079708]])
expected_c0 = np.array([[-1.0, 1.0]])
expected_h1 = np.array([[-0.38079708, 0.38079708]])
expected_c1 = np.array([[-1.0, 1.0]])
self.assertEqual(len(res), 3)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c0, 1e-5)
self.assertAllClose(res[1].h, expected_h0, 1e-5)
self.assertAllClose(res[2].c, expected_c1, 1e-5)
self.assertAllClose(res[2].h, expected_h1, 1e-5)
def testBasicLSTMCellWithStateTupleLayerNorm(self):
"""The results of LSTMCell and LayerNormBasicLSTMCell should be the same."""
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
c0 = array_ops.zeros([1, 2])
h0 = array_ops.zeros([1, 2])
state0 = rnn_cell_impl.LSTMStateTuple(c0, h0)
c1 = array_ops.zeros([1, 2])
h1 = array_ops.zeros([1, 2])
state1 = rnn_cell_impl.LSTMStateTuple(c1, h1)
cell = rnn_cell_impl.MultiRNNCell([
contrib_rnn_cell.LayerNormLSTMCell(
2, layer_norm=True, norm_gain=1.0, norm_shift=0.0)
for _ in range(2)
])
h, (s0, s1) = cell(x, (state0, state1))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[h, s0, s1], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_h0 = np.array([[-0.38079708, 0.38079708]])
expected_c0 = np.array([[-1.0, 1.0]])
expected_h1 = np.array([[-0.38079708, 0.38079708]])
expected_c1 = np.array([[-1.0, 1.0]])
self.assertEqual(len(res), 3)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c0, 1e-5)
self.assertAllClose(res[1].h, expected_h0, 1e-5)
self.assertAllClose(res[2].c, expected_c1, 1e-5)
self.assertAllClose(res[2].h, expected_h1, 1e-5)
def testBasicLSTMCellWithDropout(self):
    def _is_close(x, y, digits=4):
      delta = abs(x - y)
      return delta < 10**(-digits)
def _is_close_in(x, items, digits=4):
for i in items:
if _is_close(x, i, digits):
return True
return False
keep_prob = 0.5
c_high = 2.9998924946
c_low = 0.999983298578
h_low = 0.761552567265
h_high = 0.995008519604
num_units = 5
allowed_low = [1, 2, 3]
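    # Note (illustrative, not part of the original test): with every weight
    # initialized to 1 and all-ones inputs/state, each gate pre-activation is
    # roughly 10, so the input/forget gates saturate near 1. A kept candidate
    # contributes about tanh(10) / keep_prob ~= 2 to the new cell state
    # (c_high ~= 3), a dropped one contributes 0 (c_low ~= 1), and
    # h = tanh(c) * sigmoid(o) yields h_high / h_low correspondingly.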
with self.cached_session() as sess:
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(1)):
x = array_ops.zeros([1, 5])
c = array_ops.zeros([1, 5])
h = array_ops.zeros([1, 5])
state = rnn_cell.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormBasicLSTMCell(
num_units, layer_norm=False, dropout_keep_prob=keep_prob)
g, s = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, s], {
x.name: np.ones([1, 5]),
c.name: np.ones([1, 5]),
h.name: np.ones([1, 5]),
})
        # Since the returned tensors are of shape [1, n],
        # take the first (and only) row.
actual_h = res[0][0]
actual_state_c = res[1].c[0]
actual_state_h = res[1].h[0]
# For each item in `c` (the cell inner state) check that
# it is equal to one of the allowed values `c_high` (not
# dropped out) or `c_low` (dropped out) and verify that the
# corresponding item in `h` (the cell activation) is coherent.
# Count the dropped activations and check that their number is
# coherent with the dropout probability.
dropped_count = 0
self.assertTrue((actual_h == actual_state_h).all())
for citem, hitem in zip(actual_state_c, actual_state_h):
self.assertTrue(_is_close_in(citem, [c_low, c_high]))
if _is_close(citem, c_low):
self.assertTrue(_is_close(hitem, h_low))
dropped_count += 1
elif _is_close(citem, c_high):
self.assertTrue(_is_close(hitem, h_high))
self.assertIn(dropped_count, allowed_low)
def _create_multi_lstm_cell_ops(batch_size, num_units, input_depth, num_layers,
max_time, compiled):
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(-0.1, 0.1, seed=2)):
inputs = variable_scope.get_variable(
"inputs",
initializer=random_ops.random_uniform(
(max_time, batch_size, input_depth), seed=1))
maybe_xla = lambda c: contrib_rnn_cell.CompiledWrapper(c) if compiled else c
cell = rnn_cell.MultiRNNCell(
[maybe_xla(rnn_cell.LSTMCell(num_units)) for _ in range(num_layers)])
initial_state = cell.zero_state(batch_size=batch_size, dtype=dtypes.float32)
outputs, final_state = rnn.dynamic_rnn(
cell=cell, inputs=inputs, initial_state=initial_state, time_major=True)
flat_final_state = nest.flatten(final_state)
trainable_variables = variables.trainable_variables()
outputs_grad = gradients_impl.gradients(
[outputs], trainable_variables + [inputs] + nest.flatten(initial_state))
final_state_grad = gradients_impl.gradients(
flat_final_state,
trainable_variables + [inputs] + nest.flatten(initial_state))
return {
"outputs": outputs,
"final_state": flat_final_state,
"outputs_grad": outputs_grad,
"final_state_grad": final_state_grad
}
class CompiledWrapperTest(test.TestCase):
def testMultiRNNCellWithLSTMCellAndXLA(self):
# TODO(b/34735319): Don't run this test if XLA is not available.
batch_size = 16
num_units = 32
input_depth = 12
num_layers = 2
max_time = 20
atol = 1e-5
random_seed.set_random_seed(1234)
with self.session(graph=ops.Graph()) as sess:
xla_ops = _create_multi_lstm_cell_ops(
batch_size=batch_size,
num_units=num_units,
input_depth=input_depth,
num_layers=num_layers,
max_time=max_time,
compiled=True)
sess.run([variables.global_variables_initializer()])
xla_results = sess.run(xla_ops)
random_seed.set_random_seed(1234)
with self.session(graph=ops.Graph()) as sess:
non_xla_ops = _create_multi_lstm_cell_ops(
batch_size=batch_size,
num_units=num_units,
input_depth=input_depth,
num_layers=num_layers,
max_time=max_time,
compiled=False)
sess.run([variables.global_variables_initializer()])
non_xla_results = sess.run(non_xla_ops)
self.assertAllClose(
non_xla_results["outputs"], xla_results["outputs"], atol=atol)
for xla_value, non_xla_value in zip(xla_results["final_state"],
non_xla_results["final_state"]):
self.assertAllClose(xla_value, non_xla_value, atol=atol)
for xla_g, non_xla_g in zip(xla_results["outputs_grad"],
non_xla_results["outputs_grad"]):
self.assertAllClose(xla_g, non_xla_g, atol=atol)
for xla_g, non_xla_g in zip(xla_results["final_state_grad"],
non_xla_results["final_state_grad"]):
self.assertAllClose(xla_g, non_xla_g, atol=atol)
def testMultiRNNCellWithStateTuple(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m_bad = array_ops.zeros([1, 4])
m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
# Test incorrectness of state
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_bad)
_, ml = rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_good)
sess.run([variables.global_variables_initializer()])
res = sess.run(
ml, {
x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])
})
        # The numbers in results were not calculated by hand; this is just a
        # smoke test. However, these numbers should match those of the test
        # testMultiRNNCell.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
class BenchmarkLSTMCellXLA(test.Benchmark):
def benchmarkDynamicRNNWithMultiLSTMCell(self):
num_layers = 3
max_time = 50
print("benchmarkDynamicRNNWithMultiLSTMCell")
print("\t" + "\t".join([
"inter_th", "intra_th", "batch_size", "num_units", "input_depth",
"device", "compiled", "wall_time"
]))
warmup_run = True
for (threads, device, num_units, batch_size, input_depth,
compiled) in itertools.product([{
"inter": 0,
"intra": 0
}, {
"inter": 1,
"intra": 4
}], ["cpu", "gpu"], [32, 512], [1, 32, 256], [32, 512], [False, True]):
if threads["inter"] != 0:
# We only care about testing inter/intra op limitations on
# CPU with small batch size, to mimic embedded devices.
if device != "cpu" or batch_size != 1:
continue
if device == "cpu" and batch_size > 32:
continue
random_seed.set_random_seed(1234)
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=threads["inter"],
intra_op_parallelism_threads=threads["intra"],
allow_soft_placement=False)
with session.Session(config=config, graph=ops.Graph()) as sess:
with ops.device("/%s:0" % device):
ops_dict = _create_multi_lstm_cell_ops(
batch_size=batch_size,
num_units=num_units,
input_depth=input_depth,
num_layers=num_layers,
max_time=max_time,
compiled=compiled)
sess.run([variables.global_variables_initializer()])
all_ops = nest.flatten(ops_dict.values())
all_ops_group = control_flow_ops.group(*all_ops)
name_suffix = ("inter_th_%d_intra_th_%d_bs_%d_units_%d_inputdepth_%d"
"_device_%s_xla_%s" %
(threads["inter"], threads["intra"], batch_size,
num_units, input_depth, device, compiled))
if warmup_run:
self.run_op_benchmark(
sess, all_ops_group, min_iters=30, name="ignore_warmup")
warmup_run = False
benchmark_results = self.run_op_benchmark(
sess,
all_ops_group,
min_iters=50,
name="benchmarkDynamicRNNWithMultiLSTMCell_%s" % name_suffix)
print("\t" + "\t".join([
"%s" % x
for x in [
threads["inter"], threads["intra"], batch_size, num_units,
input_depth, device, compiled, benchmark_results["wall_time"]
]
]))
class WeightNormLSTMCellTest(test.TestCase):
"""Compared cell output with pre-calculated values."""
def _cell_output(self, cell):
"""Calculates cell output."""
with self.cached_session() as sess:
init = init_ops.constant_initializer(0.5)
with variable_scope.variable_scope("root",
initializer=init):
x = array_ops.zeros([1, 2])
c0 = array_ops.zeros([1, 2])
h0 = array_ops.zeros([1, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
xout, sout = cell()(x, state0)
sess.run([variables.global_variables_initializer()])
res = sess.run([xout, sout], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
})
actual_state_c = res[1].c
actual_state_h = res[1].h
return actual_state_c, actual_state_h
def testBasicCell(self):
"""Tests cell w/o peepholes and w/o normalisation."""
def cell():
return contrib_rnn_cell.WeightNormLSTMCell(2,
norm=False,
use_peepholes=False)
actual_c, actual_h = self._cell_output(cell)
expected_c = np.array([[0.65937078, 0.74983585]])
expected_h = np.array([[0.44923624, 0.49362513]])
self.assertAllClose(expected_c, actual_c, 1e-5)
self.assertAllClose(expected_h, actual_h, 1e-5)
def testNonbasicCell(self):
"""Tests cell with peepholes and w/o normalisation."""
def cell():
return contrib_rnn_cell.WeightNormLSTMCell(2,
norm=False,
use_peepholes=True)
actual_c, actual_h = self._cell_output(cell)
expected_c = np.array([[0.65937084, 0.7574988]])
expected_h = np.array([[0.4792085, 0.53470564]])
self.assertAllClose(expected_c, actual_c, 1e-5)
self.assertAllClose(expected_h, actual_h, 1e-5)
def testBasicCellWithNorm(self):
"""Tests cell w/o peepholes and with normalisation."""
def cell():
return contrib_rnn_cell.WeightNormLSTMCell(2,
norm=True,
use_peepholes=False)
actual_c, actual_h = self._cell_output(cell)
expected_c = np.array([[0.50125383, 0.58805949]])
expected_h = np.array([[0.32770363, 0.37397948]])
self.assertAllClose(expected_c, actual_c, 1e-5)
self.assertAllClose(expected_h, actual_h, 1e-5)
def testNonBasicCellWithNorm(self):
"""Tests cell with peepholes and with normalisation."""
def cell():
return contrib_rnn_cell.WeightNormLSTMCell(2,
norm=True,
use_peepholes=True)
actual_c, actual_h = self._cell_output(cell)
expected_c = np.array([[0.50125383, 0.59587258]])
expected_h = np.array([[0.35041603, 0.40873795]])
self.assertAllClose(expected_c, actual_c, 1e-5)
self.assertAllClose(expected_h, actual_h, 1e-5)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/rnn/python/kernel_tests/rnn_cell_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.rnn.python.ops.fused_rnn_cell."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class FusedRnnCellTest(test.TestCase):
def testBasicRNNFusedWrapper(self):
"""This test checks that using a wrapper for BasicRNN works as expected."""
with self.cached_session() as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
cell = rnn_cell.BasicRNNCell(10)
batch_size = 5
input_size = 20
timelen = 15
inputs = constant_op.constant(
np.random.randn(timelen, batch_size, input_size))
with variable_scope.variable_scope("basic", initializer=initializer):
unpacked_inputs = array_ops.unstack(inputs)
outputs, state = rnn.static_rnn(
cell, unpacked_inputs, dtype=dtypes.float64)
packed_outputs = array_ops.stack(outputs)
basic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("basic/")
]
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_state = sess.run([packed_outputs, state])
basic_grads = sess.run(gradients_impl.gradients(packed_outputs, inputs))
basic_wgrads = sess.run(
gradients_impl.gradients(packed_outputs, basic_vars))
with variable_scope.variable_scope(
"fused_static", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
rnn_cell.BasicRNNCell(10))
outputs, state = fused_cell(inputs, dtype=dtypes.float64)
fused_static_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused_static/")
]
sess.run([variables.global_variables_initializer()])
fused_static_outputs, fused_static_state = sess.run([outputs, state])
fused_static_grads = sess.run(gradients_impl.gradients(outputs, inputs))
fused_static_wgrads = sess.run(
gradients_impl.gradients(outputs, fused_static_vars))
self.assertAllClose(basic_outputs, fused_static_outputs)
self.assertAllClose(basic_state, fused_static_state)
self.assertAllClose(basic_grads, fused_static_grads)
for basic, fused in zip(basic_wgrads, fused_static_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
with variable_scope.variable_scope(
"fused_dynamic", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
rnn_cell.BasicRNNCell(10), use_dynamic_rnn=True)
outputs, state = fused_cell(inputs, dtype=dtypes.float64)
fused_dynamic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused_dynamic/")
]
sess.run([variables.global_variables_initializer()])
fused_dynamic_outputs, fused_dynamic_state = sess.run([outputs, state])
fused_dynamic_grads = sess.run(
gradients_impl.gradients(outputs, inputs))
fused_dynamic_wgrads = sess.run(
gradients_impl.gradients(outputs, fused_dynamic_vars))
self.assertAllClose(basic_outputs, fused_dynamic_outputs)
self.assertAllClose(basic_state, fused_dynamic_state)
self.assertAllClose(basic_grads, fused_dynamic_grads)
for basic, fused in zip(basic_wgrads, fused_dynamic_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
def testTimeReversedFusedRNN(self):
with self.cached_session() as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890213)
fw_cell = rnn_cell.BasicRNNCell(10)
bw_cell = rnn_cell.BasicRNNCell(10)
batch_size = 5
input_size = 20
timelen = 15
inputs = constant_op.constant(
np.random.randn(timelen, batch_size, input_size))
# test bi-directional rnn
with variable_scope.variable_scope("basic", initializer=initializer):
unpacked_inputs = array_ops.unstack(inputs)
outputs, fw_state, bw_state = rnn.static_bidirectional_rnn(
fw_cell, bw_cell, unpacked_inputs, dtype=dtypes.float64)
packed_outputs = array_ops.stack(outputs)
basic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("basic/")
]
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_fw_state, basic_bw_state = sess.run(
[packed_outputs, fw_state, bw_state])
basic_grads = sess.run(gradients_impl.gradients(packed_outputs, inputs))
basic_wgrads = sess.run(
gradients_impl.gradients(packed_outputs, basic_vars))
with variable_scope.variable_scope("fused", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
rnn_cell.BasicRNNCell(10))
fused_bw_cell = fused_rnn_cell.TimeReversedFusedRNN(
fused_rnn_cell.FusedRNNCellAdaptor(rnn_cell.BasicRNNCell(10)))
fw_outputs, fw_state = fused_cell(
inputs, dtype=dtypes.float64, scope="fw")
bw_outputs, bw_state = fused_bw_cell(
inputs, dtype=dtypes.float64, scope="bw")
outputs = array_ops.concat([fw_outputs, bw_outputs], 2)
fused_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused/")
]
sess.run([variables.global_variables_initializer()])
fused_outputs, fused_fw_state, fused_bw_state = sess.run(
[outputs, fw_state, bw_state])
fused_grads = sess.run(gradients_impl.gradients(outputs, inputs))
fused_wgrads = sess.run(gradients_impl.gradients(outputs, fused_vars))
self.assertAllClose(basic_outputs, fused_outputs)
self.assertAllClose(basic_fw_state, fused_fw_state)
self.assertAllClose(basic_bw_state, fused_bw_state)
self.assertAllClose(basic_grads, fused_grads)
for basic, fused in zip(basic_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/rnn/python/kernel_tests/fused_rnn_cell_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for benchmarking OpKernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
from tensorflow.python.framework import ops
def device(use_gpu=False):
"""TensorFlow device to assign ops to."""
if use_gpu:
return ops.device("/gpu:0")
return ops.device("/cpu:0")
def seconds_per_run(op, sess, num_runs=50):
"""Number of seconds taken to execute 'op' once on average."""
for _ in range(2):
sess.run(op)
start_time = time.time()
for _ in range(num_runs):
sess.run(op)
end_time = time.time()
time_taken = (end_time - start_time) / num_runs
return time_taken
def dict_product(dicts):
"""Constructs iterator over outer product of entries in a dict-of-lists.
Example:
    >>> list(dict_product({"a": [1, 2], "b": [3, 4]}))
    [{"a": 1, "b": 3},
     {"a": 1, "b": 4},
     {"a": 2, "b": 3},
     {"a": 2, "b": 4}]
Args:
dicts: dictionary with string keys and list values.
Yields:
Individual dicts from outer product.
"""
keys, values = zip(*dicts.items())
for config_values in itertools.product(*values):
yield dict(zip(keys, config_values))
|
tensorflow-master
|
tensorflow/contrib/rnn/python/kernel_tests/benchmarking.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for the Block GRU Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_rnn_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.util.deprecation import deprecated_args
LayerRNNCell = rnn_cell_impl.LayerRNNCell # pylint: disable=invalid-name
@ops.RegisterGradient("GRUBlockCell")
def _GRUBlockCellGrad(op, *grad):
r"""Gradient for GRUBlockCell.
Args:
op: Op for which the gradient is defined.
*grad: Gradients of the optimization function wrt output
for the Op.
Returns:
    d_x: Gradient with respect to x.
    d_h: Gradient with respect to h.
    d_w_ru: Gradient with respect to w_ru.
    d_w_c: Gradient with respect to w_c.
    d_b_ru: Gradient with respect to b_ru.
    d_b_c: Gradient with respect to b_c.
Mathematics behind the Gradients below:
```
d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
  d_u_bar = d_h \circ (h_prev - c) \circ u \circ (1-u)
d_r_bar_u_bar = [d_r_bar d_u_bar]
[d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T
[d_x_component_2 d_h_prevr] = d_c_bar * w_c^T
d_x = d_x_component_1 + d_x_component_2
  d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
```
Below calculation is performed in the python wrapper for the Gradients
(not in the gradient kernel.)
```
  d_w_ru = x_h_prev^T * d_r_bar_u_bar
  d_w_c = x_h_prevr^T * d_c_bar
d_b_ru = sum of d_r_bar_u_bar along axis = 0
d_b_c = sum of d_c_bar along axis = 0
```
"""
x, h_prev, w_ru, w_c, b_ru, b_c = op.inputs
r, u, c, _ = op.outputs
_, _, _, d_h = grad
d_x, d_h_prev, d_c_bar, d_r_bar_u_bar = gen_rnn_ops.gru_block_cell_grad(
x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h)
x_h_prev = array_ops.concat([x, h_prev], 1)
d_w_ru = math_ops.matmul(x_h_prev, d_r_bar_u_bar, transpose_a=True)
d_b_ru = nn_ops.bias_add_grad(d_r_bar_u_bar)
x_h_prevr = array_ops.concat([x, h_prev * r], 1)
d_w_c = math_ops.matmul(x_h_prevr, d_c_bar, transpose_a=True)
d_b_c = nn_ops.bias_add_grad(d_c_bar)
return d_x, d_h_prev, d_w_ru, d_w_c, d_b_ru, d_b_c
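# Illustrative NumPy sketch (added for exposition; not part of the original
# file and not used by the fused op): it recomputes the GRU forward pass and
# then applies the gradient equations documented on _GRUBlockCellGrad above.
def _gru_block_cell_grad_reference(x, h_prev, w_ru, w_c, b_ru, b_c, d_h):
  import numpy as np  # Local import: numpy is only needed for this sketch.
  sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
  input_size = x.shape[1]
  # Forward pass (same equations as the GRUBlockCell docstring).
  x_h_prev = np.concatenate([x, h_prev], axis=1)
  r, u = np.split(sigmoid(np.dot(x_h_prev, w_ru) + b_ru), 2, axis=1)
  x_h_prevr = np.concatenate([x, h_prev * r], axis=1)
  c = np.tanh(np.dot(x_h_prevr, w_c) + b_c)
  # Backward pass, given the gradient d_h of the loss with respect to h.
  d_c_bar = d_h * (1 - u) * (1 - c * c)
  d_u_bar = d_h * (h_prev - c) * u * (1 - u)
  d_x_component_2, d_h_prevr = np.split(
      np.dot(d_c_bar, w_c.T), [input_size], axis=1)
  d_r_bar = d_h_prevr * h_prev * r * (1 - r)
  d_r_bar_u_bar = np.concatenate([d_r_bar, d_u_bar], axis=1)
  d_x_component_1, d_h_prev_component_1 = np.split(
      np.dot(d_r_bar_u_bar, w_ru.T), [input_size], axis=1)
  d_x = d_x_component_1 + d_x_component_2
  d_h_prev = d_h_prev_component_1 + d_h_prevr * r + d_h * u
  # Weight and bias gradients, mirroring the python wrapper above.
  d_w_ru = np.dot(x_h_prev.T, d_r_bar_u_bar)
  d_w_c = np.dot(x_h_prevr.T, d_c_bar)
  d_b_ru = np.sum(d_r_bar_u_bar, axis=0)
  d_b_c = np.sum(d_c_bar, axis=0)
  return d_x, d_h_prev, d_w_ru, d_w_c, d_b_ru, d_b_c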
class GRUBlockCell(LayerRNNCell):
r"""Block GRU cell implementation.
Deprecated: use GRUBlockCellV2 instead.
The implementation is based on: http://arxiv.org/abs/1406.1078
Computes the GRU cell forward propagation for 1 time step.
This kernel op implements the following mathematical equations:
Biases are initialized with:
* `b_ru` - constant_initializer(1.0)
* `b_c` - constant_initializer(0.0)
```
x_h_prev = [x, h_prev]
[r_bar u_bar] = x_h_prev * w_ru + b_ru
r = sigmoid(r_bar)
u = sigmoid(u_bar)
h_prevr = h_prev \circ r
x_h_prevr = [x h_prevr]
c_bar = x_h_prevr * w_c + b_c
c = tanh(c_bar)
h = (1-u) \circ c + u \circ h_prev
```
"""
@deprecated_args(None, "cell_size is deprecated, use num_units instead",
"cell_size")
def __init__(self,
num_units=None,
cell_size=None,
reuse=None,
name="gru_cell"):
"""Initialize the Block GRU cell.
Args:
num_units: int, The number of units in the GRU cell.
cell_size: int, The old (deprecated) name for `num_units`.
reuse: (optional) boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases. By default this is "lstm_cell", for variable-name compatibility
with `tf.compat.v1.nn.rnn_cell.GRUCell`.
Raises:
ValueError: if both cell_size and num_units are not None;
or both are None.
"""
super(GRUBlockCell, self).__init__(_reuse=reuse, name=name)
if (cell_size is None) == (num_units is None):
raise ValueError(
"Exactly one of num_units or cell_size must be provided.")
if num_units is None:
num_units = cell_size
self._cell_size = num_units
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
@property
def state_size(self):
return self._cell_size
@property
def output_size(self):
return self._cell_size
def build(self, input_shape):
    # Check that the input size is known.
input_size = tensor_shape.dimension_value(input_shape[1])
if input_size is None:
raise ValueError("Expecting input_size to be set.")
self._gate_kernel = self.add_variable(
"w_ru", [input_size + self._cell_size, self._cell_size * 2])
self._gate_bias = self.add_variable(
"b_ru", [self._cell_size * 2],
initializer=init_ops.constant_initializer(1.0))
self._candidate_kernel = self.add_variable(
"w_c", [input_size + self._cell_size, self._cell_size])
self._candidate_bias = self.add_variable(
"b_c", [self._cell_size],
initializer=init_ops.constant_initializer(0.0))
self.built = True
def call(self, inputs, h_prev):
"""GRU cell."""
# Check cell_size == state_size from h_prev.
cell_size = h_prev.get_shape().with_rank(2)[1]
if cell_size != self._cell_size:
raise ValueError("Shape of h_prev[1] incorrect: cell_size %i vs %s" %
(self._cell_size, cell_size))
_gru_block_cell = gen_rnn_ops.gru_block_cell # pylint: disable=invalid-name
_, _, _, new_h = _gru_block_cell(
x=inputs,
h_prev=h_prev,
w_ru=self._gate_kernel,
w_c=self._candidate_kernel,
b_ru=self._gate_bias,
b_c=self._candidate_bias)
return new_h, new_h
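# Minimal NumPy sketch (added for exposition; not part of the original file)
# of the forward equations in the GRUBlockCell docstring above; the fused
# kernel computes the same quantities in a single op.
def _gru_block_cell_reference(x, h_prev, w_ru, w_c, b_ru, b_c):
  import numpy as np  # Local import: numpy is only needed for this sketch.
  sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
  x_h_prev = np.concatenate([x, h_prev], axis=1)
  r, u = np.split(sigmoid(np.dot(x_h_prev, w_ru) + b_ru), 2, axis=1)
  x_h_prevr = np.concatenate([x, h_prev * r], axis=1)
  c = np.tanh(np.dot(x_h_prevr, w_c) + b_c)
  # h = (1 - u) * c + u * h_prev, returned as both output and next state.
  return (1.0 - u) * c + u * h_prev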
class GRUBlockCellV2(GRUBlockCell):
"""Temporary GRUBlockCell impl with a different variable naming scheme.
Only differs from GRUBlockCell by variable names.
"""
def build(self, input_shape):
"""GRU cell."""
input_size = tensor_shape.dimension_value(input_shape[1])
if input_size is None:
raise ValueError("Expecting input_size to be set.")
self._gate_kernel = self.add_variable(
"gates/kernel", [input_size + self._cell_size, self._cell_size * 2])
self._gate_bias = self.add_variable(
"gates/bias", [self._cell_size * 2],
initializer=init_ops.constant_initializer(1.0))
self._candidate_kernel = self.add_variable(
"candidate/kernel", [input_size + self._cell_size, self._cell_size])
self._candidate_bias = self.add_variable(
"candidate/bias", [self._cell_size],
initializer=init_ops.constant_initializer(0.0))
self.built = True
|
tensorflow-master
|
tensorflow/contrib/rnn/python/ops/gru_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells that used to be in core.
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
# pylint: disable=protected-access,invalid-name
RNNCell = rnn_cell_impl.RNNCell
_WEIGHTS_VARIABLE_NAME = rnn_cell_impl._WEIGHTS_VARIABLE_NAME
_BIAS_VARIABLE_NAME = rnn_cell_impl._BIAS_VARIABLE_NAME
# pylint: enable=protected-access,invalid-name
class _Linear(object):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of weight variable.
dtype: data type for variables.
build_bias: boolean, whether to build a bias variable.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Raises:
ValueError: if inputs_shape is wrong.
"""
def __init__(self,
args,
output_size,
build_bias,
bias_initializer=None,
kernel_initializer=None):
self._build_bias = build_bias
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
self._is_sequence = False
else:
self._is_sequence = True
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape.dims[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape.dims[1].value
dtype = [a.dtype for a in args][0]
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
self._weights = vs.get_variable(
_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if build_bias:
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
self._biases = vs.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=bias_initializer)
def __call__(self, args):
if not self._is_sequence:
args = [args]
if len(args) == 1:
res = math_ops.matmul(args[0], self._weights)
else:
# Explicitly creating a one for a minor performance improvement.
one = constant_op.constant(1, dtype=dtypes.int32)
res = math_ops.matmul(array_ops.concat(args, one), self._weights)
if self._build_bias:
res = nn_ops.bias_add(res, self._biases)
return res
# TODO(xpan): Remove this function in a follow up.
def _linear(args,
output_size,
bias,
bias_initializer=None,
kernel_initializer=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Returns:
A 2D Tensor with shape `[batch, output_size]` equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape.dims[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape.dims[1].value
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
biases = vs.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=bias_initializer)
return nn_ops.bias_add(res, biases)
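# Minimal usage sketch (added for exposition; not part of the original file)
# for the _Linear helper above; the argument names below are hypothetical.
# The helper builds one shared [total_arg_size, output_size] kernel (plus an
# optional bias) and applies it to the concatenation of its 2-D arguments.
def _linear_usage_example(inputs_a, inputs_b, output_size):
  with vs.variable_scope("linear_example"):
    projection = _Linear([inputs_a, inputs_b], output_size, build_bias=True)
    # Returns a [batch, output_size] tensor: concat(args, 1) * kernel + bias.
    return projection([inputs_a, inputs_b])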
class EmbeddingWrapper(RNNCell):
"""Operator adding input embedding to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the embedding on this batch-concatenated sequence, then split it and
feed into your RNN.
"""
def __init__(self,
cell,
embedding_classes,
embedding_size,
initializer=None,
reuse=None):
"""Create a cell with an added input embedding.
Args:
cell: an RNNCell, an embedding will be put before its inputs.
embedding_classes: integer, how many symbols will be embedded.
embedding_size: integer, the size of the vectors we embed into.
initializer: an initializer to use when creating the embedding;
if None, the initializer from variable scope or a default one is used.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if embedding_classes is not positive.
"""
super(EmbeddingWrapper, self).__init__(_reuse=reuse)
rnn_cell_impl.assert_like_rnncell("cell", cell)
if embedding_classes <= 0 or embedding_size <= 0:
raise ValueError("Both embedding_classes and embedding_size must be > 0: "
"%d, %d." % (embedding_classes, embedding_size))
self._cell = cell
self._embedding_classes = embedding_classes
self._embedding_size = embedding_size
self._initializer = initializer
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def call(self, inputs, state):
"""Run the cell on embedded inputs."""
with ops.device("/cpu:0"):
if self._initializer:
initializer = self._initializer
elif vs.get_variable_scope().initializer:
initializer = vs.get_variable_scope().initializer
else:
# Default initializer for embeddings should have variance=1.
sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1.
initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
if isinstance(state, tuple):
data_type = state[0].dtype
else:
data_type = state.dtype
embedding = vs.get_variable(
"embedding", [self._embedding_classes, self._embedding_size],
initializer=initializer,
dtype=data_type)
embedded = embedding_ops.embedding_lookup(embedding,
array_ops.reshape(inputs, [-1]))
return self._cell(embedded, state)
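# Sketch (added for exposition; not part of the original file) of the
# alternative suggested in the EmbeddingWrapper docstring above: embed the
# whole time-major batch of symbol ids once, outside the cell, and feed the
# embedded tensor to the RNN directly. The names below are hypothetical.
def _embed_whole_sequence_example(input_ids, embedding_classes,
                                  embedding_size):
  embedding = vs.get_variable("embedding",
                              [embedding_classes, embedding_size])
  flat_ids = array_ops.reshape(input_ids, [-1])
  embedded = embedding_ops.embedding_lookup(embedding, flat_ids)
  # Restore the [time_len, batch_size, embedding_size] layout so the result
  # can be passed to dynamic_rnn / static_rnn instead of using this wrapper.
  new_shape = array_ops.concat(
      [array_ops.shape(input_ids), [embedding_size]], axis=0)
  return array_ops.reshape(embedded, new_shape)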
class InputProjectionWrapper(RNNCell):
"""Operator adding an input projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the projection on this batch-concatenated sequence, then split it.
"""
def __init__(self,
cell,
num_proj,
activation=None,
input_size=None,
reuse=None):
"""Create a cell with input projection.
Args:
cell: an RNNCell, a projection of inputs is added before it.
num_proj: Python integer. The dimension to project to.
      activation: (optional) activation function to apply to the projection.
input_size: Deprecated and unused.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
TypeError: if cell is not an RNNCell.
"""
super(InputProjectionWrapper, self).__init__(_reuse=reuse)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
rnn_cell_impl.assert_like_rnncell("cell", cell)
self._cell = cell
self._num_proj = num_proj
self._activation = activation
self._linear = None
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def call(self, inputs, state):
"""Run the input projection and then the cell."""
# Default scope: "InputProjectionWrapper"
if self._linear is None:
self._linear = _Linear(inputs, self._num_proj, True)
projected = self._linear(inputs)
if self._activation:
projected = self._activation(projected)
return self._cell(projected, state)
class OutputProjectionWrapper(RNNCell):
"""Operator adding an output projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your outputs in time,
do the projection on this batch-concatenated sequence, then split it
if needed or directly feed into a softmax.
"""
def __init__(self, cell, output_size, activation=None, reuse=None):
"""Create a cell with output projection.
Args:
cell: an RNNCell, a projection to output_size is added to it.
output_size: integer, the size of the output after projection.
      activation: (optional) activation function to apply to the projection.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if output_size is not positive.
"""
super(OutputProjectionWrapper, self).__init__(_reuse=reuse)
rnn_cell_impl.assert_like_rnncell("cell", cell)
if output_size < 1:
raise ValueError("Parameter output_size must be > 0: %d." % output_size)
self._cell = cell
self._output_size = output_size
self._activation = activation
self._linear = None
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def call(self, inputs, state):
"""Run the cell and output projection on inputs, starting from state."""
output, res_state = self._cell(inputs, state)
if self._linear is None:
self._linear = _Linear(output, self._output_size, True)
projected = self._linear(output)
if self._activation:
projected = self._activation(projected)
return projected, res_state
|
tensorflow-master
|
tensorflow/contrib/rnn/python/ops/core_rnn_cell.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_rnn_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
LayerRNNCell = rnn_cell_impl.LayerRNNCell # pylint: disable=invalid-name
# pylint: disable=invalid-name
def _lstm_block_cell(x,
cs_prev,
h_prev,
w,
b,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, ci, f, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
Value to clip the 'cs' value to. Disable by setting to negative value.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
Raises:
ValueError: If cell_size is None.
"""
if wci is None:
cell_size = cs_prev.get_shape().with_rank(2).dims[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
return gen_rnn_ops.lstm_block_cell(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
use_peephole=use_peephole,
name=name)
# pylint: enable=protected-access
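# Minimal NumPy sketch (added for exposition; not part of the original file)
# of the equations documented on _lstm_block_cell above, with peepholes and
# cell clipping omitted for brevity.
def _lstm_block_cell_reference(x, cs_prev, h_prev, w, b, forget_bias=1.0):
  import numpy as np  # Local import: numpy is only needed for this sketch.
  sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
  xh = np.concatenate([x, h_prev], axis=1)
  i, ci, f, o = np.split(np.dot(xh, w) + b, 4, axis=1)
  i = sigmoid(i)
  f = sigmoid(f + forget_bias)
  ci = np.tanh(ci)
  cs = ci * i + cs_prev * f
  co = np.tanh(cs)
  h = co * sigmoid(o)
  return cs, h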
def _block_lstm(seq_len_max,
x,
w,
b,
cs_prev=None,
h_prev=None,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""TODO(williamchan): add doc.
Args:
seq_len_max: A `Tensor` of type `int64`.
x: A list of at least 1 `Tensor` objects of the same type.
w: A `Tensor`. Must have the same type as `x`.
b: A `Tensor`. Must have the same type as `x`.
cs_prev: A `Tensor`. Must have the same type as `x`.
h_prev: A `Tensor`. Must have the same type as `x`.
wci: A `Tensor`. Must have the same type as `x`.
wcf: A `Tensor`. Must have the same type as `x`.
wco: A `Tensor`. Must have the same type as `x`.
forget_bias: An optional `float`. Defaults to `1`.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
use_peephole: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
f: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
o: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
co: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
h: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
Raises:
ValueError: If `b` does not have a valid shape.
"""
dtype = x[0].dtype
batch_size = x[0].get_shape().with_rank(2).dims[0].value
cell_size4 = b.get_shape().with_rank(1).dims[0].value
if cell_size4 is None:
raise ValueError("`b` shape must not be None.")
  cell_size = cell_size4 // 4
zero_state = None
if cs_prev is None or h_prev is None:
zero_state = array_ops.constant(
0, dtype=dtype, shape=[batch_size, cell_size])
if cs_prev is None:
cs_prev = zero_state
if h_prev is None:
h_prev = zero_state
if wci is None:
wci = array_ops.constant(0, dtype=dtype, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
i, cs, f, o, ci, co, h = gen_rnn_ops.block_lstm(
seq_len_max=seq_len_max,
x=array_ops.stack(x),
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
name=name,
use_peephole=use_peephole)
return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
co), array_ops.unstack(h)
# pylint: enable=protected-access
# pylint: enable=invalid-name
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]
@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
"""Gradient for LSTMBlockCell."""
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = op.inputs
(i, cs, f, o, ci, co, _) = op.outputs
(_, cs_grad, _, _, _, _, h_grad) = grad
batch_size = x.get_shape().with_rank(2).dims[0].value
if batch_size is None:
batch_size = -1
input_size = x.get_shape().with_rank(2).dims[1].value
if input_size is None:
raise ValueError("input_size from `x` should not be None.")
cell_size = cs_prev.get_shape().with_rank(2).dims[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
(cs_prev_grad, dicfo, wci_grad, wcf_grad,
wco_grad) = gen_rnn_ops.lstm_block_cell_grad(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
i=i,
cs=cs,
f=f,
o=o,
ci=ci,
co=co,
cs_grad=cs_grad,
h_grad=h_grad,
use_peephole=op.get_attr("use_peephole"))
# Backprop from dicfo to xh.
xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)
x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
x_grad.get_shape().merge_with(x.get_shape())
h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
(batch_size, cell_size))
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
# Backprop from dicfo to w.
xh = array_ops.concat([x, h_prev], 1)
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
w_grad.get_shape().merge_with(w.get_shape())
# Backprop from dicfo to b.
b_grad = nn_ops.bias_add_grad(dicfo)
b_grad.get_shape().merge_with(b.get_shape())
return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
"""Gradient for BlockLSTM."""
seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs
i, cs, f, o, ci, co, h = op.outputs
cs_grad = grad[1]
h_grad = grad[6]
(x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad,
b_grad) = gen_rnn_ops.block_lstm_grad(
seq_len_max=seq_len_max,
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
i=i,
cs=cs,
f=f,
o=o,
ci=ci,
co=co,
h=h,
cs_grad=cs_grad,
h_grad=h_grad,
use_peephole=op.get_attr("use_peephole"))
return [
None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad
]
class LSTMBlockCell(LayerRNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
  We add `forget_bias` (default: 1) to the biases of the forget gate in order
  to reduce the scale of forgetting at the beginning of training.
Unlike `rnn_cell_impl.LSTMCell`, this is a monolithic op and should be much
faster. The weight and bias matrices should be compatible as long as the
variable scope matches.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False,
dtype=None,
reuse=None,
name="lstm_cell"):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
use_peephole: Whether to use peephole connections or not.
dtype: the variable dtype of this layer. Default to tf.float32.
reuse: (optional) boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases. By default this is "lstm_cell", for variable-name compatibility
with `tf.compat.v1.nn.rnn_cell.LSTMCell`.
        When restoring from CudnnLSTM-trained checkpoints, you must use
        CudnnCompatibleLSTMBlockCell instead.
"""
super(LSTMBlockCell, self).__init__(_reuse=reuse, dtype=dtype, name=name)
self._num_units = num_units
self._forget_bias = forget_bias
self._use_peephole = use_peephole
self._cell_clip = cell_clip if cell_clip is not None else -1
self._names = {
"W": "kernel",
"b": "bias",
"wci": "w_i_diag",
"wcf": "w_f_diag",
"wco": "w_o_diag",
"scope": "lstm_cell"
}
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
if not inputs_shape.dims[1].value:
raise ValueError(
"Expecting inputs_shape[1] to be set: %s" % str(inputs_shape))
input_size = inputs_shape.dims[1].value
self._kernel = self.add_variable(
self._names["W"], [input_size + self._num_units, self._num_units * 4])
self._bias = self.add_variable(
self._names["b"], [self._num_units * 4],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
self._w_i_diag = self.add_variable(self._names["wci"], [self._num_units])
self._w_f_diag = self.add_variable(self._names["wcf"], [self._num_units])
self._w_o_diag = self.add_variable(self._names["wco"], [self._num_units])
self.built = True
def call(self, inputs, state):
"""Long short-term memory cell (LSTM)."""
if len(state) != 2:
raise ValueError("Expecting state to be a tuple with length 2.")
if self._use_peephole:
wci = self._w_i_diag
wcf = self._w_f_diag
wco = self._w_o_diag
else:
wci = wcf = wco = array_ops.zeros([self._num_units], dtype=self.dtype)
(cs_prev, h_prev) = state
(_, cs, _, _, _, _, h) = _lstm_block_cell(
inputs,
cs_prev,
h_prev,
self._kernel,
self._bias,
wci=wci,
wcf=wcf,
wco=wco,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
new_state = rnn_cell_impl.LSTMStateTuple(cs, h)
return h, new_state
@six.add_metaclass(abc.ABCMeta)
class LSTMBlockWrapper(base_layer.Layer):
"""This is a helper class that provides housekeeping for LSTM cells.
  This may be useful for alternative LSTM and similar types of cells.
  The subclasses must implement the `_call_cell` method and the `num_units`
  property.
"""
@abc.abstractproperty
def num_units(self):
"""Number of units in this cell (output dimension)."""
pass
@abc.abstractmethod
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
This method must be implemented by subclasses and does the actual work
of calling the cell.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An int32
or int64 vector (tensor) size [batch_size], values in [0, time_len) or
None.
Returns:
A pair containing:
- State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
"""
pass
def call(self, inputs, initial_state=None, dtype=None, sequence_length=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`.
initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
of shape `[batch_size, self._num_units]`. If this is not provided, the
cell is expected to create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len).`
Defaults to `time_len` for each element.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
or a list of time_len tensors of shape `[batch_size, output_size]`,
to match the type of the `inputs`.
- Final state: a tuple `(cell_state, output)` matching `initial_state`.
Raises:
ValueError: in case of shape mismatches
"""
is_list = isinstance(inputs, list)
if is_list:
inputs = array_ops.stack(inputs)
inputs_shape = inputs.get_shape().with_rank(3)
if not inputs_shape[2]:
raise ValueError("Expecting inputs_shape[2] to be set: %s" % inputs_shape)
batch_size = inputs_shape.dims[1].value
if batch_size is None:
batch_size = array_ops.shape(inputs)[1]
time_len = inputs_shape.dims[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
# Provide default values for initial_state and dtype
if initial_state is None:
if dtype is None:
raise ValueError("Either initial_state or dtype needs to be specified")
z = array_ops.zeros(
array_ops.stack([batch_size, self.num_units]), dtype=dtype)
initial_state = z, z
else:
if len(initial_state) != 2:
raise ValueError(
"Expecting initial_state to be a tuple with length 2 or None")
if dtype is None:
dtype = initial_state[0].dtype
# create the actual cell
if sequence_length is not None:
sequence_length = ops.convert_to_tensor(sequence_length)
initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence
cell_states, outputs = self._call_cell(
inputs, initial_cell_state, initial_output, dtype, sequence_length)
if sequence_length is not None:
# Mask out the part beyond sequence_length
mask = array_ops.transpose(
array_ops.sequence_mask(sequence_length, time_len, dtype=dtype),
[1, 0])
mask = array_ops.tile(
array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
outputs *= mask
      # Prepend initial states to cell_states and outputs for indexing to work
      # correctly, since we want to access the last valid state at
      # sequence_length - 1, which can even be -1, corresponding to the
      # initial state.
mod_cell_states = array_ops.concat(
[array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
mod_outputs = array_ops.concat(
[array_ops.expand_dims(initial_output, [0]), outputs], 0)
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
batch_size)
final_output = self._gather_states(mod_outputs, sequence_length,
batch_size)
else:
# No sequence_lengths used: final state is the last state
final_cell_state = cell_states[-1]
final_output = outputs[-1]
if is_list:
# Input was a list, so return a list
outputs = array_ops.unstack(outputs)
final_state = rnn_cell_impl.LSTMStateTuple(final_cell_state, final_output)
return outputs, final_state
def _gather_states(self, data, indices, batch_size):
"""Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
return array_ops.gather_nd(
data, array_ops.stack([indices, math_ops.range(batch_size)], axis=1))
class LSTMBlockFusedCell(LSTMBlockWrapper):
"""FusedRNNCell implementation of LSTM.
This is an extremely efficient LSTM implementation that uses a single TF op
for the entire LSTM. It should be both faster and more memory-efficient than
LSTMBlockCell defined above.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
The variable naming is consistent with `rnn_cell_impl.LSTMCell`.
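A minimal usage sketch (the shapes, the `tf.contrib.rnn` export path and the
TF 1.x graph-mode placeholders below are illustrative assumptions):
```python
inputs = tf.placeholder(tf.float32, [20, 32, 128])  # [time_len, batch, input_size]
lstm = tf.contrib.rnn.LSTMBlockFusedCell(num_units=256)
# `outputs` is time-major, [20, 32, 256]; `state` is an LSTMStateTuple.
outputs, state = lstm(inputs, dtype=tf.float32)
```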
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False,
reuse=None,
dtype=None,
name="lstm_fused_cell"):
"""Initialize the LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: clip the cell state to this value. Default is no cell clipping.
use_peephole: Whether to use peephole connections or not.
reuse: (optional) boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
dtype: the dtype of variables of this layer.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases. By default this is "lstm_fused_cell".
"""
super(LSTMBlockFusedCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype)
self._num_units = num_units
self._forget_bias = forget_bias
self._cell_clip = cell_clip if cell_clip is not None else -1
self._use_peephole = use_peephole
# Inputs must be 3-dimensional.
self.input_spec = input_spec.InputSpec(ndim=3)
@property
def num_units(self):
"""Number of units in this cell (output dimension)."""
return self._num_units
def build(self, input_shape):
input_size = input_shape.dims[2].value
self._kernel = self.add_variable(
"kernel", [input_size + self._num_units, self._num_units * 4])
self._bias = self.add_variable(
"bias", [self._num_units * 4],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
self._w_i_diag = self.add_variable("w_i_diag", [self._num_units])
self._w_f_diag = self.add_variable("w_f_diag", [self._num_units])
self._w_o_diag = self.add_variable("w_o_diag", [self._num_units])
self.built = True
def _call_cell(self,
inputs,
initial_cell_state=None,
initial_output=None,
dtype=None,
sequence_length=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)` or None.
Returns:
A pair containing:
- Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
- Output (h): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
"""
inputs_shape = inputs.get_shape().with_rank(3)
time_len = inputs_shape.dims[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
if self._use_peephole:
wci = self._w_i_diag
wco = self._w_o_diag
wcf = self._w_f_diag
else:
wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = math_ops.cast(time_len, dtypes.int64)
else:
max_seq_len = math_ops.cast(math_ops.reduce_max(sequence_length),
dtypes.int64)
_, cs, _, _, _, _, h = gen_rnn_ops.block_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=self._kernel,
wci=wci,
wcf=wcf,
wco=wco,
b=self._bias,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
return cs, h
|
tensorflow-master
|
tensorflow/contrib/rnn/python/ops/lstm_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.compiler import jit
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables # pylint: disable=unused-import
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
concat_name = name + "/concat"
concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
concat_variable = array_ops.concat(sharded_variable, 0, name=concat_name)
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES, concat_variable)
return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" % (shape,
num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
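# The remainder is spread over the first shards; e.g. shape[0]=10 with
# num_shards=3 yields shard sizes [4, 3, 3].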
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(
vs.get_variable(
name + "_%d" % i, [current_size] + shape[1:], dtype=dtype))
return shards
def _norm(g, b, inp, scope):
shape = inp.get_shape()[-1:]
gamma_init = init_ops.constant_initializer(g)
beta_init = init_ops.constant_initializer(b)
with vs.variable_scope(scope):
# Initialize beta and gamma for use by layer_norm.
vs.get_variable("gamma", shape=shape, initializer=gamma_init)
vs.get_variable("beta", shape=shape, initializer=beta_init)
normalized = layers.layer_norm(inp, reuse=True, scope=scope)
return normalized
class CoupledInputForgetGateLSTMCell(rnn_cell_impl.RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf
Felix Gers, Jurgen Schmidhuber, and Fred Cummins.
"Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The coupling of input and forget gate is based on:
http://arxiv.org/pdf/1503.04069.pdf
Greff et al. "LSTM: A Search Space Odyssey"
The class uses optional peep-hole connections, and an optional projection
layer.
Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
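A minimal single-step usage sketch (the shapes and the `tf.contrib.rnn`
export path below are illustrative assumptions, TF 1.x graph mode):
```python
cell = tf.contrib.rnn.CoupledInputForgetGateLSTMCell(
    num_units=128, use_peepholes=True)
inputs = tf.placeholder(tf.float32, [32, 64])     # [batch, input_size]
state = cell.zero_state(32, tf.float32)           # LSTMStateTuple of zeros
output, new_state = cell(inputs, state)
```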
"""
def __init__(self,
num_units,
use_peepholes=False,
initializer=None,
num_proj=None,
proj_clip=None,
num_unit_shards=1,
num_proj_shards=1,
forget_bias=1.0,
state_is_tuple=True,
activation=math_ops.tanh,
reuse=None,
layer_norm=False,
norm_gain=1.0,
norm_shift=0.0):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
num_proj_shards: How to split the projection matrix. If >1, the
projection matrix is stored across num_proj_shards.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
"""
super(CoupledInputForgetGateLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
self._reuse = reuse
self._layer_norm = layer_norm
self._norm_gain = norm_gain
self._norm_shift = norm_shift
if num_proj:
self._state_size = (
rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
rnn_cell_impl.LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x input_size.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
num_proj = self._num_units if self._num_proj is None else self._num_proj
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2).dims[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
concat_w = _get_concat_variable(
"W",
[input_size.value + num_proj, 3 * self._num_units],
dtype,
self._num_unit_shards)
b = vs.get_variable(
"B",
shape=[3 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
# j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat([inputs, m_prev], 1)
lstm_matrix = math_ops.matmul(cell_inputs, concat_w)
# If layer normalization is applied, do not add bias
if not self._layer_norm:
lstm_matrix = nn_ops.bias_add(lstm_matrix, b)
j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=3, axis=1)
# Apply layer normalization
if self._layer_norm:
j = _norm(self._norm_gain, self._norm_shift, j, "transform")
f = _norm(self._norm_gain, self._norm_shift, f, "forget")
o = _norm(self._norm_gain, self._norm_shift, o, "output")
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
f_act = sigmoid(f + self._forget_bias + w_f_diag * c_prev)
else:
f_act = sigmoid(f + self._forget_bias)
c = (f_act * c_prev + (1 - f_act) * self._activation(j))
# Apply layer normalization
if self._layer_norm:
c = _norm(self._norm_gain, self._norm_shift, c, "state")
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
concat_w_proj = _get_concat_variable("W_P",
[self._num_units, self._num_proj],
dtype, self._num_proj_shards)
m = math_ops.matmul(m, concat_w_proj)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (
rnn_cell_impl.LSTMStateTuple(c, m)
if self._state_is_tuple else array_ops.concat([c, m], 1))
return m, new_state
class TimeFreqLSTMCell(rnn_cell_impl.RNNCell):
"""Time-Frequency Long short-term memory unit (LSTM) recurrent network cell.
This implementation is based on:
Tara N. Sainath and Bo Li
"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
for LVCSR Tasks." submitted to INTERSPEECH, 2016.
It uses peep-hole connections and optional cell clipping.
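A minimal usage sketch (illustrative shapes and `tf.contrib.rnn` export path;
feature_size equals the input size here so there is a single frequency step):
```python
cell = tf.contrib.rnn.TimeFreqLSTMCell(
    num_units=64, feature_size=40, frequency_skip=1)
inputs = tf.placeholder(tf.float32, [32, 40])   # [batch, input_size]
state = cell.zero_state(32, tf.float32)         # [batch, 2 * num_units]
output, new_state = cell(inputs, state)
```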
"""
def __init__(self,
num_units,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_unit_shards=1,
forget_bias=1.0,
feature_size=None,
frequency_skip=1,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_unit_shards: int, How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
forget_bias: float, Biases of the forget gate are initialized by default
to 1 in order to reduce the scale of forgetting at the beginning
of the training.
feature_size: int, The size of the input feature the LSTM spans over.
frequency_skip: int, The amount the LSTM filter is shifted by in
frequency.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(TimeFreqLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_unit_shards = num_unit_shards
self._forget_bias = forget_bias
self._feature_size = feature_size
self._frequency_skip = frequency_skip
self._state_size = 2 * num_units
self._output_size = num_units
self._reuse = reuse
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: state Tensor, 2D, batch x state_size.
Returns:
A tuple containing:
- A 2D, batch x output_dim, Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, batch x state_size, Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
freq_inputs = self._make_tf_features(inputs)
dtype = inputs.dtype
actual_input_size = freq_inputs[0].get_shape().as_list()[1]
concat_w = _get_concat_variable(
"W", [actual_input_size + 2 * self._num_units, 4 * self._num_units],
dtype, self._num_unit_shards)
b = vs.get_variable(
"B",
shape=[4 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"W_I_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
# initialize the first freq state to be zero
m_prev_freq = array_ops.zeros(
[inputs.shape.dims[0].value or inputs.get_shape()[0], self._num_units],
dtype)
for fq in range(len(freq_inputs)):
c_prev = array_ops.slice(state, [0, 2 * fq * self._num_units],
[-1, self._num_units])
m_prev = array_ops.slice(state, [0, (2 * fq + 1) * self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat([freq_inputs[fq], m_prev, m_prev_freq], 1)
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
if self._use_peepholes:
c = (
sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * tanh(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * tanh(c)
else:
m = sigmoid(o) * tanh(c)
m_prev_freq = m
if fq == 0:
state_out = array_ops.concat([c, m], 1)
m_out = m
else:
state_out = array_ops.concat([state_out, c, m], 1)
m_out = array_ops.concat([m_out, m], 1)
return m_out, state_out
def _make_tf_features(self, input_feat):
"""Make the frequency features.
Args:
input_feat: input Tensor, 2D, batch x num_units.
Returns:
A list of frequency features, with each element containing:
- A 2D, batch x output_dim, Tensor representing the time-frequency feature
for that frequency index. Here output_dim is feature_size.
Raises:
ValueError: if input_size cannot be inferred from static shape inference.
"""
input_size = input_feat.get_shape().with_rank(2).dims[-1].value
if input_size is None:
raise ValueError("Cannot infer input_size from static shape inference.")
num_feats = int(
(input_size - self._feature_size) / (self._frequency_skip)) + 1
freq_inputs = []
for f in range(num_feats):
cur_input = array_ops.slice(input_feat, [0, f * self._frequency_skip],
[-1, self._feature_size])
freq_inputs.append(cur_input)
return freq_inputs
class GridLSTMCell(rnn_cell_impl.RNNCell):
"""Grid Long short-term memory unit (LSTM) recurrent network cell.
The default is based on:
Nal Kalchbrenner, Ivo Danihelka and Alex Graves
"Grid Long Short-Term Memory," Proc. ICLR 2016.
http://arxiv.org/abs/1507.01526
When peephole connections are used, the implementation is based on:
Tara N. Sainath and Bo Li
"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
for LVCSR Tasks." submitted to INTERSPEECH, 2016.
The code uses optional peephole connections, shared_weights and cell clipping.
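A minimal usage sketch (illustrative shapes and `tf.contrib.rnn` export path;
(40 - 8) // 1 + 1 = 33 frequency steps, so num_frequency_blocks=[33]):
```python
cell = tf.contrib.rnn.GridLSTMCell(
    num_units=64, feature_size=8, frequency_skip=1,
    num_frequency_blocks=[33])
inputs = tf.placeholder(tf.float32, [32, 40])   # [batch, input_size]
state = cell.zero_state(32, tf.float32)         # a GridLSTMStateTuple
output, new_state = cell(inputs, state)
```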
"""
def __init__(self,
num_units,
use_peepholes=False,
share_time_frequency_weights=False,
cell_clip=None,
initializer=None,
num_unit_shards=1,
forget_bias=1.0,
feature_size=None,
frequency_skip=None,
num_frequency_blocks=None,
start_freqindex_list=None,
end_freqindex_list=None,
couple_input_forget_gates=False,
state_is_tuple=True,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: (optional) bool, default False. Set True to enable
diagonal/peephole connections.
share_time_frequency_weights: (optional) bool, default False. Set True to
enable shared cell weights between time and frequency LSTMs.
cell_clip: (optional) A float value, default None, if provided the cell
state is clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
matrix. If > 1, the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
feature_size: (optional) int, default None, The size of the input feature
the LSTM spans over.
frequency_skip: (optional) int, default None, The amount the LSTM filter
is shifted by in frequency.
num_frequency_blocks: [required] A list of frequency blocks needed to
cover the whole input feature splitting defined by start_freqindex_list
and end_freqindex_list.
start_freqindex_list: [optional], list of ints, default None, The
starting frequency index for each frequency block.
end_freqindex_list: [optional], list of ints, default None. The ending
frequency index for each frequency block.
couple_input_forget_gates: (optional) bool, default False, Whether to
couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
model parameters and computation cost.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
ValueError: if the num_frequency_blocks list is not specified
"""
super(GridLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._share_time_frequency_weights = share_time_frequency_weights
self._couple_input_forget_gates = couple_input_forget_gates
self._state_is_tuple = state_is_tuple
self._cell_clip = cell_clip
self._initializer = initializer
self._num_unit_shards = num_unit_shards
self._forget_bias = forget_bias
self._feature_size = feature_size
self._frequency_skip = frequency_skip
self._start_freqindex_list = start_freqindex_list
self._end_freqindex_list = end_freqindex_list
self._num_frequency_blocks = num_frequency_blocks
self._total_blocks = 0
self._reuse = reuse
if self._num_frequency_blocks is None:
raise ValueError("Must specify num_frequency_blocks")
for block_index in range(len(self._num_frequency_blocks)):
self._total_blocks += int(self._num_frequency_blocks[block_index])
if state_is_tuple:
state_names = ""
for block_index in range(len(self._num_frequency_blocks)):
for freq_index in range(self._num_frequency_blocks[block_index]):
name_prefix = "state_f%02d_b%02d" % (freq_index, block_index)
state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
self._state_tuple_type = collections.namedtuple("GridLSTMStateTuple",
state_names.strip(","))
self._state_size = self._state_tuple_type(*(
[num_units, num_units] * self._total_blocks))
else:
self._state_tuple_type = None
self._state_size = num_units * self._total_blocks * 2
self._output_size = num_units * self._total_blocks * 2
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
@property
def state_tuple_type(self):
return self._state_tuple_type
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, [batch, feature_size].
state: Tensor or tuple of Tensors, 2D, [batch, state_size], depends on the
flag self._state_is_tuple.
Returns:
A tuple containing:
- A 2D, [batch, output_dim], Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, [batch, state_size], Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
batch_size = tensor_shape.dimension_value(
inputs.shape[0]) or array_ops.shape(inputs)[0]
freq_inputs = self._make_tf_features(inputs)
m_out_lst = []
state_out_lst = []
for block in range(len(freq_inputs)):
m_out_lst_current, state_out_lst_current = self._compute(
freq_inputs[block],
block,
state,
batch_size,
state_is_tuple=self._state_is_tuple)
m_out_lst.extend(m_out_lst_current)
state_out_lst.extend(state_out_lst_current)
if self._state_is_tuple:
state_out = self._state_tuple_type(*state_out_lst)
else:
state_out = array_ops.concat(state_out_lst, 1)
m_out = array_ops.concat(m_out_lst, 1)
return m_out, state_out
def _compute(self,
freq_inputs,
block,
state,
batch_size,
state_prefix="state",
state_is_tuple=True):
"""Run the actual computation of one step LSTM.
Args:
freq_inputs: list of Tensors, 2D, [batch, feature_size].
block: int, current frequency block index to process.
state: Tensor or tuple of Tensors, 2D, [batch, state_size], it depends on
the flag state_is_tuple.
batch_size: int32, batch size.
state_prefix: (optional) string, name prefix for states, defaults to
"state".
state_is_tuple: boolean, indicates whether the state is a tuple or Tensor.
Returns:
A tuple, containing:
- A list of [batch, output_dim] Tensors, representing the output of the
LSTM given the inputs and state.
- A list of [batch, state_size] Tensors, representing the LSTM state
values given the inputs and previous state.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
num_gates = 3 if self._couple_input_forget_gates else 4
dtype = freq_inputs[0].dtype
actual_input_size = freq_inputs[0].get_shape().as_list()[1]
concat_w_f = _get_concat_variable(
"W_f_%d" % block,
[actual_input_size + 2 * self._num_units, num_gates * self._num_units],
dtype, self._num_unit_shards)
b_f = vs.get_variable(
"B_f_%d" % block,
shape=[num_gates * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
if not self._share_time_frequency_weights:
concat_w_t = _get_concat_variable("W_t_%d" % block, [
actual_input_size + 2 * self._num_units, num_gates * self._num_units
], dtype, self._num_unit_shards)
b_t = vs.get_variable(
"B_t_%d" % block,
shape=[num_gates * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
if self._use_peepholes:
# Diagonal connections
if not self._couple_input_forget_gates:
w_f_diag_freqf = vs.get_variable(
"W_F_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_f_diag_freqt = vs.get_variable(
"W_F_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_freqf = vs.get_variable(
"W_I_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_freqt = vs.get_variable(
"W_I_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_freqf = vs.get_variable(
"W_O_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_freqt = vs.get_variable(
"W_O_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
if not self._share_time_frequency_weights:
if not self._couple_input_forget_gates:
w_f_diag_timef = vs.get_variable(
"W_F_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_f_diag_timet = vs.get_variable(
"W_F_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_timef = vs.get_variable(
"W_I_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_timet = vs.get_variable(
"W_I_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_timef = vs.get_variable(
"W_O_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_timet = vs.get_variable(
"W_O_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
# initialize the first freq state to be zero
m_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
c_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
for freq_index in range(len(freq_inputs)):
if state_is_tuple:
name_prefix = "%s_f%02d_b%02d" % (state_prefix, freq_index, block)
c_prev_time = getattr(state, name_prefix + "_c")
m_prev_time = getattr(state, name_prefix + "_m")
else:
c_prev_time = array_ops.slice(
state, [0, 2 * freq_index * self._num_units], [-1, self._num_units])
m_prev_time = array_ops.slice(
state, [0, (2 * freq_index + 1) * self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(
[freq_inputs[freq_index], m_prev_time, m_prev_freq], 1)
# F-LSTM
lstm_matrix_freq = nn_ops.bias_add(
math_ops.matmul(cell_inputs, concat_w_f), b_f)
if self._couple_input_forget_gates:
i_freq, j_freq, o_freq = array_ops.split(
value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
f_freq = None
else:
i_freq, j_freq, f_freq, o_freq = array_ops.split(
value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
# T-LSTM
if self._share_time_frequency_weights:
i_time = i_freq
j_time = j_freq
f_time = f_freq
o_time = o_freq
else:
lstm_matrix_time = nn_ops.bias_add(
math_ops.matmul(cell_inputs, concat_w_t), b_t)
if self._couple_input_forget_gates:
i_time, j_time, o_time = array_ops.split(
value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
f_time = None
else:
i_time, j_time, f_time, o_time = array_ops.split(
value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
# F-LSTM c_freq
# input gate activations
if self._use_peepholes:
i_freq_g = sigmoid(i_freq + w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_freq_g = sigmoid(i_freq)
# forget gate activations
if self._couple_input_forget_gates:
f_freq_g = 1.0 - i_freq_g
else:
if self._use_peepholes:
f_freq_g = sigmoid(f_freq + self._forget_bias + w_f_diag_freqf *
c_prev_freq + w_f_diag_freqt * c_prev_time)
else:
f_freq_g = sigmoid(f_freq + self._forget_bias)
# cell state
c_freq = f_freq_g * c_prev_freq + i_freq_g * tanh(j_freq)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# T-LSTM c_freq
# input gate activations
if self._use_peepholes:
if self._share_time_frequency_weights:
i_time_g = sigmoid(i_time + w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_time_g = sigmoid(i_time + w_i_diag_timef * c_prev_freq +
w_i_diag_timet * c_prev_time)
else:
i_time_g = sigmoid(i_time)
# forget gate activations
if self._couple_input_forget_gates:
f_time_g = 1.0 - i_time_g
else:
if self._use_peepholes:
if self._share_time_frequency_weights:
f_time_g = sigmoid(f_time + self._forget_bias + w_f_diag_freqf *
c_prev_freq + w_f_diag_freqt * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias + w_f_diag_timef *
c_prev_freq + w_f_diag_timet * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias)
# cell state
c_time = f_time_g * c_prev_time + i_time_g * tanh(j_time)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_time = clip_ops.clip_by_value(c_time, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# F-LSTM m_freq
if self._use_peepholes:
m_freq = sigmoid(o_freq + w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_freq)
else:
m_freq = sigmoid(o_freq) * tanh(c_freq)
# T-LSTM m_time
if self._use_peepholes:
if self._share_time_frequency_weights:
m_time = sigmoid(o_time + w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time + w_o_diag_timef * c_freq +
w_o_diag_timet * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time) * tanh(c_time)
m_prev_freq = m_freq
c_prev_freq = c_freq
# Concatenate the outputs for T-LSTM and F-LSTM for each shift
if freq_index == 0:
state_out_lst = [c_time, m_time]
m_out_lst = [m_time, m_freq]
else:
state_out_lst.extend([c_time, m_time])
m_out_lst.extend([m_time, m_freq])
return m_out_lst, state_out_lst
def _make_tf_features(self, input_feat, slice_offset=0):
"""Make the frequency features.
Args:
input_feat: input Tensor, 2D, [batch, num_units].
slice_offset: (optional) Python int, default 0. The slicing offset is only
used for the backward processing in the BidirectionalGridLSTMCell. It
specifies a starting point other than 0 so that the forward and backward
processing look at different frequency blocks.
Returns:
A list of frequency features, with each element containing:
- A 2D, [batch, output_dim], Tensor representing the time-frequency
feature for that frequency index. Here output_dim is feature_size.
Raises:
ValueError: if input_size cannot be inferred from static shape inference.
"""
input_size = input_feat.get_shape().with_rank(2).dims[-1].value
if input_size is None:
raise ValueError("Cannot infer input_size from static shape inference.")
if slice_offset > 0:
# Padding to the end
inputs = array_ops.pad(input_feat,
                       constant_op.constant(
                           [0, 0, 0, slice_offset],
                           shape=[2, 2],
                           dtype=dtypes.int32), "CONSTANT")
elif slice_offset < 0:
# Padding to the front
inputs = array_ops.pad(input_feat,
                       constant_op.constant(
                           [0, 0, -slice_offset, 0],
                           shape=[2, 2],
                           dtype=dtypes.int32), "CONSTANT")
slice_offset = 0
else:
inputs = input_feat
freq_inputs = []
if not self._start_freqindex_list:
if len(self._num_frequency_blocks) != 1:
raise ValueError("Length of num_frequency_blocks"
" is not 1, but instead is %d" %
len(self._num_frequency_blocks))
num_feats = int(
(input_size - self._feature_size) / (self._frequency_skip)) + 1
if num_feats != self._num_frequency_blocks[0]:
raise ValueError(
"Invalid num_frequency_blocks, requires %d but gets %d, please"
" check the input size and filter config are correct." %
(self._num_frequency_blocks[0], num_feats))
block_inputs = []
for f in range(num_feats):
cur_input = array_ops.slice(
inputs, [0, slice_offset + f * self._frequency_skip],
[-1, self._feature_size])
block_inputs.append(cur_input)
freq_inputs.append(block_inputs)
else:
if len(self._start_freqindex_list) != len(self._end_freqindex_list):
raise ValueError("Length of start and end freqindex_list"
" does not match %d %d",
len(self._start_freqindex_list),
len(self._end_freqindex_list))
if len(self._num_frequency_blocks) != len(self._start_freqindex_list):
raise ValueError("Length of num_frequency_blocks"
" is not equal to start_freqindex_list %d %d",
len(self._num_frequency_blocks),
len(self._start_freqindex_list))
for b in range(len(self._start_freqindex_list)):
start_index = self._start_freqindex_list[b]
end_index = self._end_freqindex_list[b]
cur_size = end_index - start_index
block_feats = int(
(cur_size - self._feature_size) / (self._frequency_skip)) + 1
if block_feats != self._num_frequency_blocks[b]:
raise ValueError(
"Invalid num_frequency_blocks, requires %d but gets %d, please"
" check the input size and filter config are correct." %
(self._num_frequency_blocks[b], block_feats))
block_inputs = []
for f in range(block_feats):
cur_input = array_ops.slice(
inputs,
[0, start_index + slice_offset + f * self._frequency_skip],
[-1, self._feature_size])
block_inputs.append(cur_input)
freq_inputs.append(block_inputs)
return freq_inputs
class BidirectionalGridLSTMCell(GridLSTMCell):
"""Bidirectional GridLstm cell.
The bidirectional connection is only used in the frequency direction, and
hence does not affect the real-time processing in the time direction that is
required for online recognition systems.
The current implementation uses different weights for the two directions.
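A minimal usage sketch (illustrative shapes and `tf.contrib.rnn` export path,
mirroring the GridLSTMCell example above):
```python
cell = tf.contrib.rnn.BidirectionalGridLSTMCell(
    num_units=64, feature_size=8, frequency_skip=1,
    num_frequency_blocks=[33])
inputs = tf.placeholder(tf.float32, [32, 40])   # [batch, input_size]
state = cell.zero_state(32, tf.float32)         # BidirectionalGridLSTMStateTuple
output, new_state = cell(inputs, state)
```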
"""
def __init__(self,
num_units,
use_peepholes=False,
share_time_frequency_weights=False,
cell_clip=None,
initializer=None,
num_unit_shards=1,
forget_bias=1.0,
feature_size=None,
frequency_skip=None,
num_frequency_blocks=None,
start_freqindex_list=None,
end_freqindex_list=None,
couple_input_forget_gates=False,
backward_slice_offset=0,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: (optional) bool, default False. Set True to enable
diagonal/peephole connections.
share_time_frequency_weights: (optional) bool, default False. Set True to
enable shared cell weights between time and frequency LSTMs.
cell_clip: (optional) A float value, default None, if provided the cell
state is clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
matrix. If > 1, the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
feature_size: (optional) int, default None, The size of the input feature
the LSTM spans over.
frequency_skip: (optional) int, default None, The amount the LSTM filter
is shifted by in frequency.
num_frequency_blocks: [required] A list of frequency blocks needed to
cover the whole input feature splitting defined by start_freqindex_list
and end_freqindex_list.
start_freqindex_list: [optional], list of ints, default None, The
starting frequency index for each frequency block.
end_freqindex_list: [optional], list of ints, default None. The ending
frequency index for each frequency block.
couple_input_forget_gates: (optional) bool, default False, Whether to
couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
model parameters and computation cost.
backward_slice_offset: (optional) int32, default 0, the starting offset to
slice the feature for backward processing.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(BidirectionalGridLSTMCell, self).__init__(
num_units, use_peepholes, share_time_frequency_weights, cell_clip,
initializer, num_unit_shards, forget_bias, feature_size, frequency_skip,
num_frequency_blocks, start_freqindex_list, end_freqindex_list,
couple_input_forget_gates, True, reuse)
self._backward_slice_offset = int(backward_slice_offset)
state_names = ""
for direction in ["fwd", "bwd"]:
for block_index in range(len(self._num_frequency_blocks)):
for freq_index in range(self._num_frequency_blocks[block_index]):
name_prefix = "%s_state_f%02d_b%02d" % (direction, freq_index,
block_index)
state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
self._state_tuple_type = collections.namedtuple(
"BidirectionalGridLSTMStateTuple", state_names.strip(","))
self._state_size = self._state_tuple_type(*(
[num_units, num_units] * self._total_blocks * 2))
self._output_size = 2 * num_units * self._total_blocks * 2
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, [batch, num_units].
state: tuple of Tensors, 2D, [batch, state_size].
Returns:
A tuple containing:
- A 2D, [batch, output_dim], Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, [batch, state_size], Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
batch_size = tensor_shape.dimension_value(
inputs.shape[0]) or array_ops.shape(inputs)[0]
fwd_inputs = self._make_tf_features(inputs)
if self._backward_slice_offset:
bwd_inputs = self._make_tf_features(inputs, self._backward_slice_offset)
else:
bwd_inputs = fwd_inputs
# Forward processing
with vs.variable_scope("fwd"):
fwd_m_out_lst = []
fwd_state_out_lst = []
for block in range(len(fwd_inputs)):
fwd_m_out_lst_current, fwd_state_out_lst_current = self._compute(
fwd_inputs[block],
block,
state,
batch_size,
state_prefix="fwd_state",
state_is_tuple=True)
fwd_m_out_lst.extend(fwd_m_out_lst_current)
fwd_state_out_lst.extend(fwd_state_out_lst_current)
# Backward processing
bwd_m_out_lst = []
bwd_state_out_lst = []
with vs.variable_scope("bwd"):
for block in range(len(bwd_inputs)):
# Reverse the blocks
bwd_inputs_reverse = bwd_inputs[block][::-1]
bwd_m_out_lst_current, bwd_state_out_lst_current = self._compute(
bwd_inputs_reverse,
block,
state,
batch_size,
state_prefix="bwd_state",
state_is_tuple=True)
bwd_m_out_lst.extend(bwd_m_out_lst_current)
bwd_state_out_lst.extend(bwd_state_out_lst_current)
state_out = self._state_tuple_type(*(fwd_state_out_lst + bwd_state_out_lst))
# Outputs are always concatenated, as they are never used separately.
m_out = array_ops.concat(fwd_m_out_lst + bwd_m_out_lst, 1)
return m_out, state_out
# pylint: disable=protected-access
_Linear = core_rnn_cell._Linear # pylint: disable=invalid-name
# pylint: enable=protected-access
class AttentionCellWrapper(rnn_cell_impl.RNNCell):
"""Basic attention cell wrapper.
Implementation based on https://arxiv.org/abs/1601.06733.
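A minimal usage sketch (the wrapped cell, shapes and `tf.contrib.rnn` export
path below are illustrative assumptions, TF 1.x graph mode):
```python
base_cell = tf.nn.rnn_cell.LSTMCell(num_units=128)
cell = tf.contrib.rnn.AttentionCellWrapper(base_cell, attn_length=10)
inputs = tf.placeholder(tf.float32, [32, 20, 64])  # [batch, time, input_size]
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
```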
"""
def __init__(self,
cell,
attn_length,
attn_size=None,
attn_vec_size=None,
input_size=None,
state_is_tuple=True,
reuse=None):
"""Create a cell with attention.
Args:
cell: an RNNCell, an attention is added to it.
attn_length: integer, the size of an attention window.
attn_size: integer, the size of an attention vector. Equal to
cell.output_size by default.
attn_vec_size: integer, the number of convolutional features calculated
on the attention state and the size of the hidden layer built from the
base cell state. Equal to attn_size by default.
input_size: integer, the size of a hidden linear layer,
built from inputs and attention. Derived from the input tensor
by default.
state_is_tuple: If True, accepted and returned states are tuples of the
wrapped cell state, the attention vector, and the attention state. If
False, the states are all concatenated along the column axis.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if cell returns a state tuple but the flag
`state_is_tuple` is `False` or if attn_length is zero or less.
"""
super(AttentionCellWrapper, self).__init__(_reuse=reuse)
rnn_cell_impl.assert_like_rnncell("cell", cell)
if nest.is_sequence(cell.state_size) and not state_is_tuple:
raise ValueError(
"Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: %s" % str(cell.state_size))
if attn_length <= 0:
raise ValueError(
"attn_length should be greater than zero, got %s" % str(attn_length))
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if attn_size is None:
attn_size = cell.output_size
if attn_vec_size is None:
attn_vec_size = attn_size
self._state_is_tuple = state_is_tuple
self._cell = cell
self._attn_vec_size = attn_vec_size
self._input_size = input_size
self._attn_size = attn_size
self._attn_length = attn_length
self._reuse = reuse
self._linear1 = None
self._linear2 = None
self._linear3 = None
@property
def state_size(self):
size = (self._cell.state_size, self._attn_size,
self._attn_size * self._attn_length)
if self._state_is_tuple:
return size
else:
return sum(list(size))
@property
def output_size(self):
return self._attn_size
def call(self, inputs, state):
"""Long short-term memory cell with attention (LSTMA)."""
if self._state_is_tuple:
state, attns, attn_states = state
else:
states = state
state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
attns = array_ops.slice(states, [0, self._cell.state_size],
[-1, self._attn_size])
attn_states = array_ops.slice(
states, [0, self._cell.state_size + self._attn_size],
[-1, self._attn_size * self._attn_length])
attn_states = array_ops.reshape(attn_states,
[-1, self._attn_length, self._attn_size])
input_size = self._input_size
if input_size is None:
input_size = inputs.get_shape().as_list()[1]
if self._linear1 is None:
self._linear1 = _Linear([inputs, attns], input_size, True)
inputs = self._linear1([inputs, attns])
cell_output, new_state = self._cell(inputs, state)
if self._state_is_tuple:
new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
else:
new_state_cat = new_state
new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
with vs.variable_scope("attn_output_projection"):
if self._linear2 is None:
self._linear2 = _Linear([cell_output, new_attns], self._attn_size, True)
output = self._linear2([cell_output, new_attns])
new_attn_states = array_ops.concat(
[new_attn_states, array_ops.expand_dims(output, 1)], 1)
new_attn_states = array_ops.reshape(
new_attn_states, [-1, self._attn_length * self._attn_size])
new_state = (new_state, new_attns, new_attn_states)
if not self._state_is_tuple:
new_state = array_ops.concat(list(new_state), 1)
return output, new_state
def _attention(self, query, attn_states):
conv2d = nn_ops.conv2d
reduce_sum = math_ops.reduce_sum
softmax = nn_ops.softmax
tanh = math_ops.tanh
with vs.variable_scope("attention"):
k = vs.get_variable("attn_w",
[1, 1, self._attn_size, self._attn_vec_size])
v = vs.get_variable("attn_v", [self._attn_vec_size])
hidden = array_ops.reshape(attn_states,
[-1, self._attn_length, 1, self._attn_size])
hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
if self._linear3 is None:
self._linear3 = _Linear(query, self._attn_vec_size, True)
y = self._linear3(query)
y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
a = softmax(s)
d = reduce_sum(
array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
new_attns = array_ops.reshape(d, [-1, self._attn_size])
new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
return new_attns, new_attn_states
class HighwayWrapper(rnn_cell_impl.RNNCell):
"""RNNCell wrapper that adds highway connection on cell input and output.
Based on:
R. K. Srivastava, K. Greff, and J. Schmidhuber, "Highway networks",
arXiv preprint arXiv:1505.00387, 2015.
https://arxiv.org/abs/1505.00387
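A minimal usage sketch (illustrative; note the input size must match the
wrapped cell's output size so the highway sum is well defined):
```python
cell = tf.contrib.rnn.HighwayWrapper(tf.nn.rnn_cell.GRUCell(num_units=64))
inputs = tf.placeholder(tf.float32, [32, 20, 64])  # [batch, time, 64]
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
```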
"""
def __init__(self,
cell,
couple_carry_transform_gates=True,
carry_bias_init=1.0):
"""Constructs a `HighwayWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
couple_carry_transform_gates: boolean, should the Carry and Transform gate
be coupled.
carry_bias_init: float, carry gates bias initialization.
"""
self._cell = cell
self._couple_carry_transform_gates = couple_carry_transform_gates
self._carry_bias_init = carry_bias_init
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def _highway(self, inp, out):
input_size = inp.get_shape().with_rank(2).dims[1].value
carry_weight = vs.get_variable("carry_w", [input_size, input_size])
carry_bias = vs.get_variable(
"carry_b", [input_size],
initializer=init_ops.constant_initializer(self._carry_bias_init))
carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias))
if self._couple_carry_transform_gates:
transform = 1 - carry
else:
transform_weight = vs.get_variable("transform_w",
[input_size, input_size])
transform_bias = vs.get_variable(
"transform_b", [input_size],
initializer=init_ops.constant_initializer(-self._carry_bias_init))
transform = math_ops.sigmoid(
nn_ops.xw_plus_b(inp, transform_weight, transform_bias))
return inp * carry + out * transform
def __call__(self, inputs, state, scope=None):
"""Run the cell and add its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
scope: optional cell scope.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = self._cell(inputs, state, scope=scope)
nest.assert_same_structure(inputs, outputs)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
nest.map_structure(assert_shape_match, inputs, outputs)
res_outputs = nest.map_structure(self._highway, inputs, outputs)
return (res_outputs, new_state)
class LayerNormBasicLSTMCell(rnn_cell_impl.RNNCell):
"""LSTM unit with layer normalization and recurrent dropout.
This class adds layer normalization and recurrent dropout to a
basic LSTM unit. Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
Recurrent dropout is based on:
https://arxiv.org/abs/1603.05118
"Recurrent Dropout without Memory Loss"
Stanislau Semeniuta, Aliaksei Severyn, Erhardt Barth.
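A minimal usage sketch (illustrative shapes and `tf.contrib.rnn` export path,
TF 1.x graph mode):
```python
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
    num_units=128, layer_norm=True, dropout_keep_prob=0.9)
inputs = tf.placeholder(tf.float32, [32, 20, 64])  # [batch, time, input_size]
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
```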
"""
def __init__(self,
num_units,
forget_bias=1.0,
input_size=None,
activation=math_ops.tanh,
layer_norm=True,
norm_gain=1.0,
norm_shift=0.0,
dropout_keep_prob=1.0,
dropout_prob_seed=None,
reuse=None):
"""Initializes the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
input_size: Deprecated and unused.
activation: Activation function of the inner states.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
recurrent dropout probability value. If float and 1.0, no dropout will
be applied.
dropout_prob_seed: (optional) integer, the randomness seed.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._activation = activation
self._forget_bias = forget_bias
self._keep_prob = dropout_keep_prob
self._seed = dropout_prob_seed
self._layer_norm = layer_norm
self._norm_gain = norm_gain
self._norm_shift = norm_shift
self._reuse = reuse
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def _norm(self, inp, scope, dtype=dtypes.float32):
shape = inp.get_shape()[-1:]
gamma_init = init_ops.constant_initializer(self._norm_gain)
beta_init = init_ops.constant_initializer(self._norm_shift)
with vs.variable_scope(scope):
# Initialize beta and gamma for use by layer_norm.
vs.get_variable("gamma", shape=shape, initializer=gamma_init, dtype=dtype)
vs.get_variable("beta", shape=shape, initializer=beta_init, dtype=dtype)
normalized = layers.layer_norm(inp, reuse=True, scope=scope)
return normalized
def _linear(self, args):
out_size = 4 * self._num_units
proj_size = args.get_shape()[-1]
dtype = args.dtype
weights = vs.get_variable("kernel", [proj_size, out_size], dtype=dtype)
out = math_ops.matmul(args, weights)
if not self._layer_norm:
bias = vs.get_variable("bias", [out_size], dtype=dtype)
out = nn_ops.bias_add(out, bias)
return out
def call(self, inputs, state):
"""LSTM cell with layer normalization and recurrent dropout."""
c, h = state
args = array_ops.concat([inputs, h], 1)
concat = self._linear(args)
dtype = args.dtype
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
if self._layer_norm:
i = self._norm(i, "input", dtype=dtype)
j = self._norm(j, "transform", dtype=dtype)
f = self._norm(f, "forget", dtype=dtype)
o = self._norm(o, "output", dtype=dtype)
g = self._activation(j)
if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)
new_c = (
c * math_ops.sigmoid(f + self._forget_bias) + math_ops.sigmoid(i) * g)
if self._layer_norm:
new_c = self._norm(new_c, "state", dtype=dtype)
new_h = self._activation(new_c) * math_ops.sigmoid(o)
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
class NASCell(rnn_cell_impl.LayerRNNCell):
"""Neural Architecture Search (NAS) recurrent network cell.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.01578
Barret Zoph and Quoc V. Le.
"Neural Architecture Search with Reinforcement Learning" Proc. ICLR 2017.
The class uses an optional projection layer.
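A minimal usage sketch (illustrative shapes and `tf.contrib.rnn` export path,
TF 1.x graph mode):
```python
cell = tf.contrib.rnn.NASCell(num_units=128, num_proj=64)
inputs = tf.placeholder(tf.float32, [32, 20, 32])  # [batch, time, input_size]
# Outputs have the projected size (64); state c keeps size num_units (128).
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
```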
"""
# NAS cell's architecture base.
_NAS_BASE = 8
def __init__(self, num_units, num_proj=None, use_bias=False, reuse=None,
**kwargs):
"""Initialize the parameters for a NAS cell.
Args:
num_units: int, The number of units in the NAS cell.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
use_bias: (optional) bool, If True then use biases within the cell. This
is False by default.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
**kwargs: Additional keyword arguments.
"""
super(NASCell, self).__init__(_reuse=reuse, **kwargs)
self._num_units = num_units
self._num_proj = num_proj
self._use_bias = use_bias
self._reuse = reuse
if num_proj is not None:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def build(self, inputs_shape):
input_size = tensor_shape.dimension_value(
tensor_shape.TensorShape(inputs_shape).with_rank(2)[1])
if input_size is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
num_proj = self._num_units if self._num_proj is None else self._num_proj
# Variables for the NAS cell. `recurrent_kernel` is all matrices multiplying
# the hidden state and `kernel` is all matrices multiplying the inputs.
self.recurrent_kernel = self.add_variable(
"recurrent_kernel", [num_proj, self._NAS_BASE * self._num_units])
self.kernel = self.add_variable(
"kernel", [input_size, self._NAS_BASE * self._num_units])
if self._use_bias:
self.bias = self.add_variable("bias",
shape=[self._NAS_BASE * self._num_units],
initializer=init_ops.zeros_initializer)
# Projection layer if specified
if self._num_proj is not None:
self.projection_weights = self.add_variable(
"projection_weights", [self._num_units, self._num_proj])
self.built = True
def call(self, inputs, state):
"""Run one step of NAS Cell.
Args:
inputs: input Tensor, 2D, batch x input_size.
state: This must be a tuple of state Tensors, both `2-D`, with column
sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
NAS Cell after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of NAS Cell after reading `inputs`
when the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
relu = nn_ops.relu
(c_prev, m_prev) = state
m_matrix = math_ops.matmul(m_prev, self.recurrent_kernel)
inputs_matrix = math_ops.matmul(inputs, self.kernel)
if self._use_bias:
m_matrix = nn_ops.bias_add(m_matrix, self.bias)
# The NAS cell branches into 8 different splits for both the hidden state
# and the input
m_matrix_splits = array_ops.split(
axis=1, num_or_size_splits=self._NAS_BASE, value=m_matrix)
inputs_matrix_splits = array_ops.split(
axis=1, num_or_size_splits=self._NAS_BASE, value=inputs_matrix)
# First layer
layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])
layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1])
layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])
layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3])
layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])
layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])
layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])
layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])
# Second layer
l2_0 = tanh(layer1_0 * layer1_1)
l2_1 = tanh(layer1_2 + layer1_3)
l2_2 = tanh(layer1_4 * layer1_5)
l2_3 = sigmoid(layer1_6 + layer1_7)
# Inject the cell
l2_0 = tanh(l2_0 + c_prev)
# Third layer
l3_0_pre = l2_0 * l2_1
new_c = l3_0_pre # create new cell
l3_0 = l3_0_pre
l3_1 = tanh(l2_2 + l2_3)
# Final layer
new_m = tanh(l3_0 * l3_1)
# Projection layer if specified
if self._num_proj is not None:
new_m = math_ops.matmul(new_m, self.projection_weights)
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_m)
return new_m, new_state
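# Illustrative usage sketch (added for exposition; not part of the original
# contrib API): drives a NASCell for a single step on zero-valued inputs.
# The batch/input/unit sizes and the projection size below are arbitrary
# assumptions.
def _example_nas_cell_step():
  """Builds a NASCell and runs one step, returning (output, new_state)."""
  batch_size, input_size, num_units = 2, 6, 8
  cell = NASCell(num_units=num_units, num_proj=4, use_bias=True)
  inputs = array_ops.zeros([batch_size, input_size])
  state = cell.zero_state(batch_size, dtypes.float32)
  # With num_proj set, the emitted output (and the m state) has num_proj dims.
  return cell(inputs, state)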
class UGRNNCell(rnn_cell_impl.RNNCell):
"""Update Gate Recurrent Neural Network (UGRNN) cell.
A compromise between an LSTM/GRU and a vanilla RNN. There is only one
gate, which determines whether the unit should be integrating or
computing instantaneously. This is the recurrent analogue of the gating
idea in the feedforward Highway Network.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.09913
Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.
"Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017.
"""
def __init__(self,
num_units,
initializer=None,
forget_bias=1.0,
activation=math_ops.tanh,
reuse=None):
"""Initialize the parameters for an UGRNN cell.
Args:
num_units: int, The number of units in the UGRNN cell
initializer: (optional) The initializer to use for the weight matrices.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gate, used to reduce the scale of forgetting at the beginning
of the training.
activation: (optional) Activation function of the inner states.
Default is `tf.tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(UGRNNCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._forget_bias = forget_bias
self._activation = activation
self._reuse = reuse
self._linear = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Run one step of UGRNN.
Args:
inputs: input Tensor, 2D, batch x input size.
state: state Tensor, 2D, batch x num units.
Returns:
new_output: batch x num units, Tensor representing the output of the UGRNN
after reading `inputs` when previous state was `state`. Identical to
`new_state`.
new_state: batch x num units, Tensor representing the state of the UGRNN
after reading `inputs` when previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
input_size = inputs.get_shape().with_rank(2).dims[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(
vs.get_variable_scope(), initializer=self._initializer):
cell_inputs = array_ops.concat([inputs, state], 1)
if self._linear is None:
self._linear = _Linear(cell_inputs, 2 * self._num_units, True)
rnn_matrix = self._linear(cell_inputs)
[g_act, c_act] = array_ops.split(
axis=1, num_or_size_splits=2, value=rnn_matrix)
c = self._activation(c_act)
g = sigmoid(g_act + self._forget_bias)
new_state = g * state + (1.0 - g) * c
new_output = new_state
return new_output, new_state
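# Illustrative sketch (not part of the original module): one UGRNN step.
# The single gate g mixes the previous state with the candidate c, so the
# returned output equals the returned state. Sizes are arbitrary assumptions.
def _example_ugrnn_step():
  """Runs one UGRNNCell step and returns (new_output, new_state)."""
  batch_size, input_size, num_units = 3, 5, 4
  cell = UGRNNCell(num_units=num_units)
  inputs = array_ops.zeros([batch_size, input_size])
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)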
class IntersectionRNNCell(rnn_cell_impl.RNNCell):
"""Intersection Recurrent Neural Network (+RNN) cell.
Architecture with coupled recurrent gate as well as coupled depth
gate, designed to improve information flow through stacked RNNs. As the
architecture uses depth gating, the dimensionality of the depth
output (y) must not change through depth (input size == output size).
To achieve this, the first layer of a stacked Intersection RNN projects
the inputs to N (num units) dimensions. Therefore when initializing an
IntersectionRNNCell, one should set `num_in_proj = N` for the first layer
and use default settings for subsequent layers.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.09913
Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.
"Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017.
The Intersection RNN is built for use in deeply stacked
RNNs so it may not achieve best performance with depth 1.
"""
def __init__(self,
num_units,
num_in_proj=None,
initializer=None,
forget_bias=1.0,
y_activation=nn_ops.relu,
reuse=None):
"""Initialize the parameters for an +RNN cell.
Args:
num_units: int, The number of units in the +RNN cell
num_in_proj: (optional) int, The input dimensionality for the RNN.
If creating the first layer of an +RNN, this should be set to
`num_units`. Otherwise, this should be set to `None` (default).
If `None`, dimensionality of `inputs` should be equal to `num_units`,
otherwise ValueError is thrown.
initializer: (optional) The initializer to use for the weight matrices.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
y_activation: (optional) Activation function of the states passed
through depth. Default is `tf.nn.relu`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(IntersectionRNNCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._forget_bias = forget_bias
self._num_input_proj = num_in_proj
self._y_activation = y_activation
self._reuse = reuse
self._linear1 = None
self._linear2 = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Run one step of the Intersection RNN.
Args:
inputs: input Tensor, 2D, batch x input size.
state: state Tensor, 2D, batch x num units.
Returns:
new_y: batch x num units, Tensor representing the output of the +RNN
after reading `inputs` when previous state was `state`.
new_state: batch x num units, Tensor representing the state of the +RNN
after reading `inputs` when previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from `inputs` via
static shape inference.
ValueError: If input size != output size (these must be equal when
using the Intersection RNN).
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
input_size = inputs.get_shape().with_rank(2).dims[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(
vs.get_variable_scope(), initializer=self._initializer):
# read-in projections (should be used for first layer in deep +RNN
# to transform size of inputs from I --> N)
if input_size.value != self._num_units:
if self._num_input_proj:
with vs.variable_scope("in_projection"):
if self._linear1 is None:
self._linear1 = _Linear(inputs, self._num_units, True)
inputs = self._linear1(inputs)
else:
raise ValueError("Must have input size == output size for "
"Intersection RNN. To fix, num_in_proj should "
"be set to num_units at cell init.")
n_dim = i_dim = self._num_units
cell_inputs = array_ops.concat([inputs, state], 1)
if self._linear2 is None:
self._linear2 = _Linear(cell_inputs, 2 * n_dim + 2 * i_dim, True)
rnn_matrix = self._linear2(cell_inputs)
gh_act = rnn_matrix[:, :n_dim] # b x n
h_act = rnn_matrix[:, n_dim:2 * n_dim] # b x n
gy_act = rnn_matrix[:, 2 * n_dim:2 * n_dim + i_dim] # b x i
y_act = rnn_matrix[:, 2 * n_dim + i_dim:2 * n_dim + 2 * i_dim] # b x i
h = tanh(h_act)
y = self._y_activation(y_act)
gh = sigmoid(gh_act + self._forget_bias)
gy = sigmoid(gy_act + self._forget_bias)
new_state = gh * state + (1.0 - gh) * h # passed thru time
new_y = gy * inputs + (1.0 - gy) * y # passed thru depth
return new_y, new_state
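# Illustrative sketch (not part of the original module): a first-layer
# IntersectionRNNCell step. Because the input size (6) differs from
# num_units (4), num_in_proj must be set so the cell projects the inputs to
# num_units before gating. Sizes are arbitrary assumptions.
def _example_intersection_rnn_step():
  """Runs one IntersectionRNNCell step and returns (new_y, new_state)."""
  batch_size, input_size, num_units = 2, 6, 4
  cell = IntersectionRNNCell(num_units=num_units, num_in_proj=num_units)
  inputs = array_ops.zeros([batch_size, input_size])
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)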
_REGISTERED_OPS = None
class CompiledWrapper(rnn_cell_impl.RNNCell):
"""Wraps step execution in an XLA JIT scope."""
def __init__(self, cell, compile_stateful=False):
"""Create CompiledWrapper cell.
Args:
cell: Instance of `RNNCell`.
compile_stateful: Whether to compile stateful ops like initializers
and random number generators (default: False).
"""
self._cell = cell
self._compile_stateful = compile_stateful
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
if self._compile_stateful:
compile_ops = True
else:
def compile_ops(node_def):
global _REGISTERED_OPS
if _REGISTERED_OPS is None:
_REGISTERED_OPS = op_def_registry.get_registered_ops()
return not _REGISTERED_OPS[node_def.op].is_stateful
with jit.experimental_jit_scope(compile_ops=compile_ops):
return self._cell(inputs, state, scope=scope)
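# Illustrative sketch (not part of the original module): wrapping a cell so
# that its per-step ops land inside an XLA JIT scope. Graph construction
# works as shown; actually running the compiled ops requires an XLA-enabled
# build/device. The wrapped cell and the sizes are assumptions.
def _example_compiled_wrapper_step():
  """Runs one step of a UGRNNCell wrapped in CompiledWrapper."""
  batch_size, input_size, num_units = 2, 4, 4
  cell = CompiledWrapper(UGRNNCell(num_units=num_units))
  inputs = array_ops.zeros([batch_size, input_size])
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)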
def _random_exp_initializer(minval, maxval, seed=None, dtype=dtypes.float32):
"""Returns an exponential distribution initializer.
Args:
minval: float or a scalar float Tensor. With value > 0. Lower bound of the
range of random values to generate.
maxval: float or a scalar float Tensor. With value > minval. Upper bound of
the range of random values to generate.
seed: An integer. Used to create random seeds.
dtype: The data type.
Returns:
An initializer that generates tensors with an exponential distribution.
"""
def _initializer(shape, dtype=dtype, partition_info=None):
del partition_info # Unused.
return math_ops.exp(
random_ops.random_uniform(
shape, math_ops.log(minval), math_ops.log(maxval), dtype,
seed=seed))
return _initializer
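# Illustrative sketch (not part of the original module): the initializer
# draws values log-uniformly between minval and maxval, which is how
# PhasedLSTMCell below initializes its per-unit periods. The shape and
# bounds here are arbitrary assumptions.
def _example_random_exp_initializer():
  """Returns a [4] tensor sampled from e^U(log(1), log(1000))."""
  init = _random_exp_initializer(minval=1.0, maxval=1000.0, seed=42)
  return init([4], dtype=dtypes.float32)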
class PhasedLSTMCell(rnn_cell_impl.RNNCell):
"""Phased LSTM recurrent network cell.
https://arxiv.org/pdf/1610.09513v1.pdf
"""
def __init__(self,
num_units,
use_peepholes=False,
leak=0.001,
ratio_on=0.1,
trainable_ratio_on=True,
period_init_min=1.0,
period_init_max=1000.0,
reuse=None):
"""Initialize the Phased LSTM cell.
Args:
num_units: int, The number of units in the Phased LSTM cell.
use_peepholes: bool, set True to enable peephole connections.
leak: float or scalar float Tensor with value in [0, 1]. Leak applied
during training.
ratio_on: float or scalar float Tensor with value in [0, 1]. Ratio of the
period during which the gates are open.
trainable_ratio_on: bool, whether ratio_on is trainable.
period_init_min: float or scalar float Tensor. With value > 0.
Minimum value of the initialized period.
The period values are initialized by drawing from the distribution:
e^U(log(period_init_min), log(period_init_max))
Where U(.,.) is the uniform distribution.
period_init_max: float or scalar float Tensor.
With value > period_init_min. Maximum value of the initialized period.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(PhasedLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._leak = leak
self._ratio_on = ratio_on
self._trainable_ratio_on = trainable_ratio_on
self._period_init_min = period_init_min
self._period_init_max = period_init_max
self._reuse = reuse
self._linear1 = None
self._linear2 = None
self._linear3 = None
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def _mod(self, x, y):
"""Modulo function that propagates x gradients."""
return array_ops.stop_gradient(math_ops.mod(x, y) - x) + x
def _get_cycle_ratio(self, time, phase, period):
"""Compute the cycle ratio in the dtype of the time."""
phase_casted = math_ops.cast(phase, dtype=time.dtype)
period_casted = math_ops.cast(period, dtype=time.dtype)
shifted_time = time - phase_casted
cycle_ratio = self._mod(shifted_time, period_casted) / period_casted
return math_ops.cast(cycle_ratio, dtype=dtypes.float32)
def call(self, inputs, state):
"""Phased LSTM Cell.
Args:
inputs: A tuple of 2 Tensors.
The first Tensor has shape [batch, 1], and type float32 or float64.
It stores the time.
The second Tensor has shape [batch, features_size], and type float32.
It stores the features.
state: rnn_cell_impl.LSTMStateTuple, state from previous timestep.
Returns:
A tuple containing:
- A Tensor of float32, and shape [batch_size, num_units], representing the
output of the cell.
- A rnn_cell_impl.LSTMStateTuple, containing 2 Tensors of float32, shape
[batch_size, num_units], representing the new state and the output.
"""
(c_prev, h_prev) = state
(time, x) = inputs
in_mask_gates = [x, h_prev]
if self._use_peepholes:
in_mask_gates.append(c_prev)
with vs.variable_scope("mask_gates"):
if self._linear1 is None:
self._linear1 = _Linear(in_mask_gates, 2 * self._num_units, True)
mask_gates = math_ops.sigmoid(self._linear1(in_mask_gates))
[input_gate, forget_gate] = array_ops.split(
axis=1, num_or_size_splits=2, value=mask_gates)
with vs.variable_scope("new_input"):
if self._linear2 is None:
self._linear2 = _Linear([x, h_prev], self._num_units, True)
new_input = math_ops.tanh(self._linear2([x, h_prev]))
new_c = (c_prev * forget_gate + input_gate * new_input)
in_out_gate = [x, h_prev]
if self._use_peepholes:
in_out_gate.append(new_c)
with vs.variable_scope("output_gate"):
if self._linear3 is None:
self._linear3 = _Linear(in_out_gate, self._num_units, True)
output_gate = math_ops.sigmoid(self._linear3(in_out_gate))
new_h = math_ops.tanh(new_c) * output_gate
period = vs.get_variable(
"period", [self._num_units],
initializer=_random_exp_initializer(self._period_init_min,
self._period_init_max))
phase = vs.get_variable(
"phase", [self._num_units],
initializer=init_ops.random_uniform_initializer(0.,
period.initial_value))
ratio_on = vs.get_variable(
"ratio_on", [self._num_units],
initializer=init_ops.constant_initializer(self._ratio_on),
trainable=self._trainable_ratio_on)
cycle_ratio = self._get_cycle_ratio(time, phase, period)
k_up = 2 * cycle_ratio / ratio_on
k_down = 2 - k_up
k_closed = self._leak * cycle_ratio
k = array_ops.where(cycle_ratio < ratio_on, k_down, k_closed)
k = array_ops.where(cycle_ratio < 0.5 * ratio_on, k_up, k)
new_c = k * new_c + (1 - k) * c_prev
new_h = k * new_h + (1 - k) * h_prev
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
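# Illustrative sketch (not part of the original module): unlike the other
# cells in this file, PhasedLSTMCell consumes a (time, features) tuple per
# step; the time gate opens each unit only for a fraction (ratio_on) of its
# learned period. Shapes below are arbitrary assumptions.
def _example_phased_lstm_step():
  """Runs one PhasedLSTMCell step and returns (new_h, new_state)."""
  batch_size, feature_size, num_units = 2, 3, 4
  cell = PhasedLSTMCell(num_units=num_units)
  times = array_ops.zeros([batch_size, 1])  # wall-clock time per example
  features = array_ops.zeros([batch_size, feature_size])
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell((times, features), state)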
class ConvLSTMCell(rnn_cell_impl.RNNCell):
"""Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self,
conv_ndims,
input_shape,
output_channels,
kernel_shape,
use_bias=True,
skip_connection=False,
forget_bias=1.0,
initializers=None,
name="conv_lstm_cell"):
"""Construct ConvLSTMCell.
Args:
conv_ndims: Convolution dimensionality (1, 2 or 3).
input_shape: Shape of the input as int tuple, excluding the batch size.
output_channels: int, number of output channels of the conv LSTM.
kernel_shape: Shape of kernel as an int tuple (of size 1, 2 or 3).
use_bias: (bool) Use bias in convolutions.
skip_connection: If set to `True`, concatenate the input to the
output of the conv LSTM. Default: `False`.
forget_bias: Forget bias.
initializers: Unused.
name: Name of the module.
Raises:
ValueError: If `skip_connection` is `True` and stride is different from 1
or if `input_shape` is incompatible with `conv_ndims`.
"""
super(ConvLSTMCell, self).__init__(name=name)
if conv_ndims != len(input_shape) - 1:
raise ValueError("Invalid input_shape {} for conv_ndims={}.".format(
input_shape, conv_ndims))
self._conv_ndims = conv_ndims
self._input_shape = input_shape
self._output_channels = output_channels
self._kernel_shape = list(kernel_shape)
self._use_bias = use_bias
self._forget_bias = forget_bias
self._skip_connection = skip_connection
self._total_output_channels = output_channels
if self._skip_connection:
self._total_output_channels += self._input_shape[-1]
state_size = tensor_shape.TensorShape(
self._input_shape[:-1] + [self._output_channels])
self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)
self._output_size = tensor_shape.TensorShape(
self._input_shape[:-1] + [self._total_output_channels])
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def call(self, inputs, state, scope=None):
cell, hidden = state
new_hidden = _conv([inputs, hidden], self._kernel_shape,
4 * self._output_channels, self._use_bias)
gates = array_ops.split(
value=new_hidden, num_or_size_splits=4, axis=self._conv_ndims + 1)
input_gate, new_input, forget_gate, output_gate = gates
new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)
if self._skip_connection:
output = array_ops.concat([output, inputs], axis=-1)
new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
return output, new_state
class Conv1DLSTMCell(ConvLSTMCell):
"""1D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_1d_lstm_cell", **kwargs):
"""Construct Conv1DLSTM. See `ConvLSTMCell` for more details."""
super(Conv1DLSTMCell, self).__init__(conv_ndims=1, name=name, **kwargs)
class Conv2DLSTMCell(ConvLSTMCell):
"""2D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_2d_lstm_cell", **kwargs):
"""Construct Conv2DLSTM. See `ConvLSTMCell` for more details."""
super(Conv2DLSTMCell, self).__init__(conv_ndims=2, name=name, **kwargs)
class Conv3DLSTMCell(ConvLSTMCell):
"""3D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_3d_lstm_cell", **kwargs):
"""Construct Conv3DLSTM. See `ConvLSTMCell` for more details."""
super(Conv3DLSTMCell, self).__init__(conv_ndims=3, name=name, **kwargs)
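# Illustrative sketch (not part of the original module): one Conv2DLSTMCell
# step on a zero-valued image batch. input_shape excludes the batch
# dimension; the state and output keep the spatial dims while the channel
# count becomes output_channels. All sizes are arbitrary assumptions.
def _example_conv2d_lstm_step():
  """Runs one Conv2DLSTMCell step and returns (output, new_state)."""
  batch_size, input_shape = 2, [8, 8, 3]  # height, width, channels
  cell = Conv2DLSTMCell(input_shape=input_shape, output_channels=6,
                        kernel_shape=[3, 3])
  inputs = array_ops.zeros([batch_size] + input_shape)
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)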
def _conv(args, filter_size, num_features, bias, bias_start=0.0):
"""Convolution.
Args:
args: a 3D, 4D, or 5D Tensor or a list of such Tensors, each shaped
[batch, spatial dims..., features].
filter_size: int tuple of filter shape (of size 1, 2 or 3).
num_features: int, number of features.
bias: Whether to use biases in the convolution layer.
bias_start: starting value to initialize the bias; 0 by default.
Returns:
A 3D, 4D, or 5D Tensor with shape [batch ... num_features]
Raises:
ValueError: if any of the arguments has an unspecified or wrong shape.
"""
# Calculate the total size of arguments on dimension 1.
total_arg_size_depth = 0
shapes = [a.get_shape().as_list() for a in args]
shape_length = len(shapes[0])
for shape in shapes:
if len(shape) not in [3, 4, 5]:
raise ValueError("Conv Linear expects 3D, 4D "
"or 5D arguments: %s" % str(shapes))
if len(shape) != len(shapes[0]):
raise ValueError("Conv Linear expects all args "
"to be of same Dimension: %s" % str(shapes))
else:
total_arg_size_depth += shape[-1]
dtype = [a.dtype for a in args][0]
# determine correct conv operation
if shape_length == 3:
conv_op = nn_ops.conv1d
strides = 1
elif shape_length == 4:
conv_op = nn_ops.conv2d
strides = shape_length * [1]
elif shape_length == 5:
conv_op = nn_ops.conv3d
strides = shape_length * [1]
# Now the computation.
kernel = vs.get_variable(
"kernel", filter_size + [total_arg_size_depth, num_features], dtype=dtype)
if len(args) == 1:
res = conv_op(args[0], kernel, strides, padding="SAME")
else:
res = conv_op(
array_ops.concat(axis=shape_length - 1, values=args),
kernel,
strides,
padding="SAME")
if not bias:
return res
bias_term = vs.get_variable(
"biases", [num_features],
dtype=dtype,
initializer=init_ops.constant_initializer(bias_start, dtype=dtype))
return res + bias_term
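# Illustrative sketch (not part of the original module): _conv concatenates
# its arguments along the channel axis and applies a single SAME-padded
# convolution, which is how ConvLSTMCell mixes inputs and hidden state. The
# scope name and shapes are arbitrary assumptions.
def _example_conv_helper():
  """Applies _conv to a zero input/hidden pair and returns the result."""
  with vs.variable_scope("conv_helper_example"):
    x = array_ops.zeros([2, 8, 8, 3])
    h = array_ops.zeros([2, 8, 8, 5])
    return _conv([x, h], filter_size=[3, 3], num_features=7, bias=True)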
class GLSTMCell(rnn_cell_impl.RNNCell):
"""Group LSTM cell (G-LSTM).
The implementation is based on:
https://arxiv.org/abs/1703.10722
O. Kuchaiev and B. Ginsburg
"Factorization Tricks for LSTM Networks", ICLR 2017 workshop.
In brief, a G-LSTM cell consists of one LSTM sub-cell per group, where each
sub-cell operates on an evenly-sized sub-vector of the input and produces an
evenly-sized sub-vector of the output. For example, a G-LSTM cell with 128
units and 4 groups consists of 4 LSTM sub-cells with 32 units each. If that
G-LSTM cell is fed a 200-dim input, then each sub-cell receives a 50-dim part
of the input and produces a 32-dim part of the output.
"""
def __init__(self,
num_units,
initializer=None,
num_proj=None,
number_of_groups=1,
forget_bias=1.0,
activation=math_ops.tanh,
reuse=None):
"""Initialize the parameters of G-LSTM cell.
Args:
num_units: int, The number of units in the G-LSTM cell
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
number_of_groups: (optional) int, number of groups to use.
If `number_of_groups` is 1, then it should be equivalent to an LSTM cell.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already
has the given variables, an error is raised.
Raises:
ValueError: If `num_units` or `num_proj` is not divisible by
`number_of_groups`.
"""
super(GLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._num_proj = num_proj
self._forget_bias = forget_bias
self._activation = activation
self._number_of_groups = number_of_groups
if self._num_units % self._number_of_groups != 0:
raise ValueError("num_units must be divisible by number_of_groups")
if self._num_proj:
if self._num_proj % self._number_of_groups != 0:
raise ValueError("num_proj must be divisible by number_of_groups")
self._group_shape = [
int(self._num_proj / self._number_of_groups),
int(self._num_units / self._number_of_groups)
]
else:
self._group_shape = [
int(self._num_units / self._number_of_groups),
int(self._num_units / self._number_of_groups)
]
if num_proj:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
self._linear1 = [None] * number_of_groups
self._linear2 = None
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def _get_input_for_group(self, inputs, group_id, group_size):
"""Slices inputs into groups to prepare for processing by cell's groups.
Args:
inputs: cell input or its previous state,
a Tensor, 2D, [batch x num_units]
group_id: group id, a Scalar, for which to prepare input
group_size: size of the group
Returns:
subset of inputs corresponding to group "group_id",
a Tensor, 2D, [batch x num_units/number_of_groups]
"""
return array_ops.slice(
input_=inputs,
begin=[0, group_id * group_size],
size=[self._batch_size, group_size],
name=("GLSTM_group%d_input_generation" % group_id))
def call(self, inputs, state):
"""Run one step of G-LSTM.
Args:
inputs: input Tensor, 2D, [batch x num_inputs]. num_inputs must be
statically-known and evenly divisible into groups. The innermost
vectors of the inputs are split into evenly-sized sub-vectors and fed
into the per-group LSTM sub-cells.
state: this must be a tuple of state Tensors, both `2-D`, with column
sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
G-LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- LSTMStateTuple representing the new state of G-LSTM cell
after reading `inputs` when the previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference, or if the input shape is incompatible
with the number of groups.
"""
(c_prev, m_prev) = state
self._batch_size = tensor_shape.dimension_value(
inputs.shape[0]) or array_ops.shape(inputs)[0]
# If the input size is statically-known, calculate and validate its group
# size. Otherwise, use the output group size.
input_size = tensor_shape.dimension_value(inputs.shape[1])
if input_size is None:
raise ValueError("input size must be statically known")
if input_size % self._number_of_groups != 0:
raise ValueError(
"input size (%d) must be divisible by number_of_groups (%d)" %
(input_size, self._number_of_groups))
input_group_size = int(input_size / self._number_of_groups)
dtype = inputs.dtype
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer):
i_parts = []
j_parts = []
f_parts = []
o_parts = []
for group_id in range(self._number_of_groups):
with vs.variable_scope("group%d" % group_id):
x_g_id = array_ops.concat(
[
self._get_input_for_group(inputs, group_id, input_group_size),
self._get_input_for_group(m_prev, group_id,
self._group_shape[0])
],
axis=1)
linear = self._linear1[group_id]
if linear is None:
linear = _Linear(x_g_id, 4 * self._group_shape[1], False)
self._linear1[group_id] = linear
R_k = linear(x_g_id) # pylint: disable=invalid-name
i_k, j_k, f_k, o_k = array_ops.split(R_k, 4, 1)
i_parts.append(i_k)
j_parts.append(j_k)
f_parts.append(f_k)
o_parts.append(o_k)
bi = vs.get_variable(
name="bias_i",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype))
bj = vs.get_variable(
name="bias_j",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype))
bf = vs.get_variable(
name="bias_f",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype))
bo = vs.get_variable(
name="bias_o",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype))
i = nn_ops.bias_add(array_ops.concat(i_parts, axis=1), bi)
j = nn_ops.bias_add(array_ops.concat(j_parts, axis=1), bj)
f = nn_ops.bias_add(array_ops.concat(f_parts, axis=1), bf)
o = nn_ops.bias_add(array_ops.concat(o_parts, axis=1), bo)
c = (
math_ops.sigmoid(f + self._forget_bias) * c_prev +
math_ops.sigmoid(i) * math_ops.tanh(j))
m = math_ops.sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection"):
if self._linear2 is None:
self._linear2 = _Linear(m, self._num_proj, False)
m = self._linear2(m)
new_state = rnn_cell_impl.LSTMStateTuple(c, m)
return m, new_state
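# Illustrative sketch (not part of the original module): a G-LSTM step with
# two groups. Both the input size and num_units must be divisible by
# number_of_groups; each group's LSTM sub-cell sees only its own slice of
# the input and of the previous m state. Sizes are arbitrary assumptions.
def _example_glstm_step():
  """Runs one GLSTMCell step and returns (m, new_state)."""
  batch_size, input_size, num_units = 2, 6, 8
  cell = GLSTMCell(num_units=num_units, number_of_groups=2)
  inputs = array_ops.zeros([batch_size, input_size])
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)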
class LayerNormLSTMCell(rnn_cell_impl.RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf
Felix Gers, Jurgen Schmidhuber, and Fred Cummins.
"Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
"""
def __init__(self,
num_units,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_proj=None,
proj_clip=None,
forget_bias=1.0,
activation=None,
layer_norm=False,
norm_gain=1.0,
norm_shift=0.0,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training. Must set it manually to `0.0` when restoring from
CudnnLSTM trained checkpoints.
activation: Activation function of the inner states. Default: `tanh`.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
When restoring from CudnnLSTM-trained checkpoints, must use
CudnnCompatibleLSTMCell instead.
"""
super(LayerNormLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._forget_bias = forget_bias
self._activation = activation or math_ops.tanh
self._layer_norm = layer_norm
self._norm_gain = norm_gain
self._norm_shift = norm_shift
if num_proj:
self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_proj))
self._output_size = num_proj
else:
self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_units))
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def _linear(self,
args,
output_size,
bias,
bias_initializer=None,
kernel_initializer=None,
layer_norm=False):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a Variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
layer_norm: boolean, whether to apply layer normalization.
Returns:
A 2D Tensor with shape [batch x output_size] taking value
sum_i(args[i] * W[i]), where each W[i] is a newly created Variable.
Raises:
ValueError: if any of the arguments has an unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if tensor_shape.dimension_value(shape[1]) is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += tensor_shape.dimension_value(shape[1])
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
"kernel", [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
biases = vs.get_variable(
"bias", [output_size], dtype=dtype, initializer=bias_initializer)
if not layer_norm:
res = nn_ops.bias_add(res, biases)
return res
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x input_size.
state: this must be a tuple of state Tensors,
both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
(c_prev, m_prev) = state
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2).dims[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer) as unit_scope:
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = self._linear(
[inputs, m_prev],
4 * self._num_units,
bias=True,
bias_initializer=None,
layer_norm=self._layer_norm)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
if self._layer_norm:
i = _norm(self._norm_gain, self._norm_shift, i, "input")
j = _norm(self._norm_gain, self._norm_shift, j, "transform")
f = _norm(self._norm_gain, self._norm_shift, f, "forget")
o = _norm(self._norm_gain, self._norm_shift, o, "output")
# Diagonal connections
if self._use_peepholes:
with vs.variable_scope(unit_scope):
w_f_diag = vs.get_variable(
"w_f_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"w_i_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"w_o_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (
sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * self._activation(j))
else:
c = (
sigmoid(f + self._forget_bias) * c_prev +
sigmoid(i) * self._activation(j))
if self._layer_norm:
c = _norm(self._norm_gain, self._norm_shift, c, "state")
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection"):
m = self._linear(m, self._num_proj, bias=False)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (rnn_cell_impl.LSTMStateTuple(c, m))
return m, new_state
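# Illustrative sketch (not part of the original module): one step of the
# layer-normalized LSTM. With layer_norm=True the gate pre-activations and
# the new cell state are normalized before the nonlinearities. Sizes are
# arbitrary assumptions.
def _example_layer_norm_lstm_step():
  """Runs one LayerNormLSTMCell step and returns (m, new_state)."""
  batch_size, input_size, num_units = 2, 5, 6
  cell = LayerNormLSTMCell(num_units=num_units, layer_norm=True)
  inputs = array_ops.zeros([batch_size, input_size])
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)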
class SRUCell(rnn_cell_impl.LayerRNNCell):
"""SRU, Simple Recurrent Unit.
Implementation based on
Training RNNs as Fast as CNNs (cf. https://arxiv.org/abs/1709.02755).
This variation of RNN cell is characterized by a simplified data dependence
between the hidden states of two consecutive time steps. Traditionally, the
hidden state from a cell at time step t-1 needs to be multiplied with a
matrix W_hh before being fed into the ensuing cell at time step t. This
flavor of RNN replaces that matrix multiplication between h_{t-1} and W_hh
with a pointwise multiplication, resulting in a performance gain.
Args:
num_units: int, The number of units in the SRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: (optional) String, the name of the layer. Layers with the same name
will share weights, but to avoid mistakes we require reuse=True in such
cases.
**kwargs: Additional keyword arguments.
"""
def __init__(self, num_units, activation=None, reuse=None, name=None,
**kwargs):
super(SRUCell, self).__init__(_reuse=reuse, name=name, **kwargs)
self._num_units = num_units
self._activation = activation or math_ops.tanh
# Restrict inputs to be 2-dimensional matrices
self.input_spec = input_spec.InputSpec(ndim=2)
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
if tensor_shape.dimension_value(inputs_shape[1]) is None:
raise ValueError(
"Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
input_depth = tensor_shape.dimension_value(inputs_shape[1])
# pylint: disable=protected-access
self._kernel = self.add_variable(
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
shape=[input_depth, 4 * self._num_units])
# pylint: enable=protected-access
self._bias = self.add_variable(
rnn_cell_impl._BIAS_VARIABLE_NAME, # pylint: disable=protected-access
shape=[2 * self._num_units],
initializer=init_ops.zeros_initializer)
self._built = True
def call(self, inputs, state):
"""Simple recurrent unit (SRU) with num_units cells."""
U = math_ops.matmul(inputs, self._kernel) # pylint: disable=invalid-name
x_bar, f_intermediate, r_intermediate, x_tx = array_ops.split(
value=U, num_or_size_splits=4, axis=1)
f_r = math_ops.sigmoid(
nn_ops.bias_add(
array_ops.concat([f_intermediate, r_intermediate], 1), self._bias))
f, r = array_ops.split(value=f_r, num_or_size_splits=2, axis=1)
c = f * state + (1.0 - f) * x_bar
h = r * self._activation(c) + (1.0 - r) * x_tx
return h, c
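# Illustrative sketch (not part of the original module): one SRU step. Note
# that the recurrence only multiplies the previous state elementwise (no
# hidden-to-hidden matmul), and that call returns (h, c) where c is the new
# state. Sizes are arbitrary assumptions.
def _example_sru_step():
  """Runs one SRUCell step and returns (h, c)."""
  batch_size, input_size, num_units = 2, 4, 4
  cell = SRUCell(num_units=num_units)
  inputs = array_ops.zeros([batch_size, input_size])
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)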
class WeightNormLSTMCell(rnn_cell_impl.RNNCell):
"""Weight normalized LSTM Cell. Adapted from `rnn_cell_impl.LSTMCell`.
The weight-norm implementation is based on:
https://arxiv.org/abs/1602.07868
Tim Salimans, Diederik P. Kingma.
Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks
The default LSTM implementation based on:
https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf
Felix Gers, Jurgen Schmidhuber, and Fred Cummins.
"Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999.
The class uses optional peephole connections, optional cell clipping
and an optional projection layer.
The optional peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
"""
def __init__(self,
num_units,
norm=True,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_proj=None,
proj_clip=None,
forget_bias=1,
activation=None,
reuse=None):
"""Initialize the parameters of a weight-normalized LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
norm: If `True`, apply normalization to the weight matrices. If False,
the result is identical to that obtained from `rnn_cell_impl.LSTMCell`
use_peepholes: bool, set `True` to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(WeightNormLSTMCell, self).__init__(_reuse=reuse)
self._scope = "wn_lstm_cell"
self._num_units = num_units
self._norm = norm
self._initializer = initializer
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._num_proj = num_proj
self._proj_clip = proj_clip
self._activation = activation or math_ops.tanh
self._forget_bias = forget_bias
self._weights_variable_name = "kernel"
self._bias_variable_name = "bias"
if num_proj:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def _normalize(self, weight, name):
"""Apply weight normalization.
Args:
weight: a 2D tensor with known number of columns.
name: string, variable name for the normalizer.
Returns:
A tensor with the same shape as `weight`.
"""
output_size = weight.get_shape().as_list()[1]
g = vs.get_variable(name, [output_size], dtype=weight.dtype)
return nn_impl.l2_normalize(weight, axis=0) * g
def _linear(self,
args,
output_size,
norm,
bias,
bias_initializer=None,
kernel_initializer=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
norm: bool, whether to normalize the weights.
bias: boolean, whether to add a bias term or not.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if any of the arguments has an unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if tensor_shape.dimension_value(shape[1]) is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += tensor_shape.dimension_value(shape[1])
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
self._weights_variable_name, [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if norm:
wn = []
st = 0
with ops.control_dependencies(None):
for i in range(len(args)):
en = st + tensor_shape.dimension_value(shapes[i][1])
wn.append(
self._normalize(weights[st:en, :], name="norm_{}".format(i)))
st = en
weights = array_ops.concat(wn, axis=0)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
biases = vs.get_variable(
self._bias_variable_name, [output_size],
dtype=dtype,
initializer=bias_initializer)
return nn_ops.bias_add(res, biases)
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x input_size.
state: A tuple of state Tensors, both `2-D`, with column sizes
`c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
dtype = inputs.dtype
num_units = self._num_units
sigmoid = math_ops.sigmoid
c, h = state
input_size = inputs.get_shape().with_rank(2).dims[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(self._scope, initializer=self._initializer):
concat = self._linear(
[inputs, h], 4 * num_units, norm=self._norm, bias=True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
if self._use_peepholes:
w_f_diag = vs.get_variable("w_f_diag", shape=[num_units], dtype=dtype)
w_i_diag = vs.get_variable("w_i_diag", shape=[num_units], dtype=dtype)
w_o_diag = vs.get_variable("w_o_diag", shape=[num_units], dtype=dtype)
new_c = (
c * sigmoid(f + self._forget_bias + w_f_diag * c) +
sigmoid(i + w_i_diag * c) * self._activation(j))
else:
new_c = (
c * sigmoid(f + self._forget_bias) +
sigmoid(i) * self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
new_c = clip_ops.clip_by_value(new_c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
new_h = sigmoid(o + w_o_diag * new_c) * self._activation(new_c)
else:
new_h = sigmoid(o) * self._activation(new_c)
if self._num_proj is not None:
with vs.variable_scope("projection"):
new_h = self._linear(
new_h, self._num_proj, norm=self._norm, bias=False)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
new_h = clip_ops.clip_by_value(new_h, -self._proj_clip,
self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
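# Illustrative sketch (not part of the original module): one step of the
# weight-normalized LSTM. With norm=True each argument's slice of the kernel
# is L2-normalized column-wise and rescaled by a learned gain g. Sizes are
# arbitrary assumptions.
def _example_weight_norm_lstm_step():
  """Runs one WeightNormLSTMCell step and returns (new_h, new_state)."""
  batch_size, input_size, num_units = 2, 3, 5
  cell = WeightNormLSTMCell(num_units=num_units, norm=True)
  inputs = array_ops.zeros([batch_size, input_size])
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)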
class IndRNNCell(rnn_cell_impl.LayerRNNCell):
"""Independently Recurrent Neural Network (IndRNN) cell
(cf. https://arxiv.org/abs/1803.04831).
Args:
num_units: int, The number of units in the RNN cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the type
of the first input). Required when `build` is called before `call`.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
name=None,
dtype=None):
super(IndRNNCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
self._activation = activation or math_ops.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
if tensor_shape.dimension_value(inputs_shape[1]) is None:
raise ValueError(
"Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
input_depth = tensor_shape.dimension_value(inputs_shape[1])
# pylint: disable=protected-access
self._kernel_w = self.add_variable(
"%s_w" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
shape=[input_depth, self._num_units])
self._kernel_u = self.add_variable(
"%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
shape=[1, self._num_units],
initializer=init_ops.random_uniform_initializer(
minval=-1, maxval=1, dtype=self.dtype))
self._bias = self.add_variable(
rnn_cell_impl._BIAS_VARIABLE_NAME,
shape=[self._num_units],
initializer=init_ops.zeros_initializer(dtype=self.dtype))
# pylint: enable=protected-access
self.built = True
def call(self, inputs, state):
"""IndRNN: output = new_state = act(W * input + u * state + B)."""
gate_inputs = math_ops.matmul(inputs, self._kernel_w) + (
state * self._kernel_u)
gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
output = self._activation(gate_inputs)
return output, output
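# Illustrative sketch (not part of the original module): one IndRNN step.
# The recurrent term is state * u (elementwise), so each unit only sees its
# own previous activation. Sizes are arbitrary assumptions.
def _example_indrnn_step():
  """Runs one IndRNNCell step; output and new state are identical."""
  batch_size, input_size, num_units = 2, 3, 4
  cell = IndRNNCell(num_units=num_units)
  inputs = array_ops.zeros([batch_size, input_size])
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)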
class IndyGRUCell(rnn_cell_impl.LayerRNNCell):
r"""Independently Gated Recurrent Unit cell.
Based on IndRNNs (https://arxiv.org/abs/1803.04831) and similar to GRUCell,
yet with the \\(U_r\\), \\(U_z\\), and \\(U\\) matrices in equations 5, 6, and
8 of http://arxiv.org/abs/1406.1078 respectively replaced by diagonal
matrices, i.e. a Hadamard product with a single vector:
$$r_j = \sigma\left([\mathbf W_r\mathbf x]_j +
[\mathbf u_r\circ \mathbf h_{(t-1)}]_j\right)$$
$$z_j = \sigma\left([\mathbf W_z\mathbf x]_j +
[\mathbf u_z\circ \mathbf h_{(t-1)}]_j\right)$$
$$\tilde{h}^{(t)}_j = \phi\left([\mathbf W \mathbf x]_j +
[\mathbf u \circ \mathbf r \circ \mathbf h_{(t-1)}]_j\right)$$
where \\(\circ\\) denotes the Hadamard operator. This means that each IndyGRU
node sees only its own state, as opposed to seeing all states in the same
layer.
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight
matrices applied to the input.
bias_initializer: (optional) The initializer to use for the bias.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the type
of the first input). Required when `build` is called before `call`.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None,
name=None,
dtype=None):
super(IndyGRUCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
if tensor_shape.dimension_value(inputs_shape[1]) is None:
raise ValueError(
"Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
input_depth = tensor_shape.dimension_value(inputs_shape[1])
# pylint: disable=protected-access
self._gate_kernel_w = self.add_variable(
"gates/%s_w" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
shape=[input_depth, 2 * self._num_units],
initializer=self._kernel_initializer)
self._gate_kernel_u = self.add_variable(
"gates/%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
shape=[1, 2 * self._num_units],
initializer=init_ops.random_uniform_initializer(
minval=-1, maxval=1, dtype=self.dtype))
self._gate_bias = self.add_variable(
"gates/%s" % rnn_cell_impl._BIAS_VARIABLE_NAME,
shape=[2 * self._num_units],
initializer=(self._bias_initializer
if self._bias_initializer is not None else
init_ops.constant_initializer(1.0, dtype=self.dtype)))
self._candidate_kernel_w = self.add_variable(
"candidate/%s" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
shape=[input_depth, self._num_units],
initializer=self._kernel_initializer)
self._candidate_kernel_u = self.add_variable(
"candidate/%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
shape=[1, self._num_units],
initializer=init_ops.random_uniform_initializer(
minval=-1, maxval=1, dtype=self.dtype))
self._candidate_bias = self.add_variable(
"candidate/%s" % rnn_cell_impl._BIAS_VARIABLE_NAME,
shape=[self._num_units],
initializer=(self._bias_initializer
if self._bias_initializer is not None else
init_ops.zeros_initializer(dtype=self.dtype)))
# pylint: enable=protected-access
self.built = True
def call(self, inputs, state):
"""Recurrently independent Gated Recurrent Unit (GRU) with nunits cells."""
gate_inputs = math_ops.matmul(inputs, self._gate_kernel_w) + (
gen_array_ops.tile(state, [1, 2]) * self._gate_kernel_u)
gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)
value = math_ops.sigmoid(gate_inputs)
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
candidate = math_ops.matmul(inputs, self._candidate_kernel_w) + (
r_state * self._candidate_kernel_u)
candidate = nn_ops.bias_add(candidate, self._candidate_bias)
c = self._activation(candidate)
new_h = u * state + (1 - u) * c
return new_h, new_h
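# Illustrative sketch (not part of the original module): one IndyGRU step.
# As in the equations above, the recurrent U matrices are replaced by
# per-unit vectors applied with a Hadamard product. Sizes are arbitrary
# assumptions.
def _example_indygru_step():
  """Runs one IndyGRUCell step; output and new state are identical."""
  batch_size, input_size, num_units = 2, 3, 4
  cell = IndyGRUCell(num_units=num_units)
  inputs = array_ops.zeros([batch_size, input_size])
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)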
class IndyLSTMCell(rnn_cell_impl.LayerRNNCell):
r"""Basic IndyLSTM recurrent network cell.
Based on IndRNNs (https://arxiv.org/abs/1803.04831) and similar to
BasicLSTMCell, yet with the \\(U_f\\), \\(U_i\\), \\(U_o\\) and \\(U_c\\)
matrices in the regular LSTM equations replaced by diagonal matrices, i.e. a
Hadamard product with a single vector:
$$f_t = \sigma_g\left(W_f x_t + u_f \circ h_{t-1} + b_f\right)$$
$$i_t = \sigma_g\left(W_i x_t + u_i \circ h_{t-1} + b_i\right)$$
$$o_t = \sigma_g\left(W_o x_t + u_o \circ h_{t-1} + b_o\right)$$
$$c_t = f_t \circ c_{t-1} +
i_t \circ \sigma_c\left(W_c x_t + u_c \circ h_{t-1} + b_c\right)$$
where \\(\circ\\) denotes the Hadamard operator. This means that each IndyLSTM
node sees only its own state \\(h\\) and \\(c\\), as opposed to seeing all
states in the same layer.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting at the beginning of training.
It does not allow cell clipping or a projection layer, and it does not
use peephole connections: it is the basic baseline.
For a detailed analysis of IndyLSTMs, see https://arxiv.org/abs/1903.08023.
"""
def __init__(self,
num_units,
forget_bias=1.0,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None,
name=None,
dtype=None):
"""Initialize the IndyLSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
Must set to `0.0` manually when restoring from CudnnLSTM-trained
checkpoints.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight
matrix applied to the inputs.
bias_initializer: (optional) The initializer to use for the bias.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the type
of the first input). Required when `build` is called before `call`.
"""
super(IndyLSTMCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
self._forget_bias = forget_bias
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
if tensor_shape.dimension_value(inputs_shape[1]) is None:
raise ValueError(
"Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
input_depth = tensor_shape.dimension_value(inputs_shape[1])
# pylint: disable=protected-access
self._kernel_w = self.add_variable(
"%s_w" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
shape=[input_depth, 4 * self._num_units],
initializer=self._kernel_initializer)
self._kernel_u = self.add_variable(
"%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
shape=[1, 4 * self._num_units],
initializer=init_ops.random_uniform_initializer(
minval=-1, maxval=1, dtype=self.dtype))
self._bias = self.add_variable(
rnn_cell_impl._BIAS_VARIABLE_NAME,
shape=[4 * self._num_units],
initializer=(self._bias_initializer
if self._bias_initializer is not None else
init_ops.zeros_initializer(dtype=self.dtype)))
# pylint: enable=protected-access
self.built = True
def call(self, inputs, state):
"""Independent Long short-term memory cell (IndyLSTM).
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: An `LSTMStateTuple` of state tensors, each shaped
`[batch_size, num_units]`.
Returns:
A pair containing the new hidden state, and the new state (a
`LSTMStateTuple`).
"""
sigmoid = math_ops.sigmoid
one = constant_op.constant(1, dtype=dtypes.int32)
c, h = state
gate_inputs = math_ops.matmul(inputs, self._kernel_w)
gate_inputs += gen_array_ops.tile(h, [1, 4]) * self._kernel_u
gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(
value=gate_inputs, num_or_size_splits=4, axis=one)
forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)
# Note that using `add` and `multiply` instead of `+` and `*` gives a
# performance improvement. So using those at the cost of readability.
add = math_ops.add
multiply = math_ops.multiply
new_c = add(
multiply(c, sigmoid(add(f, forget_bias_tensor))),
multiply(sigmoid(i), self._activation(j)))
new_h = multiply(self._activation(new_c), sigmoid(o))
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
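# --- Illustrative usage sketch (not part of the original file) ---------------
# A minimal example of wiring IndyLSTMCell into a dynamic RNN, assuming TF 1.x
# graph mode; the placeholder shapes and the helper name are hypothetical.
def _example_indy_lstm_usage():
  import tensorflow as tf
  cell = IndyLSTMCell(num_units=64)
  # [batch, time, depth] inputs; dynamic_rnn unrolls the cell over time.
  inputs = tf.placeholder(tf.float32, [None, 20, 32])
  outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
  return outputs, final_state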
NTMControllerState = collections.namedtuple(
"NTMControllerState",
("controller_state", "read_vector_list", "w_list", "M", "time"))
class NTMCell(rnn_cell_impl.LayerRNNCell):
"""Neural Turing Machine Cell with RNN controller.
Implementation based on:
https://arxiv.org/abs/1807.08518
Mark Collier, Joeran Beel
which is in turn based on the source code of:
https://github.com/snowkylin/ntm
and of course the original NTM paper:
Neural Turing Machines
https://arxiv.org/abs/1410.5401
A Graves, G Wayne, I Danihelka
"""
def __init__(self,
controller,
memory_size,
memory_vector_dim,
read_head_num,
write_head_num,
shift_range=1,
output_dim=None,
clip_value=20,
dtype=dtypes.float32,
name=None):
"""Initialize the NTM Cell.
Args:
controller: an RNNCell, the RNN controller.
      memory_size: int, The number of memory locations in the NTM memory
        matrix.
      memory_vector_dim: int, The dimensionality of each location in the NTM
        memory matrix.
      read_head_num: int, The number of read heads from the controller into
        memory.
      write_head_num: int, The number of write heads from the controller into
        memory.
      shift_range: int, The maximum number of positions the previous address
        can be shifted left or right in a single step.
      output_dim: int, The number of dimensions to linearly project the NTM
        controller outputs to. If None, no linear projection is applied.
      clip_value: float, The maximum absolute value to which the controller
        parameters are clipped.
      dtype: Default dtype of the layer (defaults to `tf.float32`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
"""
super(NTMCell, self).__init__(dtype=dtype, name=name)
rnn_cell_impl.assert_like_rnncell("NTM RNN controller cell", controller)
self.controller = controller
self.memory_size = memory_size
self.memory_vector_dim = memory_vector_dim
self.read_head_num = read_head_num
self.write_head_num = write_head_num
self.clip_value = clip_value
self.output_dim = output_dim
self.shift_range = shift_range
self.num_parameters_per_head = (
self.memory_vector_dim + 2 * self.shift_range + 4)
self.num_heads = self.read_head_num + self.write_head_num
self.total_parameter_num = (
self.num_parameters_per_head * self.num_heads +
self.memory_vector_dim * 2 * self.write_head_num)
@property
def state_size(self):
return NTMControllerState(
controller_state=self.controller.state_size,
read_vector_list=[
self.memory_vector_dim for _ in range(self.read_head_num)
],
w_list=[
self.memory_size
for _ in range(self.read_head_num + self.write_head_num)
],
M=tensor_shape.TensorShape([self.memory_size * self.memory_vector_dim]),
time=tensor_shape.TensorShape([]))
@property
def output_size(self):
return self.output_dim
def build(self, inputs_shape):
if self.output_dim is None:
if inputs_shape[1].value is None:
raise ValueError(
"Expected inputs.shape[-1] to be known, saw shape: %s" %
inputs_shape)
else:
self.output_dim = inputs_shape[1].value
def _create_linear_initializer(input_size, dtype=dtypes.float32):
stddev = 1.0 / math.sqrt(input_size)
return init_ops.truncated_normal_initializer(stddev=stddev, dtype=dtype)
self._params_kernel = self.add_variable(
"parameters_kernel",
shape=[self.controller.output_size, self.total_parameter_num],
initializer=_create_linear_initializer(self.controller.output_size))
self._params_bias = self.add_variable(
"parameters_bias",
shape=[self.total_parameter_num],
initializer=init_ops.constant_initializer(0.0, dtype=self.dtype))
self._output_kernel = self.add_variable(
"output_kernel",
shape=[
self.controller.output_size +
self.memory_vector_dim * self.read_head_num, self.output_dim
],
initializer=_create_linear_initializer(self.controller.output_size +
self.memory_vector_dim *
self.read_head_num))
self._output_bias = self.add_variable(
"output_bias",
shape=[self.output_dim],
initializer=init_ops.constant_initializer(0.0, dtype=self.dtype))
self._init_read_vectors = [
self.add_variable(
"initial_read_vector_%d" % i,
shape=[1, self.memory_vector_dim],
initializer=initializers.glorot_uniform())
for i in range(self.read_head_num)
]
self._init_address_weights = [
self.add_variable(
"initial_address_weights_%d" % i,
shape=[1, self.memory_size],
initializer=initializers.glorot_uniform())
for i in range(self.read_head_num + self.write_head_num)
]
self._M = self.add_variable(
"memory",
shape=[self.memory_size, self.memory_vector_dim],
initializer=init_ops.constant_initializer(1e-6, dtype=self.dtype))
self.built = True
def call(self, x, prev_state):
# Addressing Mechanisms (Sec 3.3)
def _prev_read_vector_list_initial_value():
return [
self._expand(
math_ops.tanh(
array_ops.squeeze(
math_ops.matmul(
array_ops.ones([1, 1]), self._init_read_vectors[i]))),
dim=0,
N=x.shape[0].value or array_ops.shape(x)[0])
for i in range(self.read_head_num)
]
    prev_read_vector_list = control_flow_ops.cond(
        math_ops.equal(prev_state.time, 0),
        _prev_read_vector_list_initial_value,
        lambda: prev_state.read_vector_list)
if self.read_head_num == 1:
prev_read_vector_list = [prev_read_vector_list]
controller_input = array_ops.concat([x] + prev_read_vector_list, axis=1)
controller_output, controller_state = self.controller(
controller_input, prev_state.controller_state)
parameters = math_ops.matmul(controller_output, self._params_kernel)
parameters = nn_ops.bias_add(parameters, self._params_bias)
parameters = clip_ops.clip_by_value(parameters, -self.clip_value,
self.clip_value)
head_parameter_list = array_ops.split(
parameters[:, :self.num_parameters_per_head * self.num_heads],
self.num_heads,
axis=1)
erase_add_list = array_ops.split(
parameters[:, self.num_parameters_per_head * self.num_heads:],
2 * self.write_head_num,
axis=1)
def _prev_w_list_initial_value():
return [
self._expand(
nn_ops.softmax(
array_ops.squeeze(
math_ops.matmul(
array_ops.ones([1, 1]),
self._init_address_weights[i]))),
dim=0,
N=x.shape[0].value or array_ops.shape(x)[0])
for i in range(self.read_head_num + self.write_head_num)
]
prev_w_list = control_flow_ops.cond(
math_ops.equal(prev_state.time, 0),
_prev_w_list_initial_value, lambda: prev_state.w_list)
if (self.read_head_num + self.write_head_num) == 1:
prev_w_list = [prev_w_list]
prev_M = control_flow_ops.cond(
math_ops.equal(prev_state.time, 0), lambda: self._expand(
self._M, dim=0, N=x.shape[0].value or array_ops.shape(x)[0]),
lambda: prev_state.M)
w_list = []
for i, head_parameter in enumerate(head_parameter_list):
k = math_ops.tanh(head_parameter[:, 0:self.memory_vector_dim])
beta = nn_ops.softplus(head_parameter[:, self.memory_vector_dim])
g = math_ops.sigmoid(head_parameter[:, self.memory_vector_dim + 1])
s = nn_ops.softmax(head_parameter[:, self.memory_vector_dim +
2:(self.memory_vector_dim + 2 +
(self.shift_range * 2 + 1))])
gamma = nn_ops.softplus(head_parameter[:, -1]) + 1
w = self._addressing(k, beta, g, s, gamma, prev_M, prev_w_list[i])
w_list.append(w)
# Reading (Sec 3.1)
read_w_list = w_list[:self.read_head_num]
read_vector_list = []
for i in range(self.read_head_num):
read_vector = math_ops.reduce_sum(
array_ops.expand_dims(read_w_list[i], dim=2) * prev_M, axis=1)
read_vector_list.append(read_vector)
# Writing (Sec 3.2)
write_w_list = w_list[self.read_head_num:]
M = prev_M
for i in range(self.write_head_num):
w = array_ops.expand_dims(write_w_list[i], axis=2)
erase_vector = array_ops.expand_dims(
math_ops.sigmoid(erase_add_list[i * 2]), axis=1)
add_vector = array_ops.expand_dims(
math_ops.tanh(erase_add_list[i * 2 + 1]), axis=1)
erase_M = array_ops.ones_like(M) - math_ops.matmul(w, erase_vector)
M = M * erase_M + math_ops.matmul(w, add_vector)
output = math_ops.matmul(
array_ops.concat([controller_output] + read_vector_list, axis=1),
self._output_kernel)
output = nn_ops.bias_add(output, self._output_bias)
output = clip_ops.clip_by_value(output, -self.clip_value, self.clip_value)
return output, NTMControllerState(
controller_state=controller_state,
read_vector_list=read_vector_list,
w_list=w_list,
M=M,
time=prev_state.time + 1)
def _expand(self, x, dim, N):
return array_ops.concat([array_ops.expand_dims(x, dim) for _ in range(N)],
axis=dim)
def _addressing(self, k, beta, g, s, gamma, prev_M, prev_w):
# Sec 3.3.1 Focusing by Content
k = array_ops.expand_dims(k, axis=2)
inner_product = math_ops.matmul(prev_M, k)
k_norm = math_ops.sqrt(
math_ops.reduce_sum(math_ops.square(k), axis=1, keepdims=True))
M_norm = math_ops.sqrt(
math_ops.reduce_sum(math_ops.square(prev_M), axis=2, keepdims=True))
norm_product = M_norm * k_norm
# eq (6)
K = array_ops.squeeze(inner_product / (norm_product + 1e-8))
K_amplified = math_ops.exp(array_ops.expand_dims(beta, axis=1) * K)
# eq (5)
w_c = K_amplified / math_ops.reduce_sum(K_amplified, axis=1, keepdims=True)
# Sec 3.3.2 Focusing by Location
g = array_ops.expand_dims(g, axis=1)
# eq (7)
w_g = g * w_c + (1 - g) * prev_w
s = array_ops.concat([
s[:, :self.shift_range + 1],
array_ops.zeros([
s.shape[0].value or array_ops.shape(s)[0], self.memory_size -
(self.shift_range * 2 + 1)
]), s[:, -self.shift_range:]
],
axis=1)
t = array_ops.concat(
[array_ops.reverse(s, axis=[1]),
array_ops.reverse(s, axis=[1])],
axis=1)
s_matrix = array_ops.stack([
t[:, self.memory_size - i - 1:self.memory_size * 2 - i - 1]
for i in range(self.memory_size)
],
axis=1)
# eq (8)
w_ = math_ops.reduce_sum(
array_ops.expand_dims(w_g, axis=1) * s_matrix, axis=2)
w_sharpen = math_ops.pow(w_, array_ops.expand_dims(gamma, axis=1))
# eq (9)
w = w_sharpen / math_ops.reduce_sum(w_sharpen, axis=1, keepdims=True)
return w
def zero_state(self, batch_size, dtype):
read_vector_list = [
array_ops.zeros([batch_size, self.memory_vector_dim])
for _ in range(self.read_head_num)
]
w_list = [
array_ops.zeros([batch_size, self.memory_size])
for _ in range(self.read_head_num + self.write_head_num)
]
controller_init_state = self.controller.zero_state(batch_size, dtype)
M = array_ops.zeros([batch_size, self.memory_size, self.memory_vector_dim])
return NTMControllerState(
controller_state=controller_init_state,
read_vector_list=read_vector_list,
w_list=w_list,
M=M,
time=0)
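# --- Illustrative sketch (not part of the original file) ---------------------
# A NumPy rendering of the content-focusing step used in NTMCell._addressing
# above (eqs. (5)-(6) of the NTM paper): cosine similarity of the key against
# every memory row, sharpened by `beta` and normalized with a softmax. Names
# are hypothetical and a single un-batched head is handled for clarity.
def _numpy_content_addressing(key, beta, memory, eps=1e-8):
  """key: [dim]; beta: positive scalar; memory: [memory_size, dim]."""
  import numpy as np
  similarity = np.matmul(memory, key) / (
      np.linalg.norm(memory, axis=1) * np.linalg.norm(key) + eps)  # eq (6)
  scores = np.exp(beta * similarity)
  return scores / np.sum(scores)  # eq (5)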
class MinimalRNNCell(rnn_cell_impl.LayerRNNCell):
"""MinimalRNN cell.
The implementation is based on:
https://arxiv.org/pdf/1806.05394v2.pdf
Minmin Chen, Jeffrey Pennington, Samuel S. Schoenholz.
"Dynamical Isometry and a Mean Field Theory of RNNs: Gating Enables Signal
Propagation in Recurrent Neural Networks." ICML, 2018.
A MinimalRNN cell first projects the input to the hidden space. The new
hidden state is then calculated as a weighted sum of the projected input and
the previous hidden state, using a single update gate.
"""
def __init__(self,
units,
activation="tanh",
kernel_initializer="glorot_uniform",
bias_initializer="ones",
name=None,
dtype=None,
**kwargs):
"""Initialize the parameters for a MinimalRNN cell.
Args:
units: int, The number of units in the MinimalRNN cell.
activation: Nonlinearity to use in the feedforward network. Default:
`tanh`.
kernel_initializer: The initializer to use for the weight in the update
gate and feedforward network. Default: `glorot_uniform`.
bias_initializer: The initializer to use for the bias in the update
gate. Default: `ones`.
name: String, the name of the cell.
dtype: Default dtype of the cell.
**kwargs: Dict, keyword named properties for common cell attributes.
"""
super(MinimalRNNCell, self).__init__(name=name, dtype=dtype, **kwargs)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self.units = units
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
@property
def state_size(self):
return self.units
@property
def output_size(self):
return self.units
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
% str(inputs_shape))
input_size = inputs_shape[-1]
# pylint: disable=protected-access
# self._kernel contains W_x, W, V
self.kernel = self.add_weight(
name=rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
shape=[input_size + 2 * self.units, self.units],
initializer=self.kernel_initializer)
self.bias = self.add_weight(
name=rnn_cell_impl._BIAS_VARIABLE_NAME,
shape=[self.units],
initializer=self.bias_initializer)
# pylint: enable=protected-access
self.built = True
def call(self, inputs, state):
"""Run one step of MinimalRNN.
Args:
inputs: input Tensor, must be 2-D, `[batch, input_size]`.
state: state Tensor, must be 2-D, `[batch, state_size]`.
Returns:
A tuple containing:
- Output: A `2-D` tensor with shape `[batch_size, state_size]`.
- New state: A `2-D` tensor with shape `[batch_size, state_size]`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
input_size = inputs.get_shape()[1]
if tensor_shape.dimension_value(input_size) is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
feedforward_weight, gate_weight = array_ops.split(
value=self.kernel,
num_or_size_splits=[tensor_shape.dimension_value(input_size),
2 * self.units],
axis=0)
feedforward = math_ops.matmul(inputs, feedforward_weight)
feedforward = self.activation(feedforward)
gate_inputs = math_ops.matmul(
array_ops.concat([feedforward, state], 1), gate_weight)
gate_inputs = nn_ops.bias_add(gate_inputs, self.bias)
u = math_ops.sigmoid(gate_inputs)
new_h = u * state + (1 - u) * feedforward
return new_h, new_h
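# --- Illustrative sketch (not part of the original file) ---------------------
# A NumPy rendering of one MinimalRNN step as computed by `call` above: project
# the input, then blend it with the previous state through a single update
# gate. All names and shapes are hypothetical.
def _numpy_minimal_rnn_step(x, h, w_x, w_gate, b):
  """x: [batch, depth]; h: [batch, units]; w_x: [depth, units];
  w_gate: [2 * units, units]; b: [units]."""
  import numpy as np
  sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
  feedforward = np.tanh(np.matmul(x, w_x))  # projected input
  u = sigmoid(np.matmul(np.concatenate([feedforward, h], axis=1), w_gate) + b)
  return u * h + (1.0 - u) * feedforward  # new hidden state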
class CFNCell(rnn_cell_impl.LayerRNNCell):
"""Chaos Free Network cell.
The implementation is based on:
https://openreview.net/pdf?id=S1dIzvclg
Thomas Laurent, James von Brecht.
"A recurrent neural network without chaos." ICLR, 2017.
A CFN cell first projects the input to the hidden space. The hidden state
goes through a contractive mapping. The new hidden state is then calculated
as a linear combination of the projected input and the contracted previous
hidden state, using decoupled input and forget gates.
"""
def __init__(self,
units,
activation="tanh",
kernel_initializer="glorot_uniform",
bias_initializer="ones",
name=None,
dtype=None,
**kwargs):
"""Initialize the parameters for a CFN cell.
Args:
units: int, The number of units in the CFN cell.
activation: Nonlinearity to use. Default: `tanh`.
kernel_initializer: Initializer for the `kernel` weights
matrix. Default: `glorot_uniform`.
bias_initializer: The initializer to use for the bias in the
gates. Default: `ones`.
name: String, the name of the cell.
dtype: Default dtype of the cell.
**kwargs: Dict, keyword named properties for common cell attributes.
"""
super(CFNCell, self).__init__(name=name, dtype=dtype, **kwargs)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self.units = units
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
@property
def state_size(self):
return self.units
@property
def output_size(self):
return self.units
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
% str(inputs_shape))
input_size = inputs_shape[-1]
# pylint: disable=protected-access
# `self.kernel` contains V_{\theta}, V_{\eta}, W.
# `self.recurrent_kernel` contains U_{\theta}, U_{\eta}.
# `self.bias` contains b_{\theta}, b_{\eta}.
self.kernel = self.add_weight(
shape=[input_size, 3 * self.units],
name=rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
initializer=self.kernel_initializer)
self.recurrent_kernel = self.add_weight(
shape=[self.units, 2 * self.units],
name="recurrent_%s" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
initializer=self.kernel_initializer)
self.bias = self.add_weight(
shape=[2 * self.units],
name=rnn_cell_impl._BIAS_VARIABLE_NAME,
initializer=self.bias_initializer)
# pylint: enable=protected-access
self.built = True
def call(self, inputs, state):
"""Run one step of CFN.
Args:
inputs: input Tensor, must be 2-D, `[batch, input_size]`.
state: state Tensor, must be 2-D, `[batch, state_size]`.
Returns:
A tuple containing:
- Output: A `2-D` tensor with shape `[batch_size, state_size]`.
- New state: A `2-D` tensor with shape `[batch_size, state_size]`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
input_size = inputs.get_shape()[-1]
if tensor_shape.dimension_value(input_size) is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
# The variable names u, v, w, b are consistent with the notations in the
# original paper.
v, w = array_ops.split(
value=self.kernel,
num_or_size_splits=[2 * self.units, self.units],
axis=1)
u = self.recurrent_kernel
b = self.bias
gates = math_ops.matmul(state, u) + math_ops.matmul(inputs, v)
gates = nn_ops.bias_add(gates, b)
gates = math_ops.sigmoid(gates)
theta, eta = array_ops.split(value=gates,
num_or_size_splits=2,
axis=1)
proj_input = math_ops.matmul(inputs, w)
# The input gate is (1 - eta), which is different from the original paper.
    # This is for the purpose of initialization. With the default
# bias_initializer `ones`, the input gate is initialized to a small number.
new_h = theta * self.activation(state) + (1 - eta) * self.activation(
proj_input)
return new_h, new_h
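# --- Illustrative sketch (not part of the original file) ---------------------
# A NumPy rendering of one CFN step as computed by `call` above: two decoupled
# gates, theta for the contracted previous state and (1 - eta) for the
# projected input. All names and shapes are hypothetical.
def _numpy_cfn_step(x, h, v, w, u, b):
  """x: [batch, depth]; h: [batch, units]; v: [depth, 2 * units];
  w: [depth, units]; u: [units, 2 * units]; b: [2 * units]."""
  import numpy as np
  sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
  gates = sigmoid(np.matmul(h, u) + np.matmul(x, v) + b)
  theta, eta = np.split(gates, 2, axis=1)
  return theta * np.tanh(h) + (1.0 - eta) * np.tanh(np.matmul(x, w))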
|
tensorflow-master
|
tensorflow/contrib/rnn/python/ops/rnn_cell.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import variable_scope as vs
def stack_bidirectional_rnn(cells_fw,
cells_bw,
inputs,
initial_states_fw=None,
initial_states_bw=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a bidirectional recurrent neural network.
Stacks several bidirectional rnn layers. The combined forward and backward
layer outputs are used as input of the next layer. tf.bidirectional_rnn
  does not allow sharing forward and backward information between layers.
The input_size of the first forward and backward cells must match.
The initial state for both directions is zero and no intermediate states
are returned.
As described in https://arxiv.org/abs/1303.5778
Args:
cells_fw: List of instances of RNNCell, one per layer,
to be used for forward direction.
cells_bw: List of instances of RNNCell, one per layer,
to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, input_size], or a nested tuple of such elements.
initial_states_fw: (optional) A list of the initial states (one per layer)
for the forward RNN.
      Each tensor must have an appropriate type and shape
`[batch_size, cell_fw.state_size]`.
initial_states_bw: (optional) Same as for `initial_states_fw`, but using
the corresponding properties of `cells_bw`.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to None.
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs is a length `T` list of outputs (one for each input), which
are depth-concatenated forward and backward outputs.
output_states_fw is the final states, one tensor per layer,
of the forward rnn.
output_states_bw is the final states, one tensor per layer,
of the backward rnn.
Raises:
    TypeError: If any cell in `cells_fw` or `cells_bw` is not an instance of
      `RNNCell`.
    ValueError: If `inputs` is `None`, not a list, or an empty list.
"""
if not cells_fw:
raise ValueError("Must specify at least one fw cell for BidirectionalRNN.")
if not cells_bw:
raise ValueError("Must specify at least one bw cell for BidirectionalRNN.")
if not isinstance(cells_fw, list):
raise ValueError("cells_fw must be a list of RNNCells (one per layer).")
if not isinstance(cells_bw, list):
raise ValueError("cells_bw must be a list of RNNCells (one per layer).")
if len(cells_fw) != len(cells_bw):
raise ValueError("Forward and Backward cells must have the same depth.")
if (initial_states_fw is not None and
(not isinstance(initial_states_fw, list) or
len(initial_states_fw) != len(cells_fw))):
raise ValueError(
"initial_states_fw must be a list of state tensors (one per layer).")
if (initial_states_bw is not None and
(not isinstance(initial_states_bw, list) or
len(initial_states_bw) != len(cells_bw))):
raise ValueError(
"initial_states_bw must be a list of state tensors (one per layer).")
states_fw = []
states_bw = []
prev_layer = inputs
with vs.variable_scope(scope or "stack_bidirectional_rnn"):
for i, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)):
initial_state_fw = None
initial_state_bw = None
if initial_states_fw:
initial_state_fw = initial_states_fw[i]
if initial_states_bw:
initial_state_bw = initial_states_bw[i]
with vs.variable_scope("cell_%d" % i) as cell_scope:
prev_layer, state_fw, state_bw = rnn.static_bidirectional_rnn(
cell_fw,
cell_bw,
prev_layer,
initial_state_fw=initial_state_fw,
initial_state_bw=initial_state_bw,
sequence_length=sequence_length,
dtype=dtype,
scope=cell_scope)
states_fw.append(state_fw)
states_bw.append(state_bw)
return prev_layer, tuple(states_fw), tuple(states_bw)
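# --- Illustrative usage sketch (not part of the original file) ---------------
# A minimal example, assuming TF 1.x graph mode; cell sizes, shapes, and the
# helper name are hypothetical. The static variant expects a Python list of
# `[batch, depth]` tensors, one per time step.
def _example_stack_bidirectional_rnn():
  import tensorflow as tf
  inputs = tf.placeholder(tf.float32, [None, 10, 32])  # [batch, time, depth]
  inputs_list = tf.unstack(inputs, axis=1)  # length-10 list of 2-D tensors
  cells_fw = [tf.nn.rnn_cell.GRUCell(64) for _ in range(2)]
  cells_bw = [tf.nn.rnn_cell.GRUCell(64) for _ in range(2)]
  outputs, states_fw, states_bw = stack_bidirectional_rnn(
      cells_fw, cells_bw, inputs_list, dtype=tf.float32)
  return outputs, states_fw, states_bw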
def stack_bidirectional_dynamic_rnn(cells_fw,
cells_bw,
inputs,
initial_states_fw=None,
initial_states_bw=None,
dtype=None,
sequence_length=None,
parallel_iterations=None,
time_major=False,
scope=None,
swap_memory=False):
"""Creates a dynamic bidirectional recurrent neural network.
Stacks several bidirectional rnn layers. The combined forward and backward
layer outputs are used as input of the next layer. tf.bidirectional_rnn
  does not allow sharing forward and backward information between layers.
The input_size of the first forward and backward cells must match.
The initial state for both directions is zero and no intermediate states
are returned.
Args:
cells_fw: List of instances of RNNCell, one per layer,
to be used for forward direction.
cells_bw: List of instances of RNNCell, one per layer,
to be used for backward direction.
    inputs: The RNN inputs. This must be a tensor of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such elements.
initial_states_fw: (optional) A list of the initial states (one per layer)
for the forward RNN.
      Each tensor must have an appropriate type and shape
`[batch_size, cell_fw.state_size]`.
initial_states_bw: (optional) Same as for `initial_states_fw`, but using
the corresponding properties of `cells_bw`.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
time_major: The shape format of the inputs and outputs Tensors. If true,
these Tensors must be shaped [max_time, batch_size, depth]. If false,
these Tensors must be shaped [batch_size, max_time, depth]. Using
time_major = True is a bit more efficient because it avoids transposes at
the beginning and end of the RNN calculation. However, most TensorFlow
data is batch-major, so by default this function accepts input and emits
output in batch-major form.
scope: VariableScope for the created subgraph; defaults to None.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
      outputs: Output `Tensor` shaped
        `[batch_size, max_time, layers_output]`, where `layers_output` is the
        depth-concatenation of the forward and backward outputs.
output_states_fw is the final states, one tensor per layer,
of the forward rnn.
output_states_bw is the final states, one tensor per layer,
of the backward rnn.
Raises:
    TypeError: If any cell in `cells_fw` or `cells_bw` is not an instance of
      `RNNCell`.
    ValueError: If `inputs` is `None`.
"""
if not cells_fw:
raise ValueError("Must specify at least one fw cell for BidirectionalRNN.")
if not cells_bw:
raise ValueError("Must specify at least one bw cell for BidirectionalRNN.")
if not isinstance(cells_fw, list):
raise ValueError("cells_fw must be a list of RNNCells (one per layer).")
if not isinstance(cells_bw, list):
raise ValueError("cells_bw must be a list of RNNCells (one per layer).")
if len(cells_fw) != len(cells_bw):
raise ValueError("Forward and Backward cells must have the same depth.")
if (initial_states_fw is not None and
(not isinstance(initial_states_fw, list) or
len(initial_states_fw) != len(cells_fw))):
raise ValueError(
"initial_states_fw must be a list of state tensors (one per layer).")
if (initial_states_bw is not None and
(not isinstance(initial_states_bw, list) or
len(initial_states_bw) != len(cells_bw))):
raise ValueError(
"initial_states_bw must be a list of state tensors (one per layer).")
states_fw = []
states_bw = []
prev_layer = inputs
with vs.variable_scope(scope or "stack_bidirectional_rnn"):
for i, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)):
initial_state_fw = None
initial_state_bw = None
if initial_states_fw:
initial_state_fw = initial_states_fw[i]
if initial_states_bw:
initial_state_bw = initial_states_bw[i]
with vs.variable_scope("cell_%d" % i):
outputs, (state_fw, state_bw) = rnn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
prev_layer,
initial_state_fw=initial_state_fw,
initial_state_bw=initial_state_bw,
sequence_length=sequence_length,
parallel_iterations=parallel_iterations,
dtype=dtype,
swap_memory=swap_memory,
time_major=time_major)
# Concat the outputs to create the new input.
prev_layer = array_ops.concat(outputs, 2)
states_fw.append(state_fw)
states_bw.append(state_bw)
return prev_layer, tuple(states_fw), tuple(states_bw)
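# --- Illustrative usage sketch (not part of the original file) ---------------
# A minimal example for the dynamic variant, assuming TF 1.x graph mode; cell
# sizes, shapes, and the helper name are hypothetical. The output depth is the
# concatenation of the top layer's forward and backward outputs (2 * 64 here).
def _example_stack_bidirectional_dynamic_rnn():
  import tensorflow as tf
  inputs = tf.placeholder(tf.float32, [None, None, 32])  # [batch, time, depth]
  lengths = tf.placeholder(tf.int32, [None])  # per-example sequence lengths
  cells_fw = [tf.nn.rnn_cell.LSTMCell(64) for _ in range(3)]
  cells_bw = [tf.nn.rnn_cell.LSTMCell(64) for _ in range(3)]
  outputs, states_fw, states_bw = stack_bidirectional_dynamic_rnn(
      cells_fw, cells_bw, inputs, sequence_length=lengths, dtype=tf.float32)
  return outputs, states_fw, states_bw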
|
tensorflow-master
|
tensorflow/contrib/rnn/python/ops/rnn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing fused RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import rnn
@six.add_metaclass(abc.ABCMeta)
class FusedRNNCell(object):
"""Abstract object representing a fused RNN cell.
A fused RNN cell represents the entire RNN expanded over the time
dimension. In effect, this represents an entire recurrent network.
Unlike RNN cells which are subclasses of `rnn_cell.RNNCell`, a `FusedRNNCell`
operates on the entire time sequence at once, by putting the loop over time
  inside the cell. This usually leads to much more efficient, but also more
  complex and less flexible, implementations.
Every `FusedRNNCell` must implement `__call__` with the following signature.
"""
@abc.abstractmethod
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Run this fused RNN on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len x batch_size x input_size]`
or a list of `time_len` tensors of shape `[batch_size x input_size]`.
initial_state: either a tensor with shape `[batch_size x state_size]`
or a tuple with shapes `[batch_size x s] for s in state_size`, if the
cell takes tuples. If this is not provided, the cell is expected to
create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)`.
Defaults to `time_len` for each element.
scope: `VariableScope` or `string` for the created subgraph; defaults to
class name.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len x batch_size x output_size]`
or a list of `time_len` tensors of shape `[batch_size x output_size]`,
to match the type of the `inputs`.
- Final state: Either a single `2-D` tensor, or a tuple of tensors
matching the arity and shapes of `initial_state`.
"""
pass
class FusedRNNCellAdaptor(FusedRNNCell):
"""This is an adaptor for RNNCell classes to be used with `FusedRNNCell`."""
def __init__(self, cell, use_dynamic_rnn=False):
"""Initialize the adaptor.
Args:
cell: an instance of a subclass of a `rnn_cell.RNNCell`.
use_dynamic_rnn: whether to use dynamic (or static) RNN.
"""
self._cell = cell
self._use_dynamic_rnn = use_dynamic_rnn
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
is_list = isinstance(inputs, list)
if self._use_dynamic_rnn:
if is_list:
inputs = array_ops.stack(inputs)
outputs, state = rnn.dynamic_rnn(
self._cell,
inputs,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=dtype,
time_major=True,
scope=scope)
if is_list:
# Convert outputs back to list
outputs = array_ops.unstack(outputs)
else: # non-dynamic rnn
if not is_list:
inputs = array_ops.unstack(inputs)
outputs, state = rnn.static_rnn(
self._cell,
inputs,
initial_state=initial_state,
dtype=dtype,
sequence_length=sequence_length,
scope=scope)
if not is_list:
# Convert outputs back to tensor
outputs = array_ops.stack(outputs)
return outputs, state
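# --- Illustrative usage sketch (not part of the original file) ---------------
# A minimal example, assuming TF 1.x graph mode; shapes and the helper name are
# hypothetical. The adaptor runs an ordinary RNNCell over a whole time-major
# sequence in a single call, matching the FusedRNNCell interface.
def _example_fused_adaptor_usage():
  import tensorflow as tf
  cell = tf.nn.rnn_cell.BasicLSTMCell(32)
  fused = FusedRNNCellAdaptor(cell, use_dynamic_rnn=True)
  inputs = tf.placeholder(tf.float32, [20, None, 16])  # [time, batch, depth]
  outputs, final_state = fused(inputs, dtype=tf.float32)
  return outputs, final_state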
class TimeReversedFusedRNN(FusedRNNCell):
"""This is an adaptor to time-reverse a FusedRNNCell.
For example,
```python
cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(10)
fw_lstm = tf.contrib.rnn.FusedRNNCellAdaptor(cell, use_dynamic_rnn=True)
bw_lstm = tf.contrib.rnn.TimeReversedFusedRNN(fw_lstm)
fw_out, fw_state = fw_lstm(inputs)
bw_out, bw_state = bw_lstm(inputs)
```
"""
def __init__(self, cell):
self._cell = cell
def _reverse(self, t, lengths):
"""Time reverse the provided tensor or list of tensors.
Assumes the top dimension is the time dimension.
Args:
t: 3D tensor or list of 2D tensors to be reversed
lengths: 1D tensor of lengths, or `None`
Returns:
A reversed tensor or list of tensors
"""
if isinstance(t, list):
return list(reversed(t))
else:
if lengths is None:
return array_ops.reverse_v2(t, [0])
else:
return array_ops.reverse_sequence(t, lengths, 0, 1)
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
inputs = self._reverse(inputs, sequence_length)
outputs, state = self._cell(
inputs,
initial_state=initial_state,
dtype=dtype,
sequence_length=sequence_length,
scope=scope)
outputs = self._reverse(outputs, sequence_length)
return outputs, state
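# --- Illustrative sketch (not part of the original file) ---------------------
# A NumPy rendering of what `_reverse` above does when `lengths` is provided
# (mirroring `array_ops.reverse_sequence` with seq_axis=0 and batch_axis=1):
# only the first `lengths[b]` steps of each sequence are reversed, so padding
# stays in place. Names are hypothetical; the loop favors clarity over speed.
def _numpy_reverse_sequence(t, lengths):
  """t: [time, batch, depth] array; lengths: [batch] ints."""
  import numpy as np
  out = np.array(t, copy=True)
  for b, n in enumerate(lengths):
    out[:n, b] = t[:n, b][::-1]
  return out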
|
tensorflow-master
|
tensorflow/contrib/rnn/python/ops/fused_rnn_cell.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Summary API v2.
The operations in this package are safe to use with eager execution turned on
or off. This package has a more flexible API that allows summaries to be
written directly from ops to places other than event log files, rather than
propagating protos from `tf.summary.merge_all` to `tf.summary.FileWriter`.
To use with eager execution enabled, write your code as follows:
```python
global_step = tf.train.get_or_create_global_step()
summary_writer = tf.contrib.summary.create_file_writer(
train_dir, flush_millis=10000)
with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
# model code goes here
# and in it call
tf.contrib.summary.scalar("loss", my_loss)
# In this case every call to tf.contrib.summary.scalar will generate a record
# ...
```
To use it with graph execution, write your code as follows:
```python
global_step = tf.train.get_or_create_global_step()
summary_writer = tf.contrib.summary.create_file_writer(
train_dir, flush_millis=10000)
with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
# model definition code goes here
# and in it call
tf.contrib.summary.scalar("loss", my_loss)
# In this case every call to tf.contrib.summary.scalar will generate an op,
# note the need to run tf.contrib.summary.all_summary_ops() to make sure these
# ops get executed.
# ...
train_op = ....
with tf.Session(...) as sess:
tf.global_variables_initializer().run()
tf.contrib.summary.initialize(graph=tf.get_default_graph())
# ...
while not_done_training:
sess.run([train_op, tf.contrib.summary.all_summary_ops()])
# ...
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops.summary_ops_v2 import all_v2_summary_ops as all_summary_ops
from tensorflow.python.ops.summary_ops_v2 import always_record_summaries
from tensorflow.python.ops.summary_ops_v2 import audio
from tensorflow.python.ops.summary_ops_v2 import create_db_writer
from tensorflow.python.ops.summary_ops_v2 import create_file_writer
from tensorflow.python.ops.summary_ops_v2 import create_summary_file_writer
from tensorflow.python.ops.summary_ops_v2 import eval_dir
from tensorflow.python.ops.summary_ops_v2 import flush
from tensorflow.python.ops.summary_ops_v2 import generic
from tensorflow.python.ops.summary_ops_v2 import graph
from tensorflow.python.ops.summary_ops_v2 import histogram
from tensorflow.python.ops.summary_ops_v2 import image
from tensorflow.python.ops.summary_ops_v2 import import_event
from tensorflow.python.ops.summary_ops_v2 import initialize
from tensorflow.python.ops.summary_ops_v2 import never_record_summaries
from tensorflow.python.ops.summary_ops_v2 import record_summaries_every_n_global_steps
from tensorflow.python.ops.summary_ops_v2 import scalar
from tensorflow.python.ops.summary_ops_v2 import should_record_summaries
from tensorflow.python.ops.summary_ops_v2 import summary_writer_initializer_op
from tensorflow.python.ops.summary_ops_v2 import SummaryWriter
|
tensorflow-master
|
tensorflow/contrib/summary/summary.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
import unittest
import six
from tensorflow.contrib.summary import summary as summary_ops
from tensorflow.contrib.summary import summary_test_util
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
get_all = summary_test_util.get_all
class GraphFileTest(test_util.TensorFlowTestCase):
def testSummaryOps(self):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir, max_queue=0)
with writer.as_default(), summary_ops.always_record_summaries():
summary_ops.generic('tensor', 1, step=1)
summary_ops.scalar('scalar', 2.0, step=1)
summary_ops.histogram('histogram', [1.0], step=1)
summary_ops.image('image', [[[[1.0]]]], step=1)
summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
sess.run(summary_ops.all_summary_ops())
# The working condition of the ops is tested in the C++ test so we just
# test here that we're calling them correctly.
self.assertTrue(gfile.Exists(logdir))
def testSummaryName(self):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir, max_queue=0)
with writer.as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
sess.run(summary_ops.all_summary_ops())
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual('scalar', events[1].summary.value[0].tag)
def testSummaryNameScope(self):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir, max_queue=0)
with writer.as_default(), summary_ops.always_record_summaries():
with ops.name_scope('scope'):
summary_ops.scalar('scalar', 2.0, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
sess.run(summary_ops.all_summary_ops())
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual('scope/scalar', events[1].summary.value[0].tag)
def testSummaryGlobalStep(self):
training_util.get_or_create_global_step()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir, max_queue=0)
with writer.as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(summary_ops.summary_writer_initializer_op())
step, _ = sess.run(
[training_util.get_global_step(), summary_ops.all_summary_ops()])
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(step, events[1].step)
def testMaxQueue(self):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(
logdir, max_queue=1, flush_millis=999999)
with writer.as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
sess.run(summary_ops.all_summary_ops())
self.assertEqual(1, get_total())
# Should flush after second summary since max_queue = 1
sess.run(summary_ops.all_summary_ops())
self.assertEqual(3, get_total())
def testFlushFunction(self):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(
logdir, max_queue=999999, flush_millis=999999)
with writer.as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0, step=1)
flush_op = summary_ops.flush()
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
sess.run(summary_ops.all_summary_ops())
self.assertEqual(1, get_total())
sess.run(flush_op)
self.assertEqual(2, get_total())
# Test "writer" parameter
sess.run(summary_ops.all_summary_ops())
sess.run(summary_ops.flush(writer=writer))
self.assertEqual(3, get_total())
sess.run(summary_ops.all_summary_ops())
sess.run(summary_ops.flush(writer=writer._resource)) # pylint:disable=protected-access
self.assertEqual(4, get_total())
def testSharedName(self):
logdir = self.get_temp_dir()
with summary_ops.always_record_summaries():
# Create with default shared name (should match logdir)
writer1 = summary_ops.create_file_writer(logdir)
with writer1.as_default():
summary_ops.scalar('one', 1.0, step=1)
# Create with explicit logdir shared name (should be same resource/file)
shared_name = 'logdir:' + logdir
writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
with writer2.as_default():
summary_ops.scalar('two', 2.0, step=2)
# Create with different shared name (should be separate resource/file)
writer3 = summary_ops.create_file_writer(logdir, name='other')
with writer3.as_default():
summary_ops.scalar('three', 3.0, step=3)
with self.cached_session() as sess:
# Run init ops across writers sequentially to avoid race condition.
# TODO(nickfelt): fix race condition in resource manager lookup or create
sess.run(writer1.init())
sess.run(writer2.init())
time.sleep(1.1) # Ensure filename has a different timestamp
sess.run(writer3.init())
sess.run(summary_ops.all_summary_ops())
sess.run([writer1.flush(), writer2.flush(), writer3.flush()])
event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))
# First file has tags "one" and "two"
events = summary_test_util.events_from_file(next(event_files))
self.assertEqual('brain.Event:2', events[0].file_version)
tags = [e.summary.value[0].tag for e in events[1:]]
self.assertItemsEqual(['one', 'two'], tags)
# Second file has tag "three"
events = summary_test_util.events_from_file(next(event_files))
self.assertEqual('brain.Event:2', events[0].file_version)
tags = [e.summary.value[0].tag for e in events[1:]]
self.assertItemsEqual(['three'], tags)
# No more files
self.assertRaises(StopIteration, lambda: next(event_files))
def testWriterInitAndClose(self):
logdir = self.get_temp_dir()
with summary_ops.always_record_summaries():
writer = summary_ops.create_file_writer(
logdir, max_queue=100, flush_millis=1000000)
with writer.as_default():
summary_ops.scalar('one', 1.0, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
self.assertEqual(1, get_total()) # file_version Event
# Running init() again while writer is open has no effect
sess.run(writer.init())
self.assertEqual(1, get_total())
sess.run(summary_ops.all_summary_ops())
self.assertEqual(1, get_total())
# Running close() should do an implicit flush
sess.run(writer.close())
self.assertEqual(2, get_total())
# Running init() on a closed writer should start a new file
time.sleep(1.1) # Ensure filename has a different timestamp
sess.run(writer.init())
sess.run(summary_ops.all_summary_ops())
sess.run(writer.close())
files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
self.assertEqual(2, len(files))
self.assertEqual(2, len(summary_test_util.events_from_file(files[1])))
def testWriterFlush(self):
logdir = self.get_temp_dir()
with summary_ops.always_record_summaries():
writer = summary_ops.create_file_writer(
logdir, max_queue=100, flush_millis=1000000)
with writer.as_default():
summary_ops.scalar('one', 1.0, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
self.assertEqual(1, get_total()) # file_version Event
sess.run(summary_ops.all_summary_ops())
self.assertEqual(1, get_total())
sess.run(writer.flush())
self.assertEqual(2, get_total())
class GraphDbTest(summary_test_util.SummaryDbTest):
def testGraphPassedToGraph_isForbiddenForThineOwnSafety(self):
with self.assertRaises(TypeError):
summary_ops.graph(ops.Graph())
with self.assertRaises(TypeError):
summary_ops.graph('')
# TODO(b/133791853) Re-enable these tests.
@unittest.skip('Skipping because of b/133791853.')
def testGraphSummary(self):
training_util.get_or_create_global_step()
name = 'hi'
graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),))
with self.cached_session():
with self.create_db_writer().as_default():
summary_ops.initialize(graph=graph)
six.assertCountEqual(self, [name],
get_all(self.db, 'SELECT node_name FROM Nodes'))
def testScalarSummary(self):
"""Test record_summaries_every_n_global_steps and all_summaries()."""
with ops.Graph().as_default(), self.cached_session() as sess:
global_step = training_util.get_or_create_global_step()
global_step.initializer.run()
with ops.device('/cpu:0'):
step_increment = state_ops.assign_add(global_step, 1)
sess.run(step_increment) # Increment global step from 0 to 1
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(logdir, max_queue=0,
name='t2').as_default():
with summary_ops.record_summaries_every_n_global_steps(2):
summary_ops.initialize()
summary_op = summary_ops.scalar('my_scalar', 2.0)
# Neither of these should produce a summary because
# global_step is 1 and "1 % 2 != 0"
sess.run(summary_ops.all_summary_ops())
sess.run(summary_op)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 1)
# Increment global step from 1 to 2 and check that the summary
# is now written
sess.run(step_increment)
sess.run(summary_ops.all_summary_ops())
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'my_scalar')
def testScalarSummaryNameScope(self):
"""Test record_summaries_every_n_global_steps and all_summaries()."""
with ops.Graph().as_default(), self.cached_session() as sess:
global_step = training_util.get_or_create_global_step()
global_step.initializer.run()
with ops.device('/cpu:0'):
step_increment = state_ops.assign_add(global_step, 1)
sess.run(step_increment) # Increment global step from 0 to 1
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(logdir, max_queue=0,
name='t2').as_default():
with summary_ops.record_summaries_every_n_global_steps(2):
summary_ops.initialize()
with ops.name_scope('scope'):
summary_op = summary_ops.scalar('my_scalar', 2.0)
# Neither of these should produce a summary because
# global_step is 1 and "1 % 2 != 0"
sess.run(summary_ops.all_summary_ops())
sess.run(summary_op)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 1)
# Increment global step from 1 to 2 and check that the summary
# is now written
sess.run(step_increment)
sess.run(summary_ops.all_summary_ops())
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'scope/my_scalar')
def testSummaryGraphModeCond(self):
with ops.Graph().as_default(), self.cached_session():
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t2').as_default(), summary_ops.always_record_summaries():
summary_ops.initialize()
training_util.get_or_create_global_step().initializer.run()
def f():
summary_ops.scalar('scalar', 2.0)
return constant_op.constant(True)
pred = array_ops.placeholder(dtypes.bool)
x = control_flow_ops.cond(pred, f,
lambda: constant_op.constant(False))
x.eval(feed_dict={pred: True})
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'cond/scalar')
def testSummaryGraphModeWhile(self):
with ops.Graph().as_default(), self.cached_session():
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t2').as_default(), summary_ops.always_record_summaries():
summary_ops.initialize()
training_util.get_or_create_global_step().initializer.run()
def body(unused_pred):
summary_ops.scalar('scalar', 2.0)
return constant_op.constant(False)
def cond(pred):
return pred
pred = array_ops.placeholder(dtypes.bool)
x = control_flow_ops.while_loop(cond, body, [pred])
x.eval(feed_dict={pred: True})
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'while/scalar')
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/summary/summary_ops_graph_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to test summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import sqlite3
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.platform import gfile
class SummaryDbTest(test_util.TensorFlowTestCase):
"""Helper for summary database testing."""
def setUp(self):
super(SummaryDbTest, self).setUp()
self.db_path = os.path.join(self.get_temp_dir(), 'DbTest.sqlite')
if os.path.exists(self.db_path):
os.unlink(self.db_path)
self.db = sqlite3.connect(self.db_path)
self.create_db_writer = functools.partial(
summary_ops.create_db_writer,
db_uri=self.db_path,
experiment_name='experiment',
run_name='run',
user_name='user')
def tearDown(self):
self.db.close()
super(SummaryDbTest, self).tearDown()
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.compat.v1.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.compat.v1.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
  assert len(files) == 1, 'Expected one file in logdir, found: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
def get_one(db, q, *p):
return db.execute(q, p).fetchone()[0]
def get_all(db, q, *p):
return unroll(db.execute(q, p).fetchall())
def unroll(list_of_tuples):
return sum(list_of_tuples, ())
|
tensorflow-master
|
tensorflow/contrib/summary/summary_test_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
import unittest
import sqlite3
import numpy as np
import six
from tensorflow.contrib.summary import summary as summary_ops
from tensorflow.contrib.summary import summary_test_util
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import gfile
from tensorflow.python.training import training_util
get_all = summary_test_util.get_all
get_one = summary_test_util.get_one
_NUMPY_NUMERIC_TYPES = {
types_pb2.DT_HALF: np.float16,
types_pb2.DT_FLOAT: np.float32,
types_pb2.DT_DOUBLE: np.float64,
types_pb2.DT_INT8: np.int8,
types_pb2.DT_INT16: np.int16,
types_pb2.DT_INT32: np.int32,
types_pb2.DT_INT64: np.int64,
types_pb2.DT_UINT8: np.uint8,
types_pb2.DT_UINT16: np.uint16,
types_pb2.DT_UINT32: np.uint32,
types_pb2.DT_UINT64: np.uint64,
types_pb2.DT_COMPLEX64: np.complex64,
types_pb2.DT_COMPLEX128: np.complex128,
types_pb2.DT_BOOL: np.bool_,
}
class EagerFileTest(test_util.TensorFlowTestCase):
def testShouldRecordSummary(self):
self.assertFalse(summary_ops.should_record_summaries())
with summary_ops.always_record_summaries():
self.assertTrue(summary_ops.should_record_summaries())
def testSummaryOps(self):
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t0').as_default(), summary_ops.always_record_summaries():
summary_ops.generic('tensor', 1, '')
summary_ops.scalar('scalar', 2.0)
summary_ops.histogram('histogram', [1.0])
summary_ops.image('image', [[[[1.0]]]])
summary_ops.audio('audio', [[1.0]], 1.0, 1)
# The working condition of the ops is tested in the C++ test so we just
# test here that we're calling them correctly.
self.assertTrue(gfile.Exists(logdir))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testEagerMemory(self):
training_util.get_or_create_global_step()
logdir = self.get_temp_dir()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t0').as_default(), summary_ops.always_record_summaries():
summary_ops.generic('tensor', 1, '')
summary_ops.scalar('scalar', 2.0)
summary_ops.histogram('histogram', [1.0])
summary_ops.image('image', [[[[1.0]]]])
summary_ops.audio('audio', [[1.0]], 1.0, 1)
  def testDefunSummaries(self):
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t1').as_default(), summary_ops.always_record_summaries():
@function.defun
def write():
summary_ops.scalar('scalar', 2.0)
write()
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].simple_value, 2.0)
def testSummaryName(self):
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t2').as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'scalar')
def testSummaryNameScope(self):
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t2').as_default(), summary_ops.always_record_summaries():
with ops.name_scope('scope'):
summary_ops.scalar('scalar', 2.0)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'scope/scalar')
def testSummaryGlobalStep(self):
step = training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t2').as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0, step=step)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'scalar')
def testRecordEveryNGlobalSteps(self):
step = training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
def run_step():
summary_ops.scalar('scalar', i, step=step)
step.assign_add(1)
with summary_ops.create_file_writer(
logdir).as_default(), summary_ops.record_summaries_every_n_global_steps(
2, step):
for i in range(10):
run_step()
# And another 10 steps as a graph function.
run_step_fn = function.defun(run_step)
for i in range(10):
run_step_fn()
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 11)
def testMaxQueue(self):
logs = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logs, max_queue=1, flush_millis=999999,
name='lol').as_default(), summary_ops.always_record_summaries():
get_total = lambda: len(summary_test_util.events_from_logdir(logs))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.scalar('scalar', 2.0, step=1)
self.assertEqual(1, get_total())
# Should flush after second summary since max_queue = 1
summary_ops.scalar('scalar', 2.0, step=2)
self.assertEqual(3, get_total())
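  # Worked trace of the max_queue=1 assertions above (added note): the
  # file_version record is written when the writer is created (1 event on
  # disk), the first scalar only fills the in-memory queue (still 1 on disk),
  # and the second scalar overflows the queue, flushing both scalars (3 total).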
def testFlushFunction(self):
logs = tempfile.mkdtemp()
writer = summary_ops.create_file_writer(
logs, max_queue=999999, flush_millis=999999, name='lol')
with writer.as_default(), summary_ops.always_record_summaries():
get_total = lambda: len(summary_test_util.events_from_logdir(logs))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.scalar('scalar', 2.0, step=1)
summary_ops.scalar('scalar', 2.0, step=2)
self.assertEqual(1, get_total())
summary_ops.flush()
self.assertEqual(3, get_total())
# Test "writer" parameter
summary_ops.scalar('scalar', 2.0, step=3)
summary_ops.flush(writer=writer)
self.assertEqual(4, get_total())
summary_ops.scalar('scalar', 2.0, step=4)
summary_ops.flush(writer=writer._resource) # pylint:disable=protected-access
self.assertEqual(5, get_total())
def testSharedName(self):
logdir = self.get_temp_dir()
with summary_ops.always_record_summaries():
# Create with default shared name (should match logdir)
writer1 = summary_ops.create_file_writer(logdir)
with writer1.as_default():
summary_ops.scalar('one', 1.0, step=1)
summary_ops.flush()
# Create with explicit logdir shared name (should be same resource/file)
shared_name = 'logdir:' + logdir
writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
with writer2.as_default():
summary_ops.scalar('two', 2.0, step=2)
summary_ops.flush()
# Create with different shared name (should be separate resource/file)
time.sleep(1.1) # Ensure filename has a different timestamp
writer3 = summary_ops.create_file_writer(logdir, name='other')
with writer3.as_default():
summary_ops.scalar('three', 3.0, step=3)
summary_ops.flush()
event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))
# First file has tags "one" and "two"
events = iter(summary_test_util.events_from_file(next(event_files)))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual('one', next(events).summary.value[0].tag)
self.assertEqual('two', next(events).summary.value[0].tag)
self.assertRaises(StopIteration, lambda: next(events))
# Second file has tag "three"
events = iter(summary_test_util.events_from_file(next(event_files)))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual('three', next(events).summary.value[0].tag)
self.assertRaises(StopIteration, lambda: next(events))
# No more files
self.assertRaises(StopIteration, lambda: next(event_files))
def testWriterInitAndClose(self):
logdir = self.get_temp_dir()
get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
with summary_ops.always_record_summaries():
writer = summary_ops.create_file_writer(
logdir, max_queue=100, flush_millis=1000000)
self.assertEqual(1, get_total()) # file_version Event
# Calling init() again while writer is open has no effect
writer.init()
self.assertEqual(1, get_total())
try:
# Not using .as_default() to avoid implicit flush when exiting
writer.set_as_default()
summary_ops.scalar('one', 1.0, step=1)
self.assertEqual(1, get_total())
# Calling .close() should do an implicit flush
writer.close()
self.assertEqual(2, get_total())
# Calling init() on a closed writer should start a new file
time.sleep(1.1) # Ensure filename has a different timestamp
writer.init()
files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
self.assertEqual(2, len(files))
get_total = lambda: len(summary_test_util.events_from_file(files[1]))
self.assertEqual(1, get_total()) # file_version Event
summary_ops.scalar('two', 2.0, step=2)
writer.close()
self.assertEqual(2, get_total())
finally:
# Clean up by resetting default writer
summary_ops.create_file_writer(None).set_as_default()
def testWriterFlush(self):
logdir = self.get_temp_dir()
get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
with summary_ops.always_record_summaries():
writer = summary_ops.create_file_writer(
logdir, max_queue=100, flush_millis=1000000)
self.assertEqual(1, get_total()) # file_version Event
with writer.as_default():
summary_ops.scalar('one', 1.0, step=1)
self.assertEqual(1, get_total())
writer.flush()
self.assertEqual(2, get_total())
summary_ops.scalar('two', 2.0, step=2)
# Exiting the "as_default()" should do an implicit flush of the "two" tag
self.assertEqual(3, get_total())
class EagerDbTest(summary_test_util.SummaryDbTest):
# TODO(b/133791853) Re-enable these tests.
@unittest.skip('Skipping because of b/133791853.')
def testDbURIOpen(self):
tmpdb_path = os.path.join(self.get_temp_dir(), 'tmpDbURITest.sqlite')
tmpdb_uri = six.moves.urllib_parse.urljoin('file:', tmpdb_path)
tmpdb_writer = summary_ops.create_db_writer(tmpdb_uri, 'experimentA',
'run1', 'user1')
with summary_ops.always_record_summaries():
with tmpdb_writer.as_default():
summary_ops.scalar('t1', 2.0)
tmpdb = sqlite3.connect(tmpdb_path)
num = get_one(tmpdb, 'SELECT count(*) FROM Tags WHERE tag_name = "t1"')
self.assertEqual(num, 1)
tmpdb.close()
# TODO(b/133791853) Re-enable these tests.
@unittest.skip('Skipping because of b/133791853.')
def testIntegerSummaries(self):
step = training_util.create_global_step()
writer = self.create_db_writer()
def adder(x, y):
state_ops.assign_add(step, 1)
summary_ops.generic('x', x)
summary_ops.generic('y', y)
sum_ = x + y
summary_ops.generic('sum', sum_)
return sum_
with summary_ops.always_record_summaries():
with writer.as_default():
self.assertEqual(5, adder(int64(2), int64(3)).numpy())
six.assertCountEqual(
self, [1, 1, 1],
get_all(self.db, 'SELECT step FROM Tensors WHERE dtype IS NOT NULL'))
six.assertCountEqual(self, ['x', 'y', 'sum'],
get_all(self.db, 'SELECT tag_name FROM Tags'))
x_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "x"')
y_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "y"')
sum_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "sum"')
with summary_ops.always_record_summaries():
with writer.as_default():
self.assertEqual(9, adder(int64(4), int64(5)).numpy())
six.assertCountEqual(
self, [1, 1, 1, 2, 2, 2],
get_all(self.db, 'SELECT step FROM Tensors WHERE dtype IS NOT NULL'))
six.assertCountEqual(self, [x_id, y_id, sum_id],
get_all(self.db, 'SELECT tag_id FROM Tags'))
self.assertEqual(2, get_tensor(self.db, x_id, 1))
self.assertEqual(3, get_tensor(self.db, y_id, 1))
self.assertEqual(5, get_tensor(self.db, sum_id, 1))
self.assertEqual(4, get_tensor(self.db, x_id, 2))
self.assertEqual(5, get_tensor(self.db, y_id, 2))
self.assertEqual(9, get_tensor(self.db, sum_id, 2))
six.assertCountEqual(
self, ['experiment'],
get_all(self.db, 'SELECT experiment_name FROM Experiments'))
six.assertCountEqual(self, ['run'],
get_all(self.db, 'SELECT run_name FROM Runs'))
six.assertCountEqual(self, ['user'],
get_all(self.db, 'SELECT user_name FROM Users'))
def testBadExperimentName(self):
with self.assertRaises(ValueError):
self.create_db_writer(experiment_name='\0')
def testBadRunName(self):
with self.assertRaises(ValueError):
self.create_db_writer(run_name='\0')
def testBadUserName(self):
with self.assertRaises(ValueError):
self.create_db_writer(user_name='-hi')
with self.assertRaises(ValueError):
self.create_db_writer(user_name='hi-')
with self.assertRaises(ValueError):
self.create_db_writer(user_name='@')
# TODO(b/133791853) Re-enable these tests.
@unittest.skip('Skipping because of b/133791853.')
def testGraphSummary(self):
training_util.get_or_create_global_step()
name = 'hi'
graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),))
with summary_ops.always_record_summaries():
with self.create_db_writer().as_default():
summary_ops.graph(graph)
six.assertCountEqual(self, [name],
get_all(self.db, 'SELECT node_name FROM Nodes'))
def get_tensor(db, tag_id, step):
cursor = db.execute(
'SELECT dtype, shape, data FROM Tensors WHERE series = ? AND step = ?',
(tag_id, step))
dtype, shape, data = cursor.fetchone()
assert dtype in _NUMPY_NUMERIC_TYPES
buf = np.frombuffer(data, dtype=_NUMPY_NUMERIC_TYPES[dtype])
if not shape:
return buf[0]
return buf.reshape([int(i) for i in shape.split(',')])
def int64(x):
return array_ops.constant(x, dtypes.int64)
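# Round-trip sketch (added for clarity; the value 7 is purely illustrative):
# an int64 scalar serialized with numpy decodes back through the same dtype
# map that get_tensor() relies on above.
def _int64_blob_roundtrip_sketch():
  """Sketch: serialize and decode a scalar, mirroring get_tensor()'s path."""
  raw = np.int64(7).tobytes()
  decoded = np.frombuffer(raw, dtype=_NUMPY_NUMERIC_TYPES[types_pb2.DT_INT64])
  return decoded[0]  # == 7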
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/summary/summary_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests to ensure ClusterResolvers are usable via the old contrib path."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cluster_resolver import SimpleClusterResolver
from tensorflow.contrib.cluster_resolver.python.training import cluster_resolver
from tensorflow.contrib.cluster_resolver.python.training import UnionClusterResolver
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class ClusterResolverInitializationTest(test.TestCase):
def testCreateSimpleClusterResolverFromLib(self):
base_cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
cluster_resolver.SimpleClusterResolver(base_cluster_spec)
def testCreateSimpleClusterResolver(self):
base_cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
SimpleClusterResolver(base_cluster_spec)
def testCreateUnionClusterResolver(self):
base_cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
simple_cr = SimpleClusterResolver(base_cluster_spec)
UnionClusterResolver(simple_cr)
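  # Illustrative note (added; not an additional test): resolvers built from a
  # ClusterSpec expose it back through the ClusterResolver interface, e.g.
  #   SimpleClusterResolver(base_cluster_spec).cluster_spec()
  # returns a ClusterSpec with the same "ps" and "worker" jobs as above.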
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/cluster_resolver/cluster_resolver_initialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Standard imports for Cluster Resolvers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import UnionClusterResolver
from tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver import GCEClusterResolver
from tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver import KubernetesClusterResolver
from tensorflow.python.distribute.cluster_resolver.slurm_cluster_resolver import SlurmClusterResolver
from tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver
from tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver import TPUClusterResolver
# pylint: enable=wildcard-import,unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'ClusterResolver',
'SimpleClusterResolver',
'UnionClusterResolver',
'GCEClusterResolver',
'KubernetesClusterResolver',
'TFConfigClusterResolver',
'TPUClusterResolver',
'SlurmClusterResolver',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/cluster_resolver/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file for KubernetesClusterResolver for backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# This file (and all files in this directory in general) is a backwards
# compatibility shim that exists to re-export ClusterResolvers such that
# existing OSS code will not be broken.
# pylint: disable=unused-import
from tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver import KubernetesClusterResolver
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'KubernetesClusterResolver',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/cluster_resolver/python/training/kubernetes_cluster_resolver.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file for SlurmClusterResolver to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# This file (and all files in this directory in general) is a backwards
# compatibility shim that exists to re-export ClusterResolvers such that
# existing OSS code will not be broken.
# pylint: disable=unused-import
from tensorflow.python.distribute.cluster_resolver.slurm_cluster_resolver import SlurmClusterResolver
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'SlurmClusterResolver',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/cluster_resolver/python/training/slurm_cluster_resolver.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file for GCEClusterResolver to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# This file (and all files in this directory in general) is a backwards
# compatibility shim that exists to re-export ClusterResolvers such that
# existing OSS code will not be broken.
# pylint: disable=unused-import
from tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver import GCEClusterResolver
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'GCEClusterResolver',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/cluster_resolver/python/training/gce_cluster_resolver.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library Imports for Cluster Resolvers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# This file (and all files in this directory in general) is a backwards
# compatibility shim that exists to re-export ClusterResolvers such that
# existing OSS code will not be broken.
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import UnionClusterResolver
from tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver import GCEClusterResolver
from tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver import KubernetesClusterResolver
from tensorflow.python.distribute.cluster_resolver.slurm_cluster_resolver import SlurmClusterResolver
from tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver
from tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver import TPUClusterResolver
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'cluster_resolver',
'gce_cluster_resolver',
'kubernetes_cluster_resolver',
'slurm_cluster_resolver',
'tfconfig_cluster_resolver',
'tpu_cluster_resolver',
'ClusterResolver',
'SimpleClusterResolver',
'UnionClusterResolver',
'GCEClusterResolver',
'KubernetesClusterResolver',
'TFConfigClusterResolver',
'TPUClusterResolver',
'SlurmClusterResolver',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/cluster_resolver/python/training/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file for TFConfigClusterResolver to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# This file (and all files in this directory in general) is a backwards
# compatibility shim that exists to re-export ClusterResolvers such that
# existing OSS code will not be broken.
# pylint: disable=unused-import
from tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'TFConfigClusterResolver',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/cluster_resolver/python/training/tfconfig_cluster_resolver.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file for TPUClusterResolver to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# This file (and all files in this directory in general) is a backwards
# compatibility shim that exists to re-export ClusterResolvers such that
# existing OSS code will not be broken.
# pylint: disable=unused-import
from tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver import TPUClusterResolver
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'TPUClusterResolver',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/cluster_resolver/python/training/tpu_cluster_resolver.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file for ClusterResolver to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# This file (and all files in this directory in general) is a backwards
# compatibility shim that exists to re-export ClusterResolvers such that
# existing OSS code will not be broken.
# pylint: disable=unused-import
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import UnionClusterResolver
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'ClusterResolver',
'SimpleClusterResolver',
'UnionClusterResolver',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/cluster_resolver/python/training/cluster_resolver.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and estimators that enable explicit kernel methods in TensorFlow.
@@KernelLinearClassifier
@@RandomFourierFeatureMapper
@@sparse_multiclass_hinge_loss
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kernel_methods.python.kernel_estimators import KernelLinearClassifier
from tensorflow.contrib.kernel_methods.python.losses import sparse_multiclass_hinge_loss
from tensorflow.contrib.kernel_methods.python.mappers.random_fourier_features import RandomFourierFeatureMapper
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/kernel_methods/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of kernel-methods-related loss operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.losses import losses
def sparse_multiclass_hinge_loss(
labels,
logits,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
r"""Adds Ops for computing the multiclass hinge loss.
The implementation is based on the following paper:
On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines
by Crammer and Singer.
link: http://jmlr.csail.mit.edu/papers/volume2/crammer01a/crammer01a.pdf
This is a generalization of standard (binary) hinge loss. For a given instance
with correct label c*, the loss is given by:
$$loss = max_{c != c*} logits_c - logits_{c*} + 1.$$
or equivalently
$$loss = max_c { logits_c - logits_{c*} + I_{c != c*} }$$
where \\(I_{c != c*} = 1\ \text{if}\ c != c*\\) and 0 otherwise.
Args:
labels: `Tensor` of shape [batch_size] or [batch_size, 1]. Corresponds to
the ground truth. Each entry must be an index in `[0, num_classes)`.
logits: `Tensor` of shape [batch_size, num_classes] corresponding to the
unscaled logits. Its dtype should be either `float32` or `float64`.
weights: Optional (python) scalar or `Tensor`. If a non-scalar `Tensor`, its
rank should be either 1 ([batch_size]) or 2 ([batch_size, 1]).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is a scalar.
Raises:
ValueError: If `logits`, `labels` or `weights` have invalid or inconsistent
shapes.
ValueError: If `labels` tensor has invalid dtype.
"""
with ops.name_scope(scope, 'sparse_multiclass_hinge_loss', (logits,
labels)) as scope:
# Check logits Tensor has valid rank.
logits_rank = logits.get_shape().ndims
if logits_rank != 2:
raise ValueError(
'logits should have rank 2 ([batch_size, num_classes]). Given rank is'
' {}'.format(logits_rank))
logits_shape = array_ops.shape(logits)
batch_size, num_classes = logits_shape[0], logits_shape[1]
logits = math_ops.cast(logits, dtypes.float32)
# Check labels have valid type.
if labels.dtype != dtypes.int32 and labels.dtype != dtypes.int64:
raise ValueError(
'Invalid dtype for labels: {}. Acceptable dtypes: int32 and int64'.
format(labels.dtype))
# Check labels and weights have valid ranks and are consistent.
labels_rank = labels.get_shape().ndims
if labels_rank not in [1, 2]:
raise ValueError(
'labels should have rank 1 ([batch_size]) or 2 ([batch_size, 1]). '
'Given rank is {}'.format(labels_rank))
with ops.control_dependencies([
check_ops.assert_less(labels, math_ops.cast(num_classes, labels.dtype))
]):
labels = array_ops.reshape(labels, shape=[-1])
weights = ops.convert_to_tensor(weights)
weights_rank = weights.get_shape().ndims
if weights_rank not in [0, 1, 2]:
raise ValueError(
'non-scalar weights should have rank 1 ([batch_size]) or 2 '
          '([batch_size, 1]). Given rank is {}'.format(weights_rank))
if weights_rank > 0:
weights = array_ops.reshape(weights, shape=[-1])
# Check weights and labels have the same number of elements.
weights.get_shape().assert_is_compatible_with(labels.get_shape())
# Compute the logits tensor corresponding to the correct class per instance.
example_indices = array_ops.reshape(
math_ops.range(batch_size), shape=[batch_size, 1])
indices = array_ops.concat(
[
example_indices,
array_ops.reshape(
math_ops.cast(labels, example_indices.dtype),
shape=[batch_size, 1])
],
axis=1)
label_logits = array_ops.reshape(
array_ops.gather_nd(params=logits, indices=indices),
shape=[batch_size, 1])
one_cold_labels = array_ops.one_hot(
indices=labels, depth=num_classes, on_value=0.0, off_value=1.0)
margin = logits - label_logits + one_cold_labels
margin = nn_ops.relu(margin)
loss = math_ops.reduce_max(margin, axis=1)
return losses.compute_weighted_loss(
loss, weights, scope, loss_collection, reduction=reduction)
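# Worked example (added for clarity; not part of the public API): for a single
# instance with logits [1.0, 2.0, 0.5] and correct label 0, the loss defined
# above is max_c {logits_c - logits_0 + I_{c != 0}} = max(0, 2.0, 0.5) = 2.0,
# i.e. the reduce_max over the ReLU'd margins computed in the function body.
def _multiclass_hinge_numpy_sketch(logits_row, label):
  """Sketch: per-instance multiclass hinge loss recomputed with plain numpy."""
  import numpy as np  # Local import; numpy is not otherwise used in this file.
  logits_row = np.asarray(logits_row, dtype=np.float32)
  one_cold = np.ones_like(logits_row)
  one_cold[label] = 0.0
  margin = np.maximum(logits_row - logits_row[label] + one_cold, 0.0)
  return margin.max()  # e.g. 2.0 for the example above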
|
tensorflow-master
|
tensorflow/contrib/kernel_methods/python/losses.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimators that combine explicit kernel mappings with linear models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper as dkm
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
_FEATURE_COLUMNS = "feature_columns"
_KERNEL_MAPPERS = "kernel_mappers"
_OPTIMIZER = "optimizer"
def _check_valid_kernel_mappers(kernel_mappers):
"""Checks that the input kernel_mappers are valid."""
if kernel_mappers is None:
return True
for kernel_mappers_list in six.itervalues(kernel_mappers):
for kernel_mapper in kernel_mappers_list:
if not isinstance(kernel_mapper, dkm.DenseKernelMapper):
return False
return True
def _check_valid_head(head):
"""Returns true if the provided head is supported."""
if head is None:
return False
# pylint: disable=protected-access
return isinstance(head, head_lib._BinaryLogisticHead) or isinstance(
head, head_lib._MultiClassHead)
# pylint: enable=protected-access
def _update_features_and_columns(features, feature_columns,
kernel_mappers_dict):
"""Updates features and feature_columns based on provided kernel mappers.
Currently supports the update of `RealValuedColumn`s only.
Args:
features: Initial features dict. The key is a `string` (feature column name)
and the value is a tensor.
feature_columns: Initial iterable containing all the feature columns to be
consumed (possibly after being updated) by the model. All items should be
instances of classes derived from `FeatureColumn`.
kernel_mappers_dict: A dict from feature column (type: _FeatureColumn) to
objects inheriting from KernelMapper class.
Returns:
updated features and feature_columns based on provided kernel_mappers_dict.
"""
if kernel_mappers_dict is None:
return features, feature_columns
# First construct new columns and features affected by kernel_mappers_dict.
mapped_features = {}
mapped_columns = set()
for feature_column in kernel_mappers_dict:
column_name = feature_column.name
# Currently only mappings over RealValuedColumns are supported.
if not isinstance(feature_column, layers.feature_column._RealValuedColumn): # pylint: disable=protected-access
logging.warning(
"Updates are currently supported on RealValuedColumns only. Metadata "
"for FeatureColumn {} will not be updated.".format(column_name))
continue
mapped_column_name = column_name + "_MAPPED"
# Construct new feature columns based on provided kernel_mappers.
column_kernel_mappers = kernel_mappers_dict[feature_column]
new_dim = sum(mapper.output_dim for mapper in column_kernel_mappers)
mapped_columns.add(
layers.feature_column.real_valued_column(mapped_column_name, new_dim))
# Get mapped features by concatenating mapped tensors (one mapped tensor
# per kernel mappers from the list of kernel mappers corresponding to each
# feature column).
output_tensors = []
for kernel_mapper in column_kernel_mappers:
output_tensors.append(kernel_mapper.map(features[column_name]))
tensor = array_ops.concat(output_tensors, 1)
mapped_features[mapped_column_name] = tensor
# Finally update features dict and feature_columns.
features = features.copy()
features.update(mapped_features)
feature_columns = set(feature_columns)
feature_columns.update(mapped_columns)
return features, feature_columns
def _kernel_model_fn(features, labels, mode, params, config=None):
"""model_fn for the Estimator using kernel methods.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction. See
`ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training. If `None`, will use a FTRL optimizer.
* kernel_mappers: Dictionary of kernel mappers to be applied to the input
features before training.
config: `RunConfig` object to configure the runtime settings.
Returns:
A `ModelFnOps` instance.
Raises:
ValueError: If mode is not any of the `ModeKeys`.
"""
feature_columns = params[_FEATURE_COLUMNS]
kernel_mappers = params[_KERNEL_MAPPERS]
updated_features, updated_columns = _update_features_and_columns(
features, feature_columns, kernel_mappers)
params[_FEATURE_COLUMNS] = updated_columns
return linear._linear_model_fn( # pylint: disable=protected-access
updated_features, labels, mode, params, config)
class _KernelEstimator(estimator.Estimator):
"""Generic kernel-based linear estimator."""
def __init__(self,
feature_columns=None,
model_dir=None,
weight_column_name=None,
head=None,
optimizer=None,
kernel_mappers=None,
config=None):
"""Constructs a `_KernelEstimator` object."""
if not feature_columns and not kernel_mappers:
raise ValueError(
"You should set at least one of feature_columns, kernel_mappers.")
if not _check_valid_kernel_mappers(kernel_mappers):
raise ValueError("Invalid kernel mappers.")
if not _check_valid_head(head):
raise ValueError(
"head type: {} is not supported. Supported head types: "
"_BinaryLogisticHead, _MultiClassHead.".format(type(head)))
params = {
"head": head,
_FEATURE_COLUMNS: feature_columns or [],
_OPTIMIZER: optimizer,
_KERNEL_MAPPERS: kernel_mappers,
}
super(_KernelEstimator, self).__init__(
model_fn=_kernel_model_fn,
model_dir=model_dir,
config=config,
params=params)
class KernelLinearClassifier(_KernelEstimator):
"""Linear classifier using kernel methods as feature preprocessing.
It trains a linear model after possibly mapping initial input features into
a mapped space using explicit kernel mappings. Due to the kernel mappings,
training a linear classifier in the mapped (output) space can detect
non-linearities in the input space.
The user can provide a list of kernel mappers to be applied to all or a subset
of existing feature_columns. This way, the user can effectively provide 2
types of feature columns:
* those passed as elements of feature_columns in the classifier's constructor
* those appearing as a key of the kernel_mappers dict.
If a column appears in feature_columns only, no mapping is applied to it. If
it appears as a key in kernel_mappers, the corresponding kernel mappers are
applied to it. Note that it is possible that a column appears in both places.
Currently kernel_mappers are supported for _RealValuedColumns only.
Example usage:
```
real_column_a = real_valued_column(name='real_column_a',...)
sparse_column_b = sparse_column_with_hash_bucket(...)
kernel_mappers = {real_column_a : [RandomFourierFeatureMapper(...)]}
optimizer = ...
# real_column_a is used as a feature in both its initial and its transformed
# (mapped) form. sparse_column_b is not affected by kernel mappers.
kernel_classifier = KernelLinearClassifier(
feature_columns=[real_column_a, sparse_column_b],
model_dir=...,
optimizer=optimizer,
kernel_mappers=kernel_mappers)
# real_column_a is used as a feature in its transformed (mapped) form only.
# sparse_column_b is not affected by kernel mappers.
kernel_classifier = KernelLinearClassifier(
feature_columns=[sparse_column_b],
model_dir=...,
optimizer=optimizer,
kernel_mappers=kernel_mappers)
# Input builders
  def train_input_fn():  # returns x, y
    ...
  def eval_input_fn():  # returns x, y
    ...
kernel_classifier.fit(input_fn=train_input_fn)
kernel_classifier.evaluate(input_fn=eval_input_fn)
kernel_classifier.predict(...)
```
  Input of `fit` and `evaluate` should have the following features; otherwise there
will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
feature_columns=None,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
kernel_mappers=None,
config=None):
"""Construct a `KernelLinearClassifier` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph etc. This can also be
used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
Note that class labels are integers representing the class index (i.e.
values from 0 to n_classes-1). For arbitrary label values (e.g. string
labels), convert to class indices first.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: The optimizer used to train the model. If specified, it should
be an instance of `tf.Optimizer`. If `None`, the Ftrl optimizer is used
by default.
kernel_mappers: Dictionary of kernel mappers to be applied to the input
features before training a (linear) model. Keys are feature columns and
values are lists of mappers to be applied to the corresponding feature
column. Currently only _RealValuedColumns are supported and therefore
all mappers should conform to the `DenseKernelMapper` interface (see
./mappers/dense_kernel_mapper.py).
config: `RunConfig` object to configure the runtime settings.
Returns:
A `KernelLinearClassifier` estimator.
Raises:
ValueError: if n_classes < 2.
ValueError: if neither feature_columns nor kernel_mappers are provided.
ValueError: if mappers provided as kernel_mappers values are invalid.
"""
super(KernelLinearClassifier, self).__init__(
feature_columns=feature_columns,
model_dir=model_dir,
weight_column_name=weight_column_name,
head=head_lib.multi_class_head(
n_classes=n_classes, weight_column_name=weight_column_name),
optimizer=optimizer,
kernel_mappers=kernel_mappers,
config=config)
def predict_classes(self, input_fn=None):
"""Runs inference to determine the predicted class per instance.
Args:
input_fn: The input function providing features.
Returns:
A generator of predicted classes for the features provided by input_fn.
Each predicted class is represented by its class index (i.e. integer from
0 to n_classes-1)
"""
key = prediction_key.PredictionKey.CLASSES
predictions = super(KernelLinearClassifier, self).predict(
input_fn=input_fn, outputs=[key])
return (pred[key] for pred in predictions)
def predict_proba(self, input_fn=None):
"""Runs inference to determine the class probability predictions.
Args:
input_fn: The input function providing features.
Returns:
A generator of predicted class probabilities for the features provided by
input_fn.
"""
key = prediction_key.PredictionKey.PROBABILITIES
predictions = super(KernelLinearClassifier, self).predict(
input_fn=input_fn, outputs=[key])
return (pred[key] for pred in predictions)
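# Illustrative sketch (added for clarity; not part of the estimator API): how a
# kernel mapper expands a real-valued feature before the linear model consumes
# it, matching the dimension bookkeeping in _update_features_and_columns above.
# Constructor arguments mirror those used in the accompanying tests.
def _rffm_mapping_sketch(batch_of_2d_points):
  """Sketch: map a [batch, 2] float tensor to a [batch, 30] RFF feature."""
  # Local import so the module's real dependencies stay unchanged.
  from tensorflow.contrib.kernel_methods.python.mappers.random_fourier_features import RandomFourierFeatureMapper
  mapper = RandomFourierFeatureMapper(
      input_dim=2, output_dim=30, stddev=0.6, name='rffm_sketch')
  return mapper.map(batch_of_2d_points)  # shape: [batch, 30]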
|
tensorflow-master
|
tensorflow/contrib/kernel_methods/python/kernel_estimators.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kernel_estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.kernel_methods.python import kernel_estimators
from tensorflow.contrib.kernel_methods.python.mappers.random_fourier_features import RandomFourierFeatureMapper
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
def _linearly_separable_binary_input_fn():
"""Returns linearly-separable data points (binary classification)."""
return {
'feature1': constant_op.constant([[0.0], [1.0], [3.0]]),
'feature2': constant_op.constant([[1.0], [-1.2], [1.0]]),
}, constant_op.constant([[1], [0], [1]])
def _linearly_inseparable_binary_input_fn():
"""Returns non-linearly-separable data points (binary classification)."""
return {
'multi_dim_feature':
constant_op.constant([[1.0, 1.0], [1.0, -1.0], [-1.0, -1.0],
[-1.0, 1.0]]),
}, constant_op.constant([[1], [0], [1], [0]])
class KernelLinearClassifierTest(TensorFlowTestCase):
def testNoFeatureColumnsOrKernelMappers(self):
"""Tests that at least one of feature columns or kernels is provided."""
with self.assertRaises(ValueError):
_ = kernel_estimators.KernelLinearClassifier()
def testInvalidKernelMapper(self):
"""ValueError raised when the kernel mappers provided have invalid type."""
class DummyKernelMapper(object):
def __init__(self):
pass
feature = layers.real_valued_column('feature')
kernel_mappers = {feature: [DummyKernelMapper()]}
with self.assertRaises(ValueError):
_ = kernel_estimators.KernelLinearClassifier(
feature_columns=[feature], kernel_mappers=kernel_mappers)
def testInvalidNumberOfClasses(self):
"""ValueError raised when the kernel mappers provided have invalid type."""
feature = layers.real_valued_column('feature')
with self.assertRaises(ValueError):
_ = kernel_estimators.KernelLinearClassifier(
feature_columns=[feature], n_classes=1)
def testLinearlySeparableBinaryDataNoKernels(self):
"""Tests classifier w/o kernels (log. regression) for lin-separable data."""
feature1 = layers.real_valued_column('feature1')
feature2 = layers.real_valued_column('feature2')
logreg_classifier = kernel_estimators.KernelLinearClassifier(
feature_columns=[feature1, feature2])
logreg_classifier.fit(
input_fn=_linearly_separable_binary_input_fn, steps=100)
metrics = logreg_classifier.evaluate(
input_fn=_linearly_separable_binary_input_fn, steps=1)
# Since the data is linearly separable, the classifier should have small
# loss and perfect accuracy.
self.assertLess(metrics['loss'], 0.1)
self.assertEqual(metrics['accuracy'], 1.0)
# As a result, it should assign higher probability to class 1 for the 1st
# and 3rd example and higher probability to class 0 for the second example.
logreg_prob_predictions = list(
logreg_classifier.predict_proba(input_fn=
_linearly_separable_binary_input_fn))
self.assertGreater(logreg_prob_predictions[0][1], 0.5)
self.assertGreater(logreg_prob_predictions[1][0], 0.5)
self.assertGreater(logreg_prob_predictions[2][1], 0.5)
def testLinearlyInseparableBinaryDataWithAndWithoutKernels(self):
"""Tests classifier w/ and w/o kernels on non-linearly-separable data."""
multi_dim_feature = layers.real_valued_column(
'multi_dim_feature', dimension=2)
# Data points are non-linearly separable so there will be at least one
# mis-classified sample (accuracy < 0.8). In fact, the loss is minimized for
# w1=w2=0.0, in which case each example incurs a loss of ln(2). The overall
# (average) loss should then be ln(2) and the logits should be approximately
# 0.0 for each sample.
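    # (Added note) Arithmetic check of the claim above: with w1 = w2 = 0 every
    # logit is 0, so each example's logistic loss is -log(sigmoid(0)) =
    # -log(0.5) = ln(2) ~= 0.693, regardless of its label; the
    # assertAlmostEqual below verifies exactly this value.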
logreg_classifier = kernel_estimators.KernelLinearClassifier(
feature_columns=[multi_dim_feature])
logreg_classifier.fit(
input_fn=_linearly_inseparable_binary_input_fn, steps=50)
logreg_metrics = logreg_classifier.evaluate(
input_fn=_linearly_inseparable_binary_input_fn, steps=1)
logreg_loss = logreg_metrics['loss']
logreg_accuracy = logreg_metrics['accuracy']
logreg_predictions = logreg_classifier.predict(
input_fn=_linearly_inseparable_binary_input_fn, as_iterable=False)
self.assertAlmostEqual(logreg_loss, np.log(2), places=3)
self.assertLess(logreg_accuracy, 0.8)
self.assertAllClose(logreg_predictions['logits'], [[0.0], [0.0], [0.0],
[0.0]])
    # Using kernel mappers makes it possible to discover non-linearities in
    # the data. Mapping the data to a higher-dimensional feature space using
    # approximate RBF kernels substantially reduces the loss and leads to
    # perfect classification accuracy.
kernel_mappers = {
multi_dim_feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')]
}
kernelized_logreg_classifier = kernel_estimators.KernelLinearClassifier(
feature_columns=[], kernel_mappers=kernel_mappers)
kernelized_logreg_classifier.fit(
input_fn=_linearly_inseparable_binary_input_fn, steps=50)
kernelized_logreg_metrics = kernelized_logreg_classifier.evaluate(
input_fn=_linearly_inseparable_binary_input_fn, steps=1)
kernelized_logreg_loss = kernelized_logreg_metrics['loss']
kernelized_logreg_accuracy = kernelized_logreg_metrics['accuracy']
self.assertLess(kernelized_logreg_loss, 0.2)
self.assertEqual(kernelized_logreg_accuracy, 1.0)
def testVariablesWithAndWithoutKernels(self):
"""Tests variables w/ and w/o kernel."""
multi_dim_feature = layers.real_valued_column(
'multi_dim_feature', dimension=2)
linear_classifier = kernel_estimators.KernelLinearClassifier(
feature_columns=[multi_dim_feature])
linear_classifier.fit(
input_fn=_linearly_inseparable_binary_input_fn, steps=50)
linear_variables = linear_classifier.get_variable_names()
self.assertIn('linear/multi_dim_feature/weight', linear_variables)
self.assertIn('linear/bias_weight', linear_variables)
linear_weights = linear_classifier.get_variable_value(
'linear/multi_dim_feature/weight')
linear_bias = linear_classifier.get_variable_value('linear/bias_weight')
kernel_mappers = {
multi_dim_feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')]
}
kernel_linear_classifier = kernel_estimators.KernelLinearClassifier(
feature_columns=[], kernel_mappers=kernel_mappers)
kernel_linear_classifier.fit(
input_fn=_linearly_inseparable_binary_input_fn, steps=50)
kernel_linear_variables = kernel_linear_classifier.get_variable_names()
self.assertIn('linear/multi_dim_feature_MAPPED/weight',
kernel_linear_variables)
self.assertIn('linear/bias_weight', kernel_linear_variables)
kernel_linear_weights = kernel_linear_classifier.get_variable_value(
'linear/multi_dim_feature_MAPPED/weight')
kernel_linear_bias = kernel_linear_classifier.get_variable_value(
'linear/bias_weight')
# The feature column used for linear classification (no kernels) has
# dimension 2 so the model will learn a 2-dimension weights vector (and a
# scalar for the bias). In the kernelized model, the features are mapped to
# a 30-dimensional feature space and so the weights variable will also have
# dimension 30.
self.assertEqual(2, len(linear_weights))
self.assertEqual(1, len(linear_bias))
self.assertEqual(30, len(kernel_linear_weights))
self.assertEqual(1, len(kernel_linear_bias))
def testClassifierWithAndWithoutKernelsNoRealValuedColumns(self):
"""Tests kernels have no effect for non-real valued columns ."""
def input_fn():
return {
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
}, constant_op.constant([[1], [0], [1]])
price = layers.real_valued_column('price')
country = layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
linear_classifier = kernel_estimators.KernelLinearClassifier(
feature_columns=[price, country])
linear_classifier.fit(input_fn=input_fn, steps=100)
linear_metrics = linear_classifier.evaluate(input_fn=input_fn, steps=1)
linear_loss = linear_metrics['loss']
linear_accuracy = linear_metrics['accuracy']
kernel_mappers = {
country: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')]
}
kernel_linear_classifier = kernel_estimators.KernelLinearClassifier(
feature_columns=[price, country], kernel_mappers=kernel_mappers)
kernel_linear_classifier.fit(input_fn=input_fn, steps=100)
kernel_linear_metrics = kernel_linear_classifier.evaluate(
input_fn=input_fn, steps=1)
kernel_linear_loss = kernel_linear_metrics['loss']
kernel_linear_accuracy = kernel_linear_metrics['accuracy']
# The kernel mapping is applied to a non-real-valued feature column and so
# it should have no effect on the model. The loss and accuracy of the
# "kernelized" model should match the loss and accuracy of the initial model
# (without kernels).
self.assertAlmostEqual(linear_loss, kernel_linear_loss, delta=0.01)
self.assertAlmostEqual(linear_accuracy, kernel_linear_accuracy, delta=0.01)
def testMulticlassDataWithAndWithoutKernels(self):
"""Tests classifier w/ and w/o kernels on multiclass data."""
feature_column = layers.real_valued_column('feature', dimension=4)
# Metrics for linear classifier (no kernels).
linear_classifier = kernel_estimators.KernelLinearClassifier(
feature_columns=[feature_column], n_classes=3)
linear_classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=50)
linear_metrics = linear_classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
linear_loss = linear_metrics['loss']
linear_accuracy = linear_metrics['accuracy']
    # Using kernel mappers makes it possible to discover non-linearities in the
    # data (via the RBF kernel approximation), which reduces the loss and
    # increases the accuracy.
kernel_mappers = {
feature_column: [
RandomFourierFeatureMapper(
input_dim=4, output_dim=50, stddev=1.0, name='rffm')
]
}
kernel_linear_classifier = kernel_estimators.KernelLinearClassifier(
feature_columns=[], n_classes=3, kernel_mappers=kernel_mappers)
kernel_linear_classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=50)
kernel_linear_metrics = kernel_linear_classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
kernel_linear_loss = kernel_linear_metrics['loss']
kernel_linear_accuracy = kernel_linear_metrics['accuracy']
self.assertLess(kernel_linear_loss, linear_loss)
self.assertGreater(kernel_linear_accuracy, linear_accuracy)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/kernel_methods/python/kernel_estimators_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for third_party.tensorflow.contrib.kernel_methods.python.losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.kernel_methods.python import losses
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SparseMulticlassHingeLossTest(test.TestCase):
def testInvalidLogitsShape(self):
"""An error is raised when logits have invalid shape."""
with self.cached_session():
logits = constant_op.constant([-1.0, 2.1], shape=(2,))
labels = constant_op.constant([0, 1])
with self.assertRaises(ValueError):
_ = losses.sparse_multiclass_hinge_loss(labels, logits)
def testInvalidLabelsShape(self):
"""An error is raised when labels have invalid shape."""
with self.cached_session():
logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
labels = constant_op.constant([1, 0], shape=(1, 1, 2))
with self.assertRaises(ValueError):
_ = losses.sparse_multiclass_hinge_loss(labels, logits)
def testInvalidWeightsShape(self):
"""An error is raised when weights have invalid shape."""
with self.cached_session():
logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
labels = constant_op.constant([1, 0], shape=(2,))
weights = constant_op.constant([1.5, 0.2], shape=(2, 1, 1))
with self.assertRaises(ValueError):
_ = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
def testInvalidLabelsDtype(self):
"""An error is raised when labels have invalid shape."""
with self.cached_session():
logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
labels = constant_op.constant([1, 0], dtype=dtypes.float32)
with self.assertRaises(ValueError):
_ = losses.sparse_multiclass_hinge_loss(labels, logits)
def testNoneWeightRaisesValueError(self):
"""An error is raised when weights are None."""
with self.cached_session():
logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
labels = constant_op.constant([1, 0])
with self.assertRaises(ValueError):
_ = losses.sparse_multiclass_hinge_loss(labels, logits, weights=None)
def testInconsistentLabelsAndWeightsShapesSameRank(self):
"""Error raised when weights and labels have same ranks, different sizes."""
with self.cached_session():
logits = constant_op.constant([-1.0, 2.1, 4.1], shape=(3, 1))
labels = constant_op.constant([1, 0, 2], shape=(3, 1))
weights = constant_op.constant([1.1, 2.0], shape=(2, 1))
with self.assertRaises(ValueError):
_ = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
def testInconsistentLabelsAndWeightsShapesDifferentRank(self):
"""Error raised when weights and labels have different ranks and sizes."""
with self.cached_session():
logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
labels = constant_op.constant([1, 0], shape=(2, 1))
weights = constant_op.constant([1.1, 2.0, 2.8], shape=(3,))
with self.assertRaises(ValueError):
_ = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
def testOutOfRangeLabels(self):
"""An error is raised when labels are not in [0, num_classes)."""
with self.cached_session():
logits = constant_op.constant([[1.2, -1.4, -1.0], [1.4, 1.8, 4.0],
[0.5, 1.8, -1.0]])
labels = constant_op.constant([1, 0, 4])
loss = losses.sparse_multiclass_hinge_loss(labels, logits)
with self.assertRaises(errors.InvalidArgumentError):
loss.eval()
def testZeroLossInt32Labels(self):
"""Loss is 0 if true class logits sufficiently higher than other classes."""
with self.cached_session():
logits = constant_op.constant([[1.2, -1.4, -1.0], [1.4, 1.8, 4.0],
[0.5, 1.8, -1.0]])
labels = constant_op.constant([0, 2, 1], dtype=dtypes.int32)
loss = losses.sparse_multiclass_hinge_loss(labels, logits)
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testZeroLossInt64Labels(self):
"""Loss is 0 if true class logits sufficiently higher than other classes."""
with self.cached_session():
logits = constant_op.constant([[2.1, -0.4, -1.0], [1.4, 2.8, 4.0],
[-0.5, 0.8, -1.0]])
labels = constant_op.constant([0, 2, 1], dtype=dtypes.int64)
loss = losses.sparse_multiclass_hinge_loss(labels, logits)
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testUnknownShape(self):
"""Result keeps same with `testZeroLossInt32Labels`"""
logits_np = np.array([[1.2, -1.4, -1.0], [1.4, 1.8, 4.0], [0.5, 1.8, -1.0]])
labels_np = np.array([0, 2, 1], dtype=np.int32)
logits_shapes = [
[3, 3], # batch_size, num_classes
[None, 3],
[3, None],
[None, None]
]
for batch_size, num_classes in logits_shapes:
with self.cached_session():
logits = array_ops.placeholder(
dtypes.float32, shape=(batch_size, num_classes))
labels = array_ops.placeholder(dtypes.int32, shape=(batch_size,))
loss = losses.sparse_multiclass_hinge_loss(labels, logits)
result = loss.eval(feed_dict={logits: logits_np, labels: labels_np})
self.assertAlmostEqual(result, 0.0, 3)
def testCorrectPredictionsSomeClassesInsideMargin(self):
"""Loss is > 0 even if true class logits are higher than other classes."""
with self.cached_session():
logits = constant_op.constant([[1.2, -1.4, 0.8], [1.4, 1.8, 4.0],
[1.5, 1.8, -1.0]])
labels = constant_op.constant([0, 2, 1])
loss = losses.sparse_multiclass_hinge_loss(labels, logits)
# The first and third samples incur some loss (0.6 and 0.7 respectively).
self.assertAlmostEqual(loss.eval(), 0.4333, 3)
def testIncorrectPredictions(self):
"""Loss is >0 when an incorrect class has higher logits than true class."""
with self.cached_session():
logits = constant_op.constant([[2.6, 0.4, 0.8], [1.4, 0.8, -1.0],
[0.5, -1.8, 2.0]])
labels = constant_op.constant([1, 0, 2])
loss = losses.sparse_multiclass_hinge_loss(labels, logits)
      # The first example incurs a high loss (3.2) since the logits of an
      # incorrect class (0) are higher than the logits of the ground truth. The
      # second example also incurs a (smaller) loss (0.4).
self.assertAlmostEqual(loss.eval(), 1.2, 3)
def testIncorrectPredictionsColumnLabels(self):
"""Same as above but labels is a rank-2 tensor."""
with self.cached_session():
logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
[0.2, -1.8, 4.0]])
labels = constant_op.constant([1, 0, 2], shape=(3, 1))
loss = losses.sparse_multiclass_hinge_loss(labels, logits)
      # The first example incurs a high loss (3.0) since the logits of an
      # incorrect class (0) are higher than the logits of the ground truth. The
      # second example also incurs a (smaller) loss (0.3).
self.assertAlmostEqual(loss.eval(), 1.1, 3)
def testIncorrectPredictionsZeroWeights(self):
"""Loss is 0 when all weights are missing even if predictions are wrong."""
with self.cached_session():
logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
[0.2, -1.8, 4.0]])
labels = constant_op.constant([1, 0, 2], shape=(3, 1))
weights = constant_op.constant([0.0, 0.0, 0.0], shape=(3, 1))
loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
# No overall loss since all weights are 0.
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testNonZeroLossWithPythonScalarWeights(self):
"""Weighted loss is correctly computed when weights is a python scalar."""
with self.cached_session():
logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
[0.2, -1.8, 4.0]])
labels = constant_op.constant([1, 0, 2], shape=(3, 1))
weights = 10.0
loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
self.assertAlmostEqual(loss.eval(), 11.0, 3)
def testNonZeroLossWithScalarTensorWeights(self):
"""Weighted loss is correctly computed when weights is a rank-0 tensor."""
with self.cached_session():
logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
[0.2, -1.8, 4.0]])
labels = constant_op.constant([1, 0, 2], shape=(3, 1))
weights = constant_op.constant(5.0)
loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
self.assertAlmostEqual(loss.eval(), 5.5, 3)
def testNonZeroLossWith1DTensorWeightsColumnLabels(self):
"""Weighted loss is correctly computed when weights is a rank-0 tensor."""
with self.cached_session():
logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
[0.2, -1.8, 4.0]])
labels = constant_op.constant([1, 0, 2], shape=(3, 1))
weights = constant_op.constant([1.0, 0.5, 2.0], shape=(3,))
loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
      # The overall loss is 1/3 * (3.0*1.0 + 0.5*0.3 + 2.0*0.0) = 1.05.
self.assertAlmostEqual(loss.eval(), 1.05, 3)
def testNonZeroLossWith2DTensorWeights1DLabelsSomeWeightsMissing(self):
"""Weighted loss is correctly computed when weights is a rank-0 tensor."""
with self.cached_session():
logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
[0.2, -1.8, 4.0], [1.6, 1.8, -4.0]])
labels = constant_op.constant([1, 0, 2, 1])
weights = constant_op.constant([[1.0], [0.0], [2.0], [4.0]])
loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
      # The overall loss is (3.0*1.0 + 0.0*0.3 + 2.0*0.0 + 4.0*0.8) / 3 = 6.2/3,
      # where the divisor 3 counts only the examples with nonzero weights.
self.assertAlmostEqual(loss.eval(), 2.06666, 3)
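# The following is a minimal NumPy reference sketch (not part of the library
# under test) of the weighted multiclass hinge loss these fixtures assume: for
# an example with label y, the per-example loss is
# max(0, 1 + max_{j != y} logit_j - logit_y), followed by a weighted average.
# The helper name `_reference_hinge_loss` and the simple mean over the batch
# are illustrative assumptions; the library's weighted reduction may differ
# when some weights are zero.
def _reference_hinge_loss(logits_np, labels_np, weights_np):
  """Reference weighted multiclass hinge loss computed with NumPy."""
  num_examples = logits_np.shape[0]
  per_example = np.zeros(num_examples)
  for i in range(num_examples):
    label = labels_np[i]
    # Highest logit among the incorrect classes.
    best_other = np.delete(logits_np[i], label).max()
    per_example[i] = max(0.0, 1.0 + best_other - logits_np[i, label])
  return np.sum(weights_np * per_example) / num_examples
# For instance, the fixture from testNonZeroLossWith1DTensorWeightsColumnLabels
# gives per-example losses [3.0, 0.3, 0.0], so
# _reference_hinge_loss(
#     np.array([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0], [0.2, -1.8, 4.0]]),
#     np.array([1, 0, 2]), np.array([1.0, 0.5, 2.0])) returns 1.05.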
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/kernel_methods/python/losses_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RandomFourierFeatureMapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper
from tensorflow.contrib.kernel_methods.python.mappers.random_fourier_features import RandomFourierFeatureMapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import googletest
def _inner_product(x, y):
r"""Inner product between tensors x and y.
The input tensors are assumed to be in ROW representation, that is, the method
returns \\(x * y^T\\).
Args:
x: input tensor in row format
y: input tensor in row format
Returns:
the inner product of x, y
"""
return math_ops.matmul(x, y, transpose_b=True)
def _compute_exact_rbf_kernel(x, y, stddev):
"""Computes exact RBF kernel given input tensors x and y and stddev."""
diff = math_ops.subtract(x, y)
diff_squared_norm = _inner_product(diff, diff)
return math_ops.exp(-diff_squared_norm / (2 * stddev * stddev))
class RandomFourierFeatureMapperTest(TensorFlowTestCase):
def testInvalidInputShape(self):
x = constant_op.constant([[2.0, 1.0]])
with self.cached_session():
rffm = RandomFourierFeatureMapper(3, 10)
with self.assertRaisesWithPredicateMatch(
dense_kernel_mapper.InvalidShapeError,
r'Invalid dimension: expected 3 input features, got 2 instead.'):
rffm.map(x)
def testMappedShape(self):
x1 = constant_op.constant([[2.0, 1.0, 0.0]])
x2 = constant_op.constant([[1.0, -1.0, 2.0], [-1.0, 10.0, 1.0],
[4.0, -2.0, -1.0]])
with self.cached_session():
rffm = RandomFourierFeatureMapper(3, 10, 1.0)
mapped_x1 = rffm.map(x1)
mapped_x2 = rffm.map(x2)
self.assertEqual([1, 10], mapped_x1.get_shape())
self.assertEqual([3, 10], mapped_x2.get_shape())
def testSameOmegaReused(self):
x = constant_op.constant([[2.0, 1.0, 0.0]])
with self.cached_session():
rffm = RandomFourierFeatureMapper(3, 100)
mapped_x = rffm.map(x)
mapped_x_copy = rffm.map(x)
# Two different evaluations of tensors output by map on the same input
# are identical because the same parameters are used for the mappings.
self.assertAllClose(mapped_x.eval(), mapped_x_copy.eval(), atol=0.001)
def testTwoMapperObjects(self):
x = constant_op.constant([[2.0, 1.0, 0.0]])
y = constant_op.constant([[1.0, -1.0, 2.0]])
stddev = 3.0
with self.cached_session():
      # Both mappers use the default seed, so they produce identical mappings;
      # the two kernel approximations therefore agree closely even though the
      # mapped dimension is fairly small and the approximation itself is rough.
rffm1 = RandomFourierFeatureMapper(3, 100, stddev)
rffm2 = RandomFourierFeatureMapper(3, 100, stddev)
mapped_x1 = rffm1.map(x)
mapped_y1 = rffm1.map(y)
mapped_x2 = rffm2.map(x)
mapped_y2 = rffm2.map(y)
approx_kernel_value1 = _inner_product(mapped_x1, mapped_y1)
approx_kernel_value2 = _inner_product(mapped_x2, mapped_y2)
self.assertAllClose(
approx_kernel_value1.eval(), approx_kernel_value2.eval(), atol=0.01)
def testBadKernelApproximation(self):
x = constant_op.constant([[2.0, 1.0, 0.0]])
y = constant_op.constant([[1.0, -1.0, 2.0]])
stddev = 3.0
with self.cached_session():
# The mapped dimension is fairly small, so the kernel approximation is
# very rough.
rffm = RandomFourierFeatureMapper(3, 100, stddev, seed=0)
mapped_x = rffm.map(x)
mapped_y = rffm.map(y)
exact_kernel_value = _compute_exact_rbf_kernel(x, y, stddev)
approx_kernel_value = _inner_product(mapped_x, mapped_y)
self.assertAllClose(
exact_kernel_value.eval(), approx_kernel_value.eval(), atol=0.2)
def testGoodKernelApproximationAmortized(self):
# Parameters.
num_points = 20
input_dim = 5
mapped_dim = 5000
stddev = 5.0
points_shape = [1, input_dim]
points = [
random_ops.random_uniform(shape=points_shape, maxval=1.0)
for _ in xrange(num_points)
]
normalized_points = [nn.l2_normalize(point, dim=1) for point in points]
total_absolute_error = 0.0
with self.cached_session():
rffm = RandomFourierFeatureMapper(input_dim, mapped_dim, stddev, seed=0)
# Cache mappings so that they are not computed multiple times.
cached_mappings = dict((point, rffm.map(point))
for point in normalized_points)
for x in normalized_points:
mapped_x = cached_mappings[x]
for y in normalized_points:
mapped_y = cached_mappings[y]
exact_kernel_value = _compute_exact_rbf_kernel(x, y, stddev)
approx_kernel_value = _inner_product(mapped_x, mapped_y)
abs_error = math_ops.abs(exact_kernel_value - approx_kernel_value)
total_absolute_error += abs_error
self.assertAllClose(
[[0.0]],
total_absolute_error.eval() / (num_points * num_points),
atol=0.02)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Approximate kernel mapper for RBF kernel based on Random Fourier Features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper as dkm
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
# TODO(sibyl-vie3Poto,felixyu): add an option to control whether the parameters in the
# kernel map are trainable.
class RandomFourierFeatureMapper(dkm.DenseKernelMapper):
r"""Class that implements Random Fourier Feature Mapping (RFFM) in TensorFlow.
The RFFM mapping is used to approximate the Gaussian (RBF) kernel:
  $$exp(-||x-y||_2^2 / (2 * \sigma^2))$$
The implementation of RFFM is based on the following paper:
"Random Features for Large-Scale Kernel Machines" by Ali Rahimi and Ben Recht.
(link: https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
The mapping uses a matrix \\(\Omega \in R^{d x D}\\) and a bias vector
\\(b \in R^D\\) where \\(d\\) is the input dimension (number of dense input
features) and \\(D\\) is the output dimension (i.e., dimension of the feature
space the input is mapped to). Each entry of \\(\Omega\\) is sampled i.i.d.
from a (scaled) Gaussian distribution and each entry of \\(b\\) is sampled
independently and uniformly from [0, \\(2 * \pi\\)].
For a single input feature vector \\(x \in R^d\\), its RFFM is defined as:
  $$\sqrt{2/D} * cos(x * \Omega + b)$$
where \\(cos\\) is the element-wise cosine function and \\(x, b\\) are
represented as row vectors. The aforementioned paper shows that the linear
kernel of RFFM-mapped vectors approximates the Gaussian kernel of the initial
vectors.
"""
def __init__(self, input_dim, output_dim, stddev=1.0, seed=1, name=None):
r"""Constructs a RandomFourierFeatureMapper instance.
Args:
input_dim: The dimension (number of features) of the tensors to be mapped.
output_dim: The output dimension of the mapping.
stddev: The standard deviation of the Gaussian kernel to be approximated.
The error of the classifier trained using this approximation is very
sensitive to this parameter.
seed: An integer used to initialize the parameters (\\(\Omega\\) and
\\(b\\)) of the mapper. For repeatable sequences across different
invocations of the mapper object (for instance, to ensure consistent
mapping both at training and eval/inference if these happen in
different invocations), set this to the same integer.
name: name for the mapper object.
"""
# TODO(sibyl-vie3Poto): Maybe infer input_dim and/or output_dim (if not explicitly
# provided). input_dim can be inferred lazily, the first time map is called.
# output_dim can be inferred from input_dim using heuristics on the error of
# the approximation (and, by extension, the error of the classification
# based on the approximation).
self._input_dim = input_dim
self._output_dim = output_dim
self._stddev = stddev
self._seed = seed
self._name = name
@property
def name(self):
"""Returns a name for the `RandomFourierFeatureMapper` instance.
If the name provided in the constructor is `None`, then the object's unique
id is returned.
Returns:
A name for the `RandomFourierFeatureMapper` instance.
"""
return self._name or str(id(self))
@property
def input_dim(self):
return self._input_dim
@property
def output_dim(self):
return self._output_dim
def map(self, input_tensor):
"""Maps each row of input_tensor using random Fourier features.
Args:
      input_tensor: a `Tensor` containing input features. Its shape is
[batch_size, self._input_dim].
Returns:
A `Tensor` of shape [batch_size, self._output_dim] containing RFFM-mapped
features.
Raises:
      InvalidShapeError: if the shape of the `input_tensor` is inconsistent with
        the expected input dimension.
"""
input_tensor_shape = input_tensor.get_shape()
if len(input_tensor_shape) != 2:
raise dkm.InvalidShapeError(
          'The rank of the input tensor should be 2. Got %d instead.' %
len(input_tensor_shape))
features_dim = input_tensor_shape[1]
if features_dim != self._input_dim:
raise dkm.InvalidShapeError(
'Invalid dimension: expected %d input features, got %d instead.' %
(self._input_dim, features_dim))
# Add ops that compute (deterministically) omega_matrix and bias based on
# the provided seed.
    # TODO(sibyl-vie3Poto): Storing the mapper's parameters (omega_matrix and bias) as
    # constants incurs no RPC calls to the parameter server during distributed
    # training. However, if the parameters grow too large (for instance if they
    # don't fit into memory or if they blow up the size of the GraphDef proto),
    # storing them as constants is no longer an option. In this case, we should
    # have a heuristic to choose one of the following alternatives:
    # a) store them as variables (in the parameter server)
    # b) store them as worker-local variables
    # c) generate the omega matrix on the fly at each step
np.random.seed(self._seed)
omega_matrix_shape = [self._input_dim, self._output_dim]
bias_shape = [self._output_dim]
omega_matrix = constant_op.constant(
np.random.normal(
scale=1.0 / self._stddev, size=omega_matrix_shape),
dtype=dtypes.float32)
bias = constant_op.constant(
np.random.uniform(
low=0.0, high=2 * np.pi, size=bias_shape),
dtype=dtypes.float32)
x_omega_plus_bias = math_ops.add(
math_ops.matmul(input_tensor, omega_matrix), bias)
return math.sqrt(2.0 / self._output_dim) * math_ops.cos(x_omega_plus_bias)
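# The block below is a standalone NumPy sketch (illustrative only, not used by
# the class above) of the approximation described in the class docstring: with
# Omega drawn entrywise from N(0, 1/stddev^2) and b drawn uniformly from
# [0, 2*pi), the inner product of sqrt(2/D) * cos(x * Omega + b) mapped vectors
# approximates exp(-||x - y||^2 / (2 * stddev^2)). The helper name
# `_rffm_numpy_sketch` is an assumption made for this example.
def _rffm_numpy_sketch(x, y, input_dim, output_dim, stddev, seed=0):
  """Returns (exact_rbf_kernel, approximate_rbf_kernel) for row vectors x, y."""
  rng = np.random.RandomState(seed)
  omega = rng.normal(scale=1.0 / stddev, size=(input_dim, output_dim))
  bias = rng.uniform(low=0.0, high=2 * np.pi, size=(output_dim,))
  mapped_x = math.sqrt(2.0 / output_dim) * np.cos(np.dot(x, omega) + bias)
  mapped_y = math.sqrt(2.0 / output_dim) * np.cos(np.dot(y, omega) + bias)
  exact = math.exp(-np.sum((x - y) ** 2) / (2 * stddev * stddev))
  approximate = float(np.dot(mapped_x, mapped_y))
  return exact, approximate
# For example, _rffm_numpy_sketch(np.ones(5), np.zeros(5), 5, 5000, 5.0) should
# return two values within a few hundredths of each other, and the agreement
# generally improves as output_dim grows.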
|
tensorflow-master
|
tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""API class for dense (approximate) kernel mappers.
See ./random_fourier_features.py for a concrete instantiation of this class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
class InvalidShapeError(Exception):
"""Exception thrown when a tensor's shape deviates from an expected shape."""
@six.add_metaclass(abc.ABCMeta)
class DenseKernelMapper(object):
"""Abstract class for a kernel mapper that maps dense inputs to dense outputs.
This class is abstract. Users should not create instances of this class.
"""
@abc.abstractmethod
def map(self, input_tensor):
"""Main Dense-Tensor-In-Dense-Tensor-Out (DTIDTO) map method.
Should be implemented by subclasses.
Args:
input_tensor: The dense input tensor to be mapped using the (approximate)
kernel mapper.
"""
raise NotImplementedError('map is not implemented for {}.'.format(self))
@abc.abstractproperty
def name(self):
"""Returns the name of the kernel mapper."""
pass
@abc.abstractproperty
def output_dim(self):
"""Returns the output dimension of the mapping."""
pass
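# A minimal sketch (illustrative only, not shipped with this module) of a
# concrete subclass: an identity mapper that satisfies the DenseKernelMapper
# contract by returning its input unchanged. The class name and constructor
# arguments are assumptions made for this example.
class IdentityKernelMapper(DenseKernelMapper):
  """Trivial mapper whose map() is the identity; useful as an API example."""
  def __init__(self, dim, name=None):
    self._dim = dim
    self._name = name or 'identity_kernel_mapper'
  def map(self, input_tensor):
    # The "mapping" is the identity, so the output equals the input tensor.
    return input_tensor
  @property
  def name(self):
    return self._name
  @property
  def output_dim(self):
    return self._dim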
|
tensorflow-master
|
tensorflow/contrib/kernel_methods/python/mappers/dense_kernel_mapper.py
|
# =============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Custom op used by periodic_resample."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.periodic_resample.python.ops.periodic_resample_op import periodic_resample
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["periodic_resample"]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/periodic_resample/__init__.py
|
# =============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Public API of periodic_resample."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/periodic_resample/python/__init__.py
|