# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""mel conversion ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import shape_ops
from tensorflow.python.util.tf_export import tf_export
# mel spectrum constants.
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
def _mel_to_hertz(mel_values, name=None):
"""Converts frequencies in `mel_values` from the mel scale to linear scale.
Args:
mel_values: A `Tensor` of frequencies in the mel scale.
name: An optional name for the operation.
Returns:
A `Tensor` of the same shape and type as `mel_values` containing linear
scale frequencies in Hertz.
"""
with ops.name_scope(name, 'mel_to_hertz', [mel_values]):
mel_values = ops.convert_to_tensor(mel_values)
return _MEL_BREAK_FREQUENCY_HERTZ * (
math_ops.exp(mel_values / _MEL_HIGH_FREQUENCY_Q) - 1.0
)
def _hertz_to_mel(frequencies_hertz, name=None):
"""Converts frequencies in `frequencies_hertz` in Hertz to the mel scale.
Args:
frequencies_hertz: A `Tensor` of frequencies in Hertz.
name: An optional name for the operation.
Returns:
A `Tensor` of the same shape and type of `frequencies_hertz` containing
frequencies in the mel scale.
"""
with ops.name_scope(name, 'hertz_to_mel', [frequencies_hertz]):
frequencies_hertz = ops.convert_to_tensor(frequencies_hertz)
return _MEL_HIGH_FREQUENCY_Q * math_ops.log(
1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ))
def _validate_arguments(num_mel_bins, sample_rate,
lower_edge_hertz, upper_edge_hertz, dtype):
"""Checks the inputs to linear_to_mel_weight_matrix."""
if num_mel_bins <= 0:
raise ValueError('num_mel_bins must be positive. Got: %s' % num_mel_bins)
if sample_rate <= 0.0:
raise ValueError('sample_rate must be positive. Got: %s' % sample_rate)
if lower_edge_hertz < 0.0:
raise ValueError('lower_edge_hertz must be non-negative. Got: %s' %
lower_edge_hertz)
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError('lower_edge_hertz %.1f >= upper_edge_hertz %.1f' %
(lower_edge_hertz, upper_edge_hertz))
if upper_edge_hertz > sample_rate / 2:
raise ValueError('upper_edge_hertz must not be larger than the Nyquist '
'frequency (sample_rate / 2). Got: %s for sample_rate: %s'
% (upper_edge_hertz, sample_rate))
if not dtype.is_floating:
raise ValueError('dtype must be a floating point type. Got: %s' % dtype)
@tf_export('signal.linear_to_mel_weight_matrix')
def linear_to_mel_weight_matrix(num_mel_bins=20,
num_spectrogram_bins=129,
sample_rate=8000,
lower_edge_hertz=125.0,
upper_edge_hertz=3800.0,
dtype=dtypes.float32,
name=None):
"""Returns a matrix to warp linear scale spectrograms to the [mel scale][mel].
Returns a weight matrix that can be used to re-weight a `Tensor` containing
`num_spectrogram_bins` linearly sampled frequency information from
`[0, sample_rate / 2]` into `num_mel_bins` frequency information from
`[lower_edge_hertz, upper_edge_hertz]` on the [mel scale][mel].
For example, the returned matrix `A` can be used to right-multiply a
spectrogram `S` of shape `[frames, num_spectrogram_bins]` of linear
scale spectrum values (e.g. STFT magnitudes) to generate a "mel spectrogram"
`M` of shape `[frames, num_mel_bins]`.
# `S` has shape [frames, num_spectrogram_bins]
# `M` has shape [frames, num_mel_bins]
M = tf.matmul(S, A)
The matrix can be used with `tf.tensordot` to convert an arbitrary rank
`Tensor` of linear-scale spectral bins into the mel scale.
# S has shape [..., num_spectrogram_bins].
# M has shape [..., num_mel_bins].
M = tf.tensordot(S, A, 1)
# tf.tensordot does not support shape inference for this case yet.
M.set_shape(S.shape[:-1].concatenate(A.shape[-1:]))
Args:
num_mel_bins: Python int. How many bands in the resulting mel spectrum.
num_spectrogram_bins: An integer `Tensor`. How many bins there are in the
source spectrogram data, which is understood to be `fft_size // 2 + 1`,
i.e. the spectrogram only contains the nonredundant FFT bins.
sample_rate: Python float. Samples per second of the input signal used to
create the spectrogram. We need this to figure out the actual frequencies
for each spectrogram bin, which dictates how they are mapped into the mel
scale.
lower_edge_hertz: Python float. Lower bound on the frequencies to be
included in the mel spectrum. This corresponds to the lower edge of the
lowest triangular band.
upper_edge_hertz: Python float. The desired top edge of the highest
frequency band.
dtype: The `DType` of the result matrix. Must be a floating point type.
name: An optional name for the operation.
Returns:
A `Tensor` of shape `[num_spectrogram_bins, num_mel_bins]`.
Raises:
ValueError: If `num_mel_bins`/`num_spectrogram_bins`/`sample_rate` are not
positive, `lower_edge_hertz` is negative, frequency edges are incorrectly
ordered, or `upper_edge_hertz` is larger than the Nyquist frequency.
[mel]: https://en.wikipedia.org/wiki/Mel_scale
"""
with ops.name_scope(name, 'linear_to_mel_weight_matrix') as name:
# Note: As num_spectrogram_bins is passed to `math_ops.linspace`
# and the validation is already done in linspace (both in shape function
# and in kernel), there is no need to validate num_spectrogram_bins here.
_validate_arguments(num_mel_bins, sample_rate,
lower_edge_hertz, upper_edge_hertz, dtype)
# This function can be constant folded by graph optimization since there are
# no Tensor inputs.
sample_rate = ops.convert_to_tensor(
sample_rate, dtype, name='sample_rate')
lower_edge_hertz = ops.convert_to_tensor(
lower_edge_hertz, dtype, name='lower_edge_hertz')
upper_edge_hertz = ops.convert_to_tensor(
upper_edge_hertz, dtype, name='upper_edge_hertz')
zero = ops.convert_to_tensor(0.0, dtype)
# HTK excludes the spectrogram DC bin.
bands_to_zero = 1
nyquist_hertz = sample_rate / 2.0
linear_frequencies = math_ops.linspace(
zero, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:]
spectrogram_bins_mel = array_ops.expand_dims(
_hertz_to_mel(linear_frequencies), 1)
# Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
# center of each band is the lower and upper edge of the adjacent bands.
# Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
# num_mel_bins + 2 pieces.
band_edges_mel = shape_ops.frame(
math_ops.linspace(_hertz_to_mel(lower_edge_hertz),
_hertz_to_mel(upper_edge_hertz),
num_mel_bins + 2), frame_length=3, frame_step=1)
# Split the triples up and reshape them into [1, num_mel_bins] tensors.
lower_edge_mel, center_mel, upper_edge_mel = tuple(array_ops.reshape(
t, [1, num_mel_bins]) for t in array_ops.split(
band_edges_mel, 3, axis=1))
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the mel domain, not Hertz.
lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
center_mel - lower_edge_mel)
upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
upper_edge_mel - center_mel)
# Intersect the line segments with each other and zero.
mel_weights_matrix = math_ops.maximum(
zero, math_ops.minimum(lower_slopes, upper_slopes))
# Re-add the zeroed lower bins we sliced out above.
return array_ops.pad(
mel_weights_matrix, [[bands_to_zero, 0], [0, 0]], name=name)
# ==============================================================================
# End of file: tensorflow/python/ops/signal/mel_ops.py  (repo: tensorflow-master)
# ==============================================================================
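A minimal usage sketch for the module above (not part of the source file), assuming the public TF 1.x `tf.signal` API; the audio tensor and STFT parameters are hypothetical placeholders:

```python
import tensorflow as tf

# Hypothetical mono audio batch; any [batch, samples] float tensor works.
pcm = tf.random.normal([1, 16000])
stfts = tf.signal.stft(pcm, frame_length=512, frame_step=128, fft_length=512)
magnitudes = tf.abs(stfts)  # [1, frames, 257], where 257 == 512 // 2 + 1
mel_matrix = tf.signal.linear_to_mel_weight_matrix(
    num_mel_bins=40, num_spectrogram_bins=257, sample_rate=16000,
    lower_edge_hertz=125.0, upper_edge_hertz=7600.0)
# Warp the linear-frequency axis onto the mel scale, as the docstring shows.
mel_spectrograms = tf.tensordot(magnitudes, mel_matrix, 1)  # [1, frames, 40]
mel_spectrograms.set_shape(
    magnitudes.shape[:-1].concatenate(mel_matrix.shape[-1:]))
```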
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""General shape ops for frames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import util_ops
from tensorflow.python.util.tf_export import tf_export
def _infer_frame_shape(signal, frame_length, frame_step, pad_end, axis):
"""Infers the shape of the return value of `frame`."""
frame_length = tensor_util.constant_value(frame_length)
frame_step = tensor_util.constant_value(frame_step)
axis = tensor_util.constant_value(axis)
if signal.shape.ndims is None:
return None
if axis is None:
return [None] * (signal.shape.ndims + 1)
signal_shape = signal.shape.as_list()
num_frames = None
frame_axis = signal_shape[axis]
outer_dimensions = signal_shape[:axis]
inner_dimensions = signal_shape[axis:][1:]
if signal_shape and frame_axis is not None:
if frame_step is not None and pad_end:
# Double negative is so that we round up.
num_frames = max(0, -(-frame_axis // frame_step))
elif frame_step is not None and frame_length is not None:
assert not pad_end
num_frames = max(
0, (frame_axis - frame_length + frame_step) // frame_step)
return outer_dimensions + [num_frames, frame_length] + inner_dimensions
@tf_export("signal.frame")
def frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1,
name=None):
"""Expands `signal`'s `axis` dimension into frames of `frame_length`.
Slides a window of size `frame_length` over `signal`'s `axis` dimension
with a stride of `frame_step`, replacing the `axis` dimension with
`[frames, frame_length]` frames.
If `pad_end` is True, window positions that are past the end of the `axis`
dimension are padded with `pad_value` until the window moves fully past the
end of the dimension. Otherwise, only window positions that fully overlap the
`axis` dimension are produced.
For example:
```python
pcm = tf.compat.v1.placeholder(tf.float32, [None, 9152])
frames = tf.signal.frame(pcm, 512, 180)
magspec = tf.abs(tf.signal.rfft(frames, [512]))
image = tf.expand_dims(magspec, 3)
```
Args:
signal: A `[..., samples, ...]` `Tensor`. The rank and dimensions
may be unknown. Rank must be at least 1.
frame_length: The frame length in samples. An integer or scalar `Tensor`.
frame_step: The frame hop size in samples. An integer or scalar `Tensor`.
pad_end: Whether to pad the end of `signal` with `pad_value`.
pad_value: An optional scalar `Tensor` to use where the input signal
does not exist when `pad_end` is True.
axis: A scalar integer `Tensor` indicating the axis to frame. Defaults to
the last axis. Supports negative values for indexing from the end.
name: An optional name for the operation.
Returns:
A `Tensor` of frames with shape `[..., frames, frame_length, ...]`.
Raises:
ValueError: If `frame_length`, `frame_step`, `pad_value`, or `axis` are not
scalar.
"""
with ops.name_scope(name, "frame", [signal, frame_length, frame_step,
pad_value]):
signal = ops.convert_to_tensor(signal, name="signal")
frame_length = ops.convert_to_tensor(frame_length, name="frame_length")
frame_step = ops.convert_to_tensor(frame_step, name="frame_step")
axis = ops.convert_to_tensor(axis, name="axis")
signal.shape.with_rank_at_least(1)
frame_length.shape.assert_has_rank(0)
frame_step.shape.assert_has_rank(0)
axis.shape.assert_has_rank(0)
result_shape = _infer_frame_shape(signal, frame_length, frame_step, pad_end,
axis)
# Axis can be negative. Convert it to positive.
signal_rank = array_ops.rank(signal)
axis = math_ops.range(signal_rank)[axis]
signal_shape = array_ops.shape(signal)
outer_dimensions, length_samples, inner_dimensions = array_ops.split(
signal_shape, [axis, 1, signal_rank - 1 - axis])
length_samples = array_ops.reshape(length_samples, [])
num_outer_dimensions = array_ops.size(outer_dimensions)
num_inner_dimensions = array_ops.size(inner_dimensions)
# If padding is requested, pad the input signal tensor with pad_value.
if pad_end:
pad_value = ops.convert_to_tensor(pad_value, signal.dtype)
pad_value.shape.assert_has_rank(0)
# Calculate number of frames, using double negatives to round up.
num_frames = -(-length_samples // frame_step)
# Pad the signal by up to frame_length samples based on how many samples
# are remaining starting from last_frame_position.
pad_samples = math_ops.maximum(
0, frame_length + frame_step * (num_frames - 1) - length_samples)
# Pad the inner dimension of signal by pad_samples.
paddings = array_ops.concat(
[array_ops.zeros([num_outer_dimensions, 2], dtype=pad_samples.dtype),
[[0, pad_samples]],
array_ops.zeros([num_inner_dimensions, 2], dtype=pad_samples.dtype)],
0)
signal = array_ops.pad(signal, paddings, constant_values=pad_value)
signal_shape = array_ops.shape(signal)
length_samples = signal_shape[axis]
else:
num_frames = math_ops.maximum(
0, 1 + (length_samples - frame_length) // frame_step)
subframe_length = util_ops.gcd(frame_length, frame_step)
subframes_per_frame = frame_length // subframe_length
subframes_per_hop = frame_step // subframe_length
num_subframes = length_samples // subframe_length
slice_shape = array_ops.concat([outer_dimensions,
[num_subframes * subframe_length],
inner_dimensions], 0)
subframe_shape = array_ops.concat([outer_dimensions,
[num_subframes, subframe_length],
inner_dimensions], 0)
subframes = array_ops.reshape(array_ops.strided_slice(
signal, array_ops.zeros_like(signal_shape),
slice_shape), subframe_shape)
# frame_selector is a [num_frames, subframes_per_frame] tensor
# that indexes into the appropriate frame in subframes. For example:
# [[0, 0, 0, 0], [2, 2, 2, 2], [4, 4, 4, 4]]
frame_selector = array_ops.reshape(
math_ops.range(num_frames) * subframes_per_hop, [num_frames, 1])
# subframe_selector is a [num_frames, subframes_per_frame] tensor
# that indexes into the appropriate subframe within a frame. For example:
# [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
subframe_selector = array_ops.reshape(
math_ops.range(subframes_per_frame), [1, subframes_per_frame])
# Adding the 2 selector tensors together produces a [num_frames,
# subframes_per_frame] tensor of indices to use with tf.gather to select
# subframes from subframes. We then reshape the inner-most
# subframes_per_frame dimension to stitch the subframes together into
# frames. For example: [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]].
selector = frame_selector + subframe_selector
frames = array_ops.reshape(
array_ops.gather(subframes, selector, axis=axis),
array_ops.concat([outer_dimensions, [num_frames, frame_length],
inner_dimensions], 0))
if result_shape:
frames.set_shape(result_shape)
return frames
# ==============================================================================
# End of file: tensorflow/python/ops/signal/shape_ops.py  (repo: tensorflow-master)
# ==============================================================================
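A small sketch of `tf.signal.frame` on a toy signal (not part of the file above), illustrating the framing and `pad_end` behavior the docstring documents:

```python
import tensorflow as tf

signal = tf.range(10)  # [0, 1, ..., 9]
# Without padding, only fully-overlapping windows are produced:
# 1 + (10 - 4) // 2 = 4 frames, shape [4, 4].
frames = tf.signal.frame(signal, frame_length=4, frame_step=2)
# With pad_end=True the tail is kept and padded with pad_value (default 0):
# ceil(10 / 2) = 5 frames; the last frame is [8, 9, 0, 0].
padded = tf.signal.frame(signal, frame_length=4, frame_step=2, pad_end=True)
```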
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for computing common window functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('signal.hann_window')
def hann_window(window_length, periodic=True, dtype=dtypes.float32, name=None):
"""Generate a [Hann window][hann].
Args:
window_length: A scalar `Tensor` indicating the window length to generate.
periodic: A bool `Tensor` indicating whether to generate a periodic or
symmetric window. Periodic windows are typically used for spectral
analysis while symmetric windows are typically used for digital
filter design.
dtype: The data type to produce. Must be a floating point type.
name: An optional name for the operation.
Returns:
A `Tensor` of shape `[window_length]` of type `dtype`.
Raises:
ValueError: If `dtype` is not a floating point type.
[hann]: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
"""
return _raised_cosine_window(name, 'hann_window', window_length, periodic,
dtype, 0.5, 0.5)
@tf_export('signal.hamming_window')
def hamming_window(window_length, periodic=True, dtype=dtypes.float32,
name=None):
"""Generate a [Hamming][hamming] window.
Args:
window_length: A scalar `Tensor` indicating the window length to generate.
periodic: A bool `Tensor` indicating whether to generate a periodic or
symmetric window. Periodic windows are typically used for spectral
analysis while symmetric windows are typically used for digital
filter design.
dtype: The data type to produce. Must be a floating point type.
name: An optional name for the operation.
Returns:
A `Tensor` of shape `[window_length]` of type `dtype`.
Raises:
ValueError: If `dtype` is not a floating point type.
[hamming]: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
"""
return _raised_cosine_window(name, 'hamming_window', window_length, periodic,
dtype, 0.54, 0.46)
def _raised_cosine_window(name, default_name, window_length, periodic,
dtype, a, b):
"""Helper function for computing a raised cosine window.
Args:
name: Name to use for the scope.
default_name: Default name to use for the scope.
window_length: A scalar `Tensor` or integer indicating the window length.
periodic: A bool `Tensor` indicating whether to generate a periodic or
symmetric window.
dtype: A floating point `DType`.
a: The alpha parameter to the raised cosine window.
b: The beta parameter to the raised cosine window.
Returns:
A `Tensor` of shape `[window_length]` of type `dtype`.
Raises:
ValueError: If `dtype` is not a floating point type or `window_length` is
not scalar or `periodic` is not scalar.
"""
if not dtype.is_floating:
raise ValueError('dtype must be a floating point type. Found %s' % dtype)
with ops.name_scope(name, default_name, [window_length, periodic]):
window_length = ops.convert_to_tensor(window_length, dtype=dtypes.int32,
name='window_length')
window_length.shape.assert_has_rank(0)
window_length_const = tensor_util.constant_value(window_length)
if window_length_const == 1:
return array_ops.ones([1], dtype=dtype)
periodic = math_ops.cast(
ops.convert_to_tensor(periodic, dtype=dtypes.bool, name='periodic'),
dtypes.int32)
periodic.shape.assert_has_rank(0)
even = 1 - math_ops.mod(window_length, 2)
n = math_ops.cast(window_length + periodic * even - 1, dtype=dtype)
count = math_ops.cast(math_ops.range(window_length), dtype)
cos_arg = constant_op.constant(2 * np.pi, dtype=dtype) * count / n
if window_length_const is not None:
return math_ops.cast(a - b * math_ops.cos(cos_arg), dtype=dtype)
return control_flow_ops.cond(
math_ops.equal(window_length, 1),
lambda: array_ops.ones([1], dtype=dtype),
lambda: math_ops.cast(a - b * math_ops.cos(cos_arg), dtype=dtype))
# ==============================================================================
# End of file: tensorflow/python/ops/signal/window_ops.py  (repo: tensorflow-master)
# ==============================================================================
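A brief sketch (not part of the file above) checking a periodic Hann window against the raised-cosine form `a - b * cos(2*pi*n/N)` with `a = b = 0.5` that `_raised_cosine_window` implements:

```python
import numpy as np
import tensorflow as tf

window = tf.signal.hann_window(8, periodic=True)  # shape [8], float32
# For a periodic window of even length, N equals window_length, so the
# expected values are 0.5 - 0.5 * cos(2 * pi * n / 8) for n in 0..7.
expected = 0.5 - 0.5 * np.cos(2 * np.pi * np.arange(8) / 8)
```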
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Identity bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Identity",
]
class Identity(bijector.Bijector):
"""Compute Y = g(X) = X.
Example Use:
```python
# Create the Y=g(X)=X transform which is intended for Tensors with 1 batch
# ndim and 1 event ndim (i.e., vector of vectors).
identity = Identity()
x = [[1., 2],
[3, 4]]
x == identity.forward(x) == identity.inverse(x)
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self, validate_args=False, name="identity"):
super(Identity, self).__init__(
forward_min_event_ndims=0,
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
def _forward(self, x):
return x
def _inverse(self, y):
return y
def _inverse_log_det_jacobian(self, y):
return constant_op.constant(0., dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(0., dtype=x.dtype)
# ==============================================================================
# End of file: tensorflow/python/ops/distributions/identity_bijector.py  (repo: tensorflow-master)
# ==============================================================================
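A short sketch of the constant-Jacobian property described above (not part of the file; it assumes the internal module path still resolves):

```python
import tensorflow as tf
from tensorflow.python.ops.distributions import identity_bijector

bijector = identity_bijector.Identity()
x = tf.constant([1.0, 2.0, 3.0])
y = bijector.forward(x)  # identical to x
# The identity transform contributes zero log-det-Jacobian at any point.
ildj = bijector.inverse_log_det_jacobian(x, event_ndims=0)  # 0.0
```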
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Laplace distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Laplace",
"LaplaceWithSoftplusScale",
]
@tf_export(v1=["distributions.Laplace"])
class Laplace(distribution.Distribution):
"""The Laplace distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) of this distribution is,
```none
pdf(x; mu, sigma) = exp(-|x - mu| / sigma) / Z
Z = 2 sigma
```
where `loc = mu`, `scale = sigma`, and `Z` is the normalization constant.
Note that the Laplace distribution can be thought of as two exponential
distributions spliced together "back-to-back."
The Laplace distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Laplace(loc=0, scale=1)
Y = loc + scale * X
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Laplace"):
"""Construct Laplace distribution with parameters `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g., `loc / scale` is a valid operation).
Args:
loc: Floating point tensor which characterizes the location (center)
of the distribution.
scale: Positive floating point tensor which characterizes the spread of
the distribution.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` are of different dtype.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Laplace, self).__init__(
dtype=self._loc.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
# Uniform variates must be sampled from the open-interval `(-1, 1)` rather
# than `[-1, 1)`. In the case of `(0, 1)` we'd use
# `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
# positive, "normal" number. However, the concept of subnormality exists
# only at zero; here we need the smallest usable number larger than -1,
# i.e., `-1 + eps/2`.
uniform_samples = random_ops.random_uniform(
shape=shape,
minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
self.dtype.as_numpy_dtype(0.)),
maxval=1.,
dtype=self.dtype,
seed=seed)
return (self.loc - self.scale * math_ops.sign(uniform_samples) *
math_ops.log1p(-math_ops.abs(uniform_samples)))
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
return special_math.log_cdf_laplace(self._z(x))
def _log_survival_function(self, x):
return special_math.log_cdf_laplace(-self._z(x))
def _cdf(self, x):
z = self._z(x)
return (0.5 + 0.5 * math_ops.sign(z) *
(1. - math_ops.exp(-math_ops.abs(z))))
def _log_unnormalized_prob(self, x):
return -math_ops.abs(self._z(x))
def _log_normalization(self):
return math.log(2.) + math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast scale.
scale = self.scale + array_ops.zeros_like(self.loc)
return math.log(2.) + 1. + math_ops.log(scale)
def _mean(self):
return self.loc + array_ops.zeros_like(self.scale)
def _stddev(self):
return math.sqrt(2.) * self.scale + array_ops.zeros_like(self.loc)
def _median(self):
return self._mean()
def _mode(self):
return self._mean()
def _z(self, x):
return (x - self.loc) / self.scale
class LaplaceWithSoftplusScale(Laplace):
"""Laplace with softplus applied to `scale`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Laplace(loc, tf.nn.softplus(scale)) "
"instead.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="LaplaceWithSoftplusScale"):
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
super(LaplaceWithSoftplusScale, self).__init__(
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
# ==============================================================================
# End of file: tensorflow/python/ops/distributions/laplace.py  (repo: tensorflow-master)
# ==============================================================================
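A usage sketch for the class above (not part of the file), showing the `loc`/`scale` broadcasting the constructor docstring describes; the import uses the same internal module path as this dump:

```python
from tensorflow.python.ops.distributions import laplace

# loc has shape [2] and scale is scalar, so loc / scale broadcasts and the
# result is a batch of two Laplace distributions.
dist = laplace.Laplace(loc=[0.0, 1.0], scale=2.0)
samples = dist.sample(5)        # Tensor of shape [5, 2]
log_probs = dist.log_prob(0.0)  # Tensor of shape [2]
```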
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Categorical distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _broadcast_cat_event_and_params(event, params, base_dtype):
"""Broadcasts the event or distribution parameters."""
if event.dtype.is_integer:
pass
elif event.dtype.is_floating:
# When `validate_args=True` we've already ensured int/float casting
# is closed.
event = math_ops.cast(event, dtype=dtypes.int32)
else:
raise TypeError("`value` should have integer `dtype` or "
"`self.dtype` ({})".format(base_dtype))
shape_known_statically = (
params.shape.ndims is not None and
params.shape[:-1].is_fully_defined() and
event.shape.is_fully_defined())
if not shape_known_statically or params.shape[:-1] != event.shape:
params *= array_ops.ones_like(event[..., array_ops.newaxis],
dtype=params.dtype)
params_shape = array_ops.shape(params)[:-1]
event *= array_ops.ones(params_shape, dtype=event.dtype)
if params.shape.ndims is not None:
event.set_shape(tensor_shape.TensorShape(params.shape[:-1]))
return event, params
@tf_export(v1=["distributions.Categorical"])
class Categorical(distribution.Distribution):
"""Categorical distribution.
The Categorical distribution is parameterized by either probabilities or
log-probabilities of a set of `K` classes. It is defined over the integers
`{0, 1, ..., K-1}`.
The Categorical distribution is closely related to the `OneHotCategorical` and
`Multinomial` distributions. The Categorical distribution can be intuited as
generating samples according to `argmax{ OneHotCategorical(probs) }` itself
being identical to `argmax{ Multinomial(probs, total_count=1) }`.
#### Mathematical Details
The probability mass function (pmf) is,
```none
pmf(k; pi) = prod_j pi_j**[k == j]
```
#### Pitfalls
The number of classes, `K`, must not exceed:
- the largest integer representable by `self.dtype`, i.e.,
`2**(mantissa_bits+1)` (IEEE 754),
- the maximum `Tensor` index, i.e., `2**31-1`.
In other words,
```python
K <= min(2**31-1, {
tf.float16: 2**11,
tf.float32: 2**24,
tf.float64: 2**53 }[param.dtype])
```
Note: This condition is validated only when `self.validate_args = True`.
#### Examples
Creates a 3-class distribution with the 2nd class being most likely.
```python
dist = Categorical(probs=[0.1, 0.5, 0.4])
n = 1e4
empirical_prob = tf.cast(
tf.histogram_fixed_width(
dist.sample(int(n)),
[0., 2],
nbins=3),
dtype=tf.float32) / n
# ==> array([ 0.1005, 0.5037, 0.3958], dtype=float32)
```
Creates a 3-class distribution with the 2nd class being most likely.
Parameterized by [logits](https://en.wikipedia.org/wiki/Logit) rather than
probabilities.
```python
dist = Categorical(logits=np.log([0.1, 0.5, 0.4]))
n = 1e4
empirical_prob = tf.cast(
tf.histogram_fixed_width(
dist.sample(int(n)),
[0., 2],
nbins=3),
dtype=tf.float32) / n
# ==> array([0.1045, 0.5047, 0.3908], dtype=float32)
```
Creates a 3-class distribution with the 3rd class being most likely.
The distribution functions can be evaluated on counts.
```python
# counts is a scalar.
p = [0.1, 0.4, 0.5]
dist = Categorical(probs=p)
dist.prob(0) # Shape []
# p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match counts.
counts = [1, 0]
dist.prob(counts) # Shape [2]
# p will be broadcast to shape [5, 7, 3, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7, 3]
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(
self,
logits=None,
probs=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="Categorical"):
"""Initialize Categorical distributions using class log-probabilities.
Args:
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of logits for each class. Only one of `logits` or
`probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of probabilities for each class. Only one of
`logits` or `probs` should be passed in.
dtype: The type of the event samples (default: int32).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
validate_args=validate_args,
multidimensional=True,
name=name)
if validate_args:
self._logits = distribution_util.embed_check_categorical_event_shape(
self._logits)
logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
if logits_shape_static.ndims is not None:
self._batch_rank = ops.convert_to_tensor(
logits_shape_static.ndims - 1,
dtype=dtypes.int32,
name="batch_rank")
else:
with ops.name_scope(name="batch_rank"):
self._batch_rank = array_ops.rank(self._logits) - 1
logits_shape = array_ops.shape(self._logits, name="logits_shape")
if tensor_shape.dimension_value(logits_shape_static[-1]) is not None:
self._event_size = ops.convert_to_tensor(
logits_shape_static.dims[-1].value,
dtype=dtypes.int32,
name="event_size")
else:
with ops.name_scope(name="event_size"):
self._event_size = logits_shape[self._batch_rank]
if logits_shape_static[:-1].is_fully_defined():
self._batch_shape_val = constant_op.constant(
logits_shape_static[:-1].as_list(),
dtype=dtypes.int32,
name="batch_shape")
else:
with ops.name_scope(name="batch_shape"):
self._batch_shape_val = logits_shape[:-1]
super(Categorical, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits,
self._probs],
name=name)
@property
def event_size(self):
"""Scalar `int32` tensor: the number of classes."""
return self._event_size
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Vector of coordinatewise probabilities."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.identity(self._batch_shape_val)
def _batch_shape(self):
return self.logits.get_shape()[:-1]
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
if self.logits.get_shape().ndims == 2:
logits_2d = self.logits
else:
logits_2d = array_ops.reshape(self.logits, [-1, self.event_size])
sample_dtype = dtypes.int64 if self.dtype.size > 4 else dtypes.int32
draws = random_ops.multinomial(
logits_2d, n, seed=seed, output_dtype=sample_dtype)
draws = array_ops.reshape(
array_ops.transpose(draws),
array_ops.concat([[n], self.batch_shape_tensor()], 0))
return math_ops.cast(draws, self.dtype)
def _cdf(self, k):
k = ops.convert_to_tensor(k, name="k")
if self.validate_args:
k = distribution_util.embed_check_integer_casting_closed(
k, target_dtype=dtypes.int32)
k, probs = _broadcast_cat_event_and_params(
k, self.probs, base_dtype=self.dtype.base_dtype)
# batch-flatten everything in order to use `sequence_mask()`.
batch_flattened_probs = array_ops.reshape(probs,
(-1, self._event_size))
batch_flattened_k = array_ops.reshape(k, [-1])
to_sum_over = array_ops.where(
array_ops.sequence_mask(batch_flattened_k, self._event_size),
batch_flattened_probs,
array_ops.zeros_like(batch_flattened_probs))
batch_flattened_cdf = math_ops.reduce_sum(to_sum_over, axis=-1)
# Reshape back to the shape of the argument.
return array_ops.reshape(batch_flattened_cdf, array_ops.shape(k))
def _log_prob(self, k):
k = ops.convert_to_tensor(k, name="k")
if self.validate_args:
k = distribution_util.embed_check_integer_casting_closed(
k, target_dtype=dtypes.int32)
k, logits = _broadcast_cat_event_and_params(
k, self.logits, base_dtype=self.dtype.base_dtype)
return -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k,
logits=logits)
def _entropy(self):
return -math_ops.reduce_sum(
nn_ops.log_softmax(self.logits) * self.probs, axis=-1)
def _mode(self):
ret = math_ops.argmax(self.logits, axis=self._batch_rank)
ret = math_ops.cast(ret, self.dtype)
ret.set_shape(self.batch_shape)
return ret
@kullback_leibler.RegisterKL(Categorical, Categorical)
def _kl_categorical_categorical(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Categorical.
Args:
a: instance of a Categorical distribution object.
b: instance of a Categorical distribution object.
name: (optional) Name to use for created operations.
default is "kl_categorical_categorical".
Returns:
Batchwise KL(a || b)
"""
with ops.name_scope(name, "kl_categorical_categorical",
values=[a.logits, b.logits]):
# sum(probs log(probs / (1 - probs)))
delta_log_probs1 = (nn_ops.log_softmax(a.logits) -
nn_ops.log_softmax(b.logits))
return math_ops.reduce_sum(nn_ops.softmax(a.logits) * delta_log_probs1,
axis=-1)
# ==============================================================================
# End of file: tensorflow/python/ops/distributions/categorical.py  (repo: tensorflow-master)
# ==============================================================================
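A sketch exercising the KL registration above (not part of the file), assuming the internal `kullback_leibler.kl_divergence` entry point:

```python
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import kullback_leibler

a = categorical.Categorical(probs=[0.1, 0.5, 0.4])
b = categorical.Categorical(probs=[0.3, 0.3, 0.4])
# Dispatches to _kl_categorical_categorical via the @RegisterKL decorator.
kl = kullback_leibler.kl_divergence(a, b)  # scalar Tensor
```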
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Dirichlet distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Dirichlet",
]
_dirichlet_sample_note = """Note: `value` must be a non-negative tensor with
dtype `self.dtype` and be in the `(self.event_shape() - 1)`-simplex, i.e.,
`tf.reduce_sum(value, -1) = 1`. It must have a shape compatible with
`self.batch_shape() + self.event_shape()`."""
@tf_export(v1=["distributions.Dirichlet"])
class Dirichlet(distribution.Distribution):
"""Dirichlet distribution.
The Dirichlet distribution is defined over the
[`(k-1)`-simplex](https://en.wikipedia.org/wiki/Simplex) using a positive,
length-`k` vector `concentration` (`k > 1`). The Dirichlet is identically the
Beta distribution when `k = 2`.
#### Mathematical Details
The Dirichlet is a distribution over the open `(k-1)`-simplex, i.e.,
```none
S^{k-1} = { (x_0, ..., x_{k-1}) in R^k : sum_j x_j = 1 and all_j x_j > 0 }.
```
The probability density function (pdf) is,
```none
pdf(x; alpha) = prod_j x_j**(alpha_j - 1) / Z
Z = prod_j Gamma(alpha_j) / Gamma(sum_j alpha_j)
```
where:
* `x in S^{k-1}`, i.e., the `(k-1)`-simplex,
* `concentration = alpha = [alpha_0, ..., alpha_{k-1}]`, `alpha_j > 0`,
* `Z` is the normalization constant aka the [multivariate beta function](
https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),
and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The `concentration` represents mean total counts of class occurrence, i.e.,
```none
concentration = alpha = mean * total_concentration
```
where `mean` in `S^{k-1}` and `total_concentration` is a positive real number
representing a mean total count.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: Some components of the samples can be zero due to finite precision.
This happens more often when some of the concentrations are very small.
Make sure to round the samples to `np.finfo(dtype).tiny` before computing the
density.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create a single trivariate Dirichlet, with the 3rd class being three times
# more frequent than the first. I.e., batch_shape=[], event_shape=[3].
alpha = [1., 2, 3]
dist = tfd.Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 3]
# x has one sample, one batch, three classes:
x = [.2, .3, .5] # shape: [3]
dist.prob(x) # shape: []
# x has two samples from one batch:
x = [[.1, .4, .5],
[.2, .3, .5]]
dist.prob(x) # shape: [2]
# alpha will be broadcast to shape [5, 7, 3] to match x.
x = [[...]] # shape: [5, 7, 3]
dist.prob(x) # shape: [5, 7]
```
```python
# Create batch_shape=[2], event_shape=[3]:
alpha = [[1., 2, 3],
[4, 5, 6]] # shape: [2, 3]
dist = tfd.Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # shape: [2]
```
Compute the gradients of samples w.r.t. the parameters:
```python
alpha = tf.constant([1.0, 2.0, 3.0])
dist = tfd.Dirichlet(alpha)
samples = dist.sample(5) # Shape [5, 3]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, alpha)
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
concentration,
validate_args=False,
allow_nan_stats=True,
name="Dirichlet"):
"""Initialize a batch of Dirichlet distributions.
Args:
concentration: Positive floating-point `Tensor` indicating mean number
of class occurrences; aka "alpha". Implies `self.dtype`, and
`self.batch_shape`, `self.event_shape`, i.e., if
`concentration.shape = [N1, N2, ..., Nm, k]` then
`batch_shape = [N1, N2, ..., Nm]` and
`event_shape = [k]`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration]) as name:
self._concentration = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration, name="concentration"),
validate_args)
self._total_concentration = math_ops.reduce_sum(self._concentration, -1)
super(Dirichlet, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._total_concentration],
name=name)
@property
def concentration(self):
"""Concentration parameter; expected counts for that coordinate."""
return self._concentration
@property
def total_concentration(self):
"""Sum of last dim of concentration parameter."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return array_ops.shape(self.concentration)[-1:]
def _event_shape(self):
return self.concentration.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
gamma_sample = random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
dtype=self.dtype,
seed=seed)
return gamma_sample / math_ops.reduce_sum(gamma_sample, -1, keepdims=True)
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return math_ops.reduce_sum(math_ops.xlogy(self.concentration - 1., x), -1)
def _log_normalization(self):
return special_math_ops.lbeta(self.concentration)
def _entropy(self):
k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
return (
self._log_normalization()
+ ((self.total_concentration - k)
* math_ops.digamma(self.total_concentration))
- math_ops.reduce_sum(
(self.concentration - 1.) * math_ops.digamma(self.concentration),
axis=-1))
def _mean(self):
return self.concentration / self.total_concentration[..., array_ops.newaxis]
def _covariance(self):
x = self._variance_scale_term() * self._mean()
return array_ops.matrix_set_diag(
-math_ops.matmul(x[..., array_ops.newaxis],
x[..., array_ops.newaxis, :]), # outer prod
self._variance())
def _variance(self):
scale = self._variance_scale_term()
x = scale * self._mean()
return x * (scale - x)
def _variance_scale_term(self):
"""Helper to `_covariance` and `_variance` which computes a shared scale."""
return math_ops.rsqrt(1. + self.total_concentration[..., array_ops.newaxis])
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when any `concentration <= 1`. If
`self.allow_nan_stats` is `True`, `NaN` is used for undefined modes. If
`self.allow_nan_stats` is `False` an exception is raised when one or more
modes are undefined.""")
def _mode(self):
k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
mode = (self.concentration - 1.) / (
self.total_concentration[..., array_ops.newaxis] - k)
if self.allow_nan_stats:
nan = array_ops.fill(
array_ops.shape(mode),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(
math_ops.reduce_all(self.concentration > 1., axis=-1),
mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype),
self.concentration,
message="Mode undefined when any concentration <= 1"),
], mode)
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
check_ops.assert_rank_at_least(
concentration, 1,
message="Concentration parameter must have >=1 dimensions."),
check_ops.assert_less(
1, array_ops.shape(concentration)[-1],
message="Concentration parameter must have event_size >= 2."),
], concentration)
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x, message="samples must be positive"),
check_ops.assert_near(
array_ops.ones([], dtype=self.dtype),
math_ops.reduce_sum(x, -1),
message="sample last-dimension must sum to `1`"),
], x)
@kullback_leibler.RegisterKL(Dirichlet, Dirichlet)
def _kl_dirichlet_dirichlet(d1, d2, name=None):
"""Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.
Args:
d1: instance of a Dirichlet distribution object.
d2: instance of a Dirichlet distribution object.
name: (optional) Name to use for created operations.
default is "kl_dirichlet_dirichlet".
Returns:
Batchwise KL(d1 || d2)
"""
with ops.name_scope(name, "kl_dirichlet_dirichlet", values=[
d1.concentration, d2.concentration]):
# The KL between Dirichlet distributions can be derived as follows. We have
#
# Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)]
#
# where B(a) is the multivariate Beta function:
#
# B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n])
#
# The KL is
#
# KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
#
# so we'll need to know the log density of the Dirichlet. This is
#
# log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a)
#
# The only term that matters for the expectations is the log(x[i]). To
# compute the expectation of this term over the Dirichlet density, we can
# use the following facts about the Dirichlet in exponential family form:
# 1. log(x[i]) is a sufficient statistic
# 2. expected sufficient statistics (of any exp family distribution) are
# equal to derivatives of the log normalizer with respect to
# corresponding natural parameters: E{T[i](x)} = dA/d(eta[i])
#
# To proceed, we can rewrite the Dirichlet density in exponential family
# form as follows:
#
# Dir(x; a) = exp{eta(a) . T(x) - A(a)}
#
# where '.' is the dot product of vectors eta and T, and A is a scalar:
#
# eta[i](a) = a[i] - 1
# T[i](x) = log(x[i])
# A(a) = log B(a)
#
# Now, we can use fact (2) above to write
#
# E_Dir(x; a)[log(x[i])]
# = dA(a) / da[i]
# = d/da[i] log B(a)
# = d/da[i] (sum_j lgamma(a[j])) - lgamma(sum_j a[j])
# = digamma(a[i])) - digamma(sum_j a[j])
#
# Putting it all together, we have
#
# KL[Dir(x; a) || Dir(x; b)]
# = E_Dir(x; a){log(Dir(x; a) / Dir(x; b)}
# = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])} - (lbeta(a) - lbeta(b))
# = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b)
# = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))]
# - lbeta(a) + lbeta(b))
digamma_sum_d1 = math_ops.digamma(
math_ops.reduce_sum(d1.concentration, axis=-1, keepdims=True))
digamma_diff = math_ops.digamma(d1.concentration) - digamma_sum_d1
concentration_diff = d1.concentration - d2.concentration
return (math_ops.reduce_sum(concentration_diff * digamma_diff, axis=-1) -
special_math_ops.lbeta(d1.concentration) +
special_math_ops.lbeta(d2.concentration))
# ==============================================================================
# End of file: tensorflow/python/ops/distributions/dirichlet.py  (repo: tensorflow-master)
# ==============================================================================
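A minimal sketch for the class above (not part of the file), illustrating that `mean()` matches `concentration / total_concentration` from `_mean`:

```python
from tensorflow.python.ops.distributions import dirichlet

dist = dirichlet.Dirichlet(concentration=[1.0, 2.0, 3.0])
mean = dist.mean()        # -> [1/6, 2/6, 3/6]
samples = dist.sample(4)  # shape [4, 3]; each row sums to ~1
```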
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import identity_bijector
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"TransformedDistribution",
]
# The following helper functions attempt to statically perform a TF operation.
# These functions make debugging easier since we can do more validation during
# graph construction.
def _static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def _logical_and(*args):
"""Convenience function which attempts to statically `reduce_all`."""
args_ = [_static_value(x) for x in args]
if any(x is not None and not bool(x) for x in args_):
return constant_op.constant(False)
if all(x is not None and bool(x) for x in args_):
return constant_op.constant(True)
if len(args) == 2:
return math_ops.logical_and(*args)
return math_ops.reduce_all(args)
def _logical_equal(x, y):
"""Convenience function which attempts to statically compute `x == y`."""
x_ = _static_value(x)
y_ = _static_value(y)
if x_ is None or y_ is None:
return math_ops.equal(x, y)
return constant_op.constant(np.array_equal(x_, y_))
def _logical_not(x):
"""Convenience function which attempts to statically apply `logical_not`."""
x_ = _static_value(x)
if x_ is None:
return math_ops.logical_not(x)
return constant_op.constant(np.logical_not(x_))
def _concat_vectors(*args):
"""Convenience function which concatenates input vectors."""
args_ = [_static_value(x) for x in args]
if any(x_ is None for x_ in args_):
return array_ops.concat(args, 0)
return constant_op.constant([x_ for vec_ in args_ for x_ in vec_])
def _pick_scalar_condition(pred, cond_true, cond_false):
"""Convenience function which chooses the condition based on the predicate."""
# Note: This function is only valid if all of pred, cond_true, and cond_false
# are scalars. This means its semantics are arguably more like tf.cond than
# tf.select even though we use tf.select to implement it.
pred_ = _static_value(pred)
if pred_ is None:
return array_ops.where(pred, cond_true, cond_false)
return cond_true if pred_ else cond_false
def _ones_like(x):
"""Convenience function attempts to statically construct `ones_like`."""
# Should only be used for small vectors.
if x.get_shape().is_fully_defined():
return array_ops.ones(x.get_shape().as_list(), dtype=x.dtype)
return array_ops.ones_like(x)
def _ndims_from_shape(shape):
"""Returns `Tensor`'s `rank` implied by a `Tensor` shape."""
if shape.get_shape().ndims not in (None, 1):
raise ValueError("input is not a valid shape: not 1D")
if not shape.dtype.is_integer:
raise TypeError("input is not a valid shape: wrong dtype")
if shape.get_shape().is_fully_defined():
return constant_op.constant(shape.get_shape().as_list()[0])
return array_ops.shape(shape)[0]
def _is_scalar_from_shape(shape):
"""Returns `True` `Tensor` if `Tensor` shape implies a scalar."""
return _logical_equal(_ndims_from_shape(shape), 0)
class TransformedDistribution(distribution_lib.Distribution):
"""A Transformed Distribution.
A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,
and a deterministic, invertible, differentiable transform, `Y = g(X)`. The
transform is typically an instance of the `Bijector` class and the base
distribution is typically an instance of the `Distribution` class.
A `Bijector` is expected to implement the following functions:
- `forward`,
- `inverse`,
- `inverse_log_det_jacobian`.
The semantics of these functions are outlined in the `Bijector` documentation.
We now describe how a `TransformedDistribution` alters the input/outputs of a
`Distribution` associated with a random variable (rv) `X`.
Write `cdf(Y=y)` for an absolutely continuous cumulative distribution function
of random variable `Y`; write the probability density function `pdf(Y=y) :=
  d^k / (dy_1,...,dy_k) cdf(Y=y)` for its derivative with respect to `Y` evaluated at
`y`. Assume that `Y = g(X)` where `g` is a deterministic diffeomorphism,
i.e., a non-random, continuous, differentiable, and invertible function.
Write the inverse of `g` as `X = g^{-1}(Y)` and `(J o g)(x)` for the Jacobian
of `g` evaluated at `x`.
A `TransformedDistribution` implements the following operations:
* `sample`
Mathematically: `Y = g(X)`
Programmatically: `bijector.forward(distribution.sample(...))`
* `log_prob`
Mathematically: `(log o pdf)(Y=y) = (log o pdf o g^{-1})(y)
+ (log o abs o det o J o g^{-1})(y)`
Programmatically: `(distribution.log_prob(bijector.inverse(y))
+ bijector.inverse_log_det_jacobian(y))`
* `log_cdf`
Mathematically: `(log o cdf)(Y=y) = (log o cdf o g^{-1})(y)`
    Programmatically: `distribution.log_cdf(bijector.inverse(y))`
* and similarly for: `cdf`, `prob`, `log_survival_function`,
`survival_function`.
A simple example constructing a Log-Normal distribution from a Normal
distribution:
```python
ds = tfp.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Exp(),
name="LogNormalTransformedDistribution")
```
A `LogNormal` made from callables:
```python
ds = tfp.distributions
  log_normal = ds.TransformedDistribution(
    distribution=ds.Normal(loc=0., scale=1.),
    bijector=ds.bijectors.Inline(
      forward_fn=tf.exp,
      inverse_fn=tf.math.log,
      inverse_log_det_jacobian_fn=(
        lambda y: -tf.reduce_sum(tf.math.log(y), axis=-1))),
    name="LogNormalTransformedDistribution")
```
Another example constructing a Normal from a StandardNormal:
```python
ds = tfp.distributions
  normal = ds.TransformedDistribution(
    distribution=ds.Normal(loc=0., scale=1.),
    bijector=ds.bijectors.Affine(
      shift=-1.,
      scale_identity_multiplier=2.),
    name="NormalTransformedDistribution")
```
A `TransformedDistribution`'s batch- and event-shape are implied by the base
distribution unless explicitly overridden by `batch_shape` or `event_shape`
arguments. Specifying an overriding `batch_shape` (`event_shape`) is
permitted only if the base distribution has scalar batch-shape (event-shape).
The bijector is applied to the distribution as if the distribution possessed
the overridden shape(s). The following example demonstrates how to construct a
multivariate Normal as a `TransformedDistribution`.
```python
ds = tfp.distributions
# We will create two MVNs with batch_shape = event_shape = 2.
mean = [[-1., 0], # batch:0
[0., 1]] # batch:1
chol_cov = [[[1., 0],
[0, 1]], # batch:0
[[1, 0],
[2, 2]]] # batch:1
mvn1 = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Affine(shift=mean, scale_tril=chol_cov),
batch_shape=[2], # Valid because base_distribution.batch_shape == [].
event_shape=[2]) # Valid because base_distribution.event_shape == [].
mvn2 = ds.MultivariateNormalTriL(loc=mean, scale_tril=chol_cov)
# mvn1.log_prob(x) == mvn2.log_prob(x)
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
distribution,
bijector=None,
batch_shape=None,
event_shape=None,
validate_args=False,
name=None):
"""Construct a Transformed Distribution.
Args:
distribution: The base distribution instance to transform. Typically an
instance of `Distribution`.
bijector: The object responsible for calculating the transformation.
Typically an instance of `Bijector`. `None` means `Identity()`.
batch_shape: `integer` vector `Tensor` which overrides `distribution`
`batch_shape`; valid only if `distribution.is_scalar_batch()`.
event_shape: `integer` vector `Tensor` which overrides `distribution`
`event_shape`; valid only if `distribution.is_scalar_event()`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class. Default:
`bijector.name + distribution.name`.
"""
parameters = dict(locals())
name = name or (("" if bijector is None else bijector.name) +
distribution.name)
with ops.name_scope(name, values=[event_shape, batch_shape]) as name:
# For convenience we define some handy constants.
self._zero = constant_op.constant(0, dtype=dtypes.int32, name="zero")
self._empty = constant_op.constant([], dtype=dtypes.int32, name="empty")
if bijector is None:
bijector = identity_bijector.Identity(validate_args=validate_args)
# We will keep track of a static and dynamic version of
# self._is_{batch,event}_override. This way we can do more prior to graph
# execution, including possibly raising Python exceptions.
self._override_batch_shape = self._maybe_validate_shape_override(
batch_shape, distribution.is_scalar_batch(), validate_args,
"batch_shape")
self._is_batch_override = _logical_not(_logical_equal(
_ndims_from_shape(self._override_batch_shape), self._zero))
self._is_maybe_batch_override = bool(
tensor_util.constant_value(self._override_batch_shape) is None or
tensor_util.constant_value(self._override_batch_shape).size != 0)
self._override_event_shape = self._maybe_validate_shape_override(
event_shape, distribution.is_scalar_event(), validate_args,
"event_shape")
self._is_event_override = _logical_not(_logical_equal(
_ndims_from_shape(self._override_event_shape), self._zero))
self._is_maybe_event_override = bool(
tensor_util.constant_value(self._override_event_shape) is None or
tensor_util.constant_value(self._override_event_shape).size != 0)
# To convert a scalar distribution into a multivariate distribution we
# will draw dims from the sample dims, which are otherwise iid. This is
# easy to do except in the case that the base distribution has batch dims
# and we're overriding event shape. When that case happens the event dims
# will incorrectly be to the left of the batch dims. In this case we'll
# cyclically permute left the new dims.
self._needs_rotation = _logical_and(
self._is_event_override,
_logical_not(self._is_batch_override),
_logical_not(distribution.is_scalar_batch()))
override_event_ndims = _ndims_from_shape(self._override_event_shape)
self._rotate_ndims = _pick_scalar_condition(
self._needs_rotation, override_event_ndims, 0)
# We'll be reducing the head dims (if at all), i.e., this will be []
# if we don't need to reduce.
self._reduce_event_indices = math_ops.range(
self._rotate_ndims - override_event_ndims, self._rotate_ndims)
self._distribution = distribution
self._bijector = bijector
super(TransformedDistribution, self).__init__(
dtype=self._distribution.dtype,
reparameterization_type=self._distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
# We let TransformedDistribution access _graph_parents since this class
# is more like a baseclass than derived.
graph_parents=(distribution._graph_parents + # pylint: disable=protected-access
bijector.graph_parents),
name=name)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._distribution
@property
def bijector(self):
"""Function transforming x => y."""
return self._bijector
def _event_shape_tensor(self):
return self.bijector.forward_event_shape_tensor(
distribution_util.pick_vector(
self._is_event_override,
self._override_event_shape,
self.distribution.event_shape_tensor()))
def _event_shape(self):
# If there's a chance that the event_shape has been overridden, we return
# what we statically know about the `event_shape_override`. This works
# because: `_is_maybe_event_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `event_shape` or we do.
#
# Since the `bijector` may change the `event_shape`, we then forward what we
# know to the bijector. This allows the `bijector` to have final say in the
# `event_shape`.
static_override = tensor_util.constant_value_as_shape(
self._override_event_shape)
return self.bijector.forward_event_shape(
static_override
if self._is_maybe_event_override
else self.distribution.event_shape)
def _batch_shape_tensor(self):
return distribution_util.pick_vector(
self._is_batch_override,
self._override_batch_shape,
self.distribution.batch_shape_tensor())
def _batch_shape(self):
# If there's a chance that the batch_shape has been overridden, we return
# what we statically know about the `batch_shape_override`. This works
# because: `_is_maybe_batch_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `batch_shape` or we do.
#
# Notice that this implementation parallels the `_event_shape` except that
# the `bijector` doesn't get to alter the `batch_shape`. Recall that
# `batch_shape` is a property of a distribution while `event_shape` is
# shared between both the `distribution` instance and the `bijector`.
static_override = tensor_util.constant_value_as_shape(
self._override_batch_shape)
return (static_override
if self._is_maybe_batch_override
else self.distribution.batch_shape)
def _sample_n(self, n, seed=None):
sample_shape = _concat_vectors(
distribution_util.pick_vector(self._needs_rotation, self._empty, [n]),
self._override_batch_shape,
self._override_event_shape,
distribution_util.pick_vector(self._needs_rotation, [n], self._empty))
x = self.distribution.sample(sample_shape=sample_shape, seed=seed)
x = self._maybe_rotate_dims(x)
# We'll apply the bijector in the `_call_sample_n` function.
return x
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
# We override `_call_sample_n` rather than `_sample_n` so we can ensure that
# the result of `self.bijector.forward` is not modified (and thus caching
# works).
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
# First, generate samples. We will possibly generate extra samples in the
# event that we need to reinterpret the samples as part of the
# event_shape.
x = self._sample_n(n, seed, **kwargs)
# Next, we reshape `x` into its final form. We do this prior to the call
# to the bijector to ensure that the bijector caching works.
batch_event_shape = array_ops.shape(x)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
x = array_ops.reshape(x, final_shape)
# Finally, we apply the bijector's forward transformation. For caching to
# work, it is imperative that this is the last modification to the
# returned result.
y = self.bijector.forward(x, **kwargs)
y = self._set_sample_static_shape(y, sample_shape)
return y
def _log_prob(self, y):
# For caching to work, it is imperative that the bijector is the first to
# modify the input.
x = self.bijector.inverse(y)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_log_prob_for_one_fiber(y, x, ildj, event_ndims)
lp_on_fibers = [
self._finish_log_prob_for_one_fiber(y, x_i, ildj_i, event_ndims)
for x_i, ildj_i in zip(x, ildj)]
return math_ops.reduce_logsumexp(array_ops.stack(lp_on_fibers), axis=0)
def _finish_log_prob_for_one_fiber(self, y, x, ildj, event_ndims):
"""Finish computation of log_prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x)
if self._is_maybe_event_override:
log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)
log_prob += math_ops.cast(ildj, log_prob.dtype)
if self._is_maybe_event_override and isinstance(event_ndims, int):
log_prob.set_shape(
array_ops.broadcast_static_shape(
y.get_shape().with_rank_at_least(1)[:-event_ndims],
self.batch_shape))
return log_prob
def _prob(self, y):
x = self.bijector.inverse(y)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_prob_for_one_fiber(y, x, ildj, event_ndims)
prob_on_fibers = [
self._finish_prob_for_one_fiber(y, x_i, ildj_i, event_ndims)
for x_i, ildj_i in zip(x, ildj)]
return sum(prob_on_fibers)
def _finish_prob_for_one_fiber(self, y, x, ildj, event_ndims):
"""Finish computation of prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
prob = self.distribution.prob(x)
if self._is_maybe_event_override:
prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
prob *= math_ops.exp(math_ops.cast(ildj, prob.dtype))
if self._is_maybe_event_override and isinstance(event_ndims, int):
prob.set_shape(
array_ops.broadcast_static_shape(
y.get_shape().with_rank_at_least(1)[:-event_ndims],
self.batch_shape))
return prob
def _log_cdf(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("log_cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_cdf is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.log_cdf(x)
def _cdf(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("cdf is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.cdf(x)
def _log_survival_function(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("log_survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_survival_function is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.log_survival_function(x)
def _survival_function(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("survival_function is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.survival_function(x)
def _quantile(self, value):
if self._is_maybe_event_override:
raise NotImplementedError("quantile is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("quantile is not implemented when "
"bijector is not injective.")
# x_q is the "qth quantile" of X iff q = P[X <= x_q]. Now, since X =
# g^{-1}(Y), q = P[X <= x_q] = P[g^{-1}(Y) <= x_q] = P[Y <= g(x_q)],
# implies the qth quantile of Y is g(x_q).
inv_cdf = self.distribution.quantile(value)
return self.bijector.forward(inv_cdf)
def _entropy(self):
if not self.bijector.is_constant_jacobian:
raise NotImplementedError("entropy is not implemented")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("entropy is not implemented when "
"bijector is not injective.")
# Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
# can be shown that:
# H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
# If is_constant_jacobian then:
# E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
    # where c can be anything.
entropy = self.distribution.entropy()
if self._is_maybe_event_override:
# H[X] = sum_i H[X_i] if X_i are mutually independent.
# This means that a reduce_sum is a simple rescaling.
entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
dtype=entropy.dtype.base_dtype)
if self._is_maybe_batch_override:
new_shape = array_ops.concat([
_ones_like(self._override_batch_shape),
self.distribution.batch_shape_tensor()
], 0)
entropy = array_ops.reshape(entropy, new_shape)
multiples = array_ops.concat([
self._override_batch_shape,
_ones_like(self.distribution.batch_shape_tensor())
], 0)
entropy = array_ops.tile(entropy, multiples)
dummy = array_ops.zeros(
shape=array_ops.concat(
[self.batch_shape_tensor(), self.event_shape_tensor()],
0),
dtype=self.dtype)
event_ndims = (self.event_shape.ndims if self.event_shape.ndims is not None
else array_ops.size(self.event_shape_tensor()))
ildj = self.bijector.inverse_log_det_jacobian(
dummy, event_ndims=event_ndims)
entropy -= math_ops.cast(ildj, entropy.dtype)
entropy.set_shape(self.batch_shape)
return entropy
def _maybe_validate_shape_override(self, override_shape, base_is_scalar,
validate_args, name):
"""Helper to __init__ which ensures override batch/event_shape are valid."""
if override_shape is None:
override_shape = []
override_shape = ops.convert_to_tensor(override_shape, dtype=dtypes.int32,
name=name)
if not override_shape.dtype.is_integer:
raise TypeError("shape override must be an integer")
override_is_scalar = _is_scalar_from_shape(override_shape)
if tensor_util.constant_value(override_is_scalar):
return self._empty
dynamic_assertions = []
if override_shape.get_shape().ndims is not None:
if override_shape.get_shape().ndims != 1:
raise ValueError("shape override must be a vector")
elif validate_args:
dynamic_assertions += [check_ops.assert_rank(
override_shape, 1,
message="shape override must be a vector")]
if tensor_util.constant_value(override_shape) is not None:
if any(s <= 0 for s in tensor_util.constant_value(override_shape)):
raise ValueError("shape override must have positive elements")
elif validate_args:
dynamic_assertions += [check_ops.assert_positive(
override_shape,
message="shape override must have positive elements")]
is_both_nonscalar = _logical_and(_logical_not(base_is_scalar),
_logical_not(override_is_scalar))
if tensor_util.constant_value(is_both_nonscalar) is not None:
if tensor_util.constant_value(is_both_nonscalar):
raise ValueError("base distribution not scalar")
elif validate_args:
dynamic_assertions += [check_ops.assert_equal(
is_both_nonscalar, False,
message="base distribution not scalar")]
if not dynamic_assertions:
return override_shape
return control_flow_ops.with_dependencies(
dynamic_assertions, override_shape)
def _maybe_rotate_dims(self, x, rotate_right=False):
"""Helper which rolls left event_dims left or right event_dims right."""
needs_rotation_const = tensor_util.constant_value(self._needs_rotation)
if needs_rotation_const is not None and not needs_rotation_const:
return x
ndims = array_ops.rank(x)
n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims
return array_ops.transpose(
x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))
def _maybe_get_static_event_ndims(self):
if self.event_shape.ndims is not None:
return self.event_shape.ndims
event_ndims = array_ops.size(self.event_shape_tensor())
event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
if event_ndims_ is not None:
return event_ndims_
return event_ndims
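# A minimal NumPy sketch (not part of the class above) of the
# change-of-variables rule that `_log_prob` implements, specialized to
# Y = exp(X) with X ~ Normal(loc, scale); `log_normal_log_prob` is an
# illustrative name.
import numpy as np
def log_normal_log_prob(y, loc=0., scale=1.):
  x = np.log(y)      # bijector.inverse(y)
  ildj = -np.log(y)  # inverse_log_det_jacobian(y) for the Exp bijector
  log_px = (-0.5 * ((x - loc) / scale)**2
            - 0.5 * np.log(2. * np.pi) - np.log(scale))
  return log_px + ildj  # log p_X(g^{-1}(y)) + log |det J g^{-1}(y)|
# Should agree with scipy.stats.lognorm(s=1.).logpdf(2.) up to float error.
print(log_normal_log_prob(2.))  # ~= -1.8523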
|
tensorflow-master
|
tensorflow/python/ops/distributions/transformed_distribution.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Student's t distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"StudentT",
"StudentTWithAbsDfSoftplusScale",
]
@tf_export(v1=["distributions.StudentT"])
class StudentT(distribution.Distribution):
"""Student's t-distribution.
This distribution has parameters: degree of freedom `df`, location `loc`,
and `scale`.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; df, mu, sigma) = (1 + y**2 / df)**(-0.5 (df + 1)) / Z
where,
y = (x - mu) / sigma
Z = abs(sigma) sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1))
```
where:
* `loc = mu`,
  * `scale = sigma`,
  * `Z` is the normalization constant, and
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The StudentT distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ StudentT(df, loc=0, scale=1)
Y = loc + scale * X
```
Notice that `scale` has semantics more similar to standard deviation than
  variance. However, it is not actually the std. deviation; the Student's
t-distribution std. dev. is `scale sqrt(df / (df - 2))` when `df > 2`.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Student t distribution.
  single_dist = tfd.StudentT(df=3., loc=0., scale=1.)
# Evaluate the pdf at 1, returning a scalar Tensor.
single_dist.prob(1.)
# Define a batch of two scalar valued Student t's.
# The first has degrees of freedom 2, mean 1, and scale 11.
# The second 3, 2 and 22.
multi_dist = tfd.StudentT(df=[2, 3], loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
multi_dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
multi_dist.sample(3)
```
Arguments are broadcast when possible.
```python
# Define a batch of two Student's t distributions.
# Both have df 2 and mean 1, but different scales.
dist = tfd.StudentT(df=2, loc=1, scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
Compute the gradients of samples w.r.t. the parameters:
```python
df = tf.constant(2.0)
loc = tf.constant(2.0)
scale = tf.constant(11.0)
dist = tfd.StudentT(df=df, loc=loc, scale=scale)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [df, loc, scale])
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
df,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="StudentT"):
"""Construct Student's t distributions.
The distributions have degree of freedom `df`, mean `loc`, and scale
`scale`.
The parameters `df`, `loc`, and `scale` must be shaped in a way that
supports broadcasting (e.g. `df + loc + scale` is a valid operation).
Args:
df: Floating-point `Tensor`. The degrees of freedom of the
distribution(s). `df` must contain only positive values.
loc: Floating-point `Tensor`. The mean(s) of the distribution(s).
scale: Floating-point `Tensor`. The scaling factor(s) for the
distribution(s). Note that `scale` is not technically the standard
deviation of this distribution but has semantics more similar to
standard deviation than variance.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[df, loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(df)]
if validate_args else []):
self._df = array_ops.identity(df, name="df")
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype(
(self._df, self._loc, self._scale))
super(StudentT, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._df, self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("df", "loc", "scale"), (
[ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 3)))
@property
def df(self):
"""Degrees of freedom in these Student's t distribution(s)."""
return self._df
@property
def loc(self):
"""Locations of these Student's t distribution(s)."""
return self._loc
@property
def scale(self):
"""Scaling factors of these Student's t distribution(s)."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.df),
array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale)))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
array_ops.broadcast_static_shape(self.df.get_shape(),
self.loc.get_shape()),
self.scale.get_shape())
def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# The sampling method comes from the fact that if:
# X ~ Normal(0, 1)
# Z ~ Chi2(df)
# Y = X / sqrt(Z / df)
# then:
# Y ~ StudentT(df).
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
gamma_sample = random_ops.random_gamma(
[n],
0.5 * df,
beta=0.5,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, salt="student_t"))
samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
return samples * self.scale + self.loc # Abs(scale) not wanted.
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_unnormalized_prob(self, x):
y = (x - self.loc) / self.scale # Abs(scale) superfluous.
return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df)
def _log_normalization(self):
return (math_ops.log(math_ops.abs(self.scale)) +
0.5 * math_ops.log(self.df) +
0.5 * np.log(np.pi) +
math_ops.lgamma(0.5 * self.df) -
math_ops.lgamma(0.5 * (self.df + 1.)))
def _cdf(self, x):
# Take Abs(scale) to make subsequent where work correctly.
y = (x - self.loc) / math_ops.abs(self.scale)
x_t = self.df / (y**2. + self.df)
neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
return array_ops.where(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf)
def _entropy(self):
v = array_ops.ones(self.batch_shape_tensor(),
dtype=self.dtype)[..., array_ops.newaxis]
u = v * self.df[..., array_ops.newaxis]
beta_arg = array_ops.concat([u, v], -1) / 2.
return (math_ops.log(math_ops.abs(self.scale)) +
0.5 * math_ops.log(self.df) +
special_math_ops.lbeta(beta_arg) +
0.5 * (self.df + 1.) *
(math_ops.digamma(0.5 * (self.df + 1.)) -
math_ops.digamma(0.5 * self.df)))
@distribution_util.AppendDocstring(
"""The mean of Student's T equals `loc` if `df > 1`, otherwise it is
      `NaN`. If `self.allow_nan_stats=False`, then an exception will be raised
rather than returning `NaN`.""")
def _mean(self):
mean = self.loc * array_ops.ones(self.batch_shape_tensor(),
dtype=self.dtype)
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return array_ops.where(
math_ops.greater(
self.df,
array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
mean,
array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies(
[
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.df,
message="mean not defined for components of df <= 1"),
],
mean)
@distribution_util.AppendDocstring("""
The variance for Student's T equals
```
      scale**2 * df / (df - 2), when df > 2
infinity, when 1 < df <= 2
NaN, when df <= 1
```
""")
def _variance(self):
# We need to put the tf.where inside the outer tf.where to ensure we never
# hit a NaN in the gradient.
denom = array_ops.where(math_ops.greater(self.df, 2.),
self.df - 2.,
array_ops.ones_like(self.df))
# Abs(scale) superfluous.
var = (array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) *
math_ops.square(self.scale) * self.df / denom)
# When 1 < df <= 2, variance is infinite.
inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
result_where_defined = array_ops.where(
self.df > array_ops.fill(self.batch_shape_tensor(), 2.),
var,
array_ops.fill(self.batch_shape_tensor(), inf, name="inf"))
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return array_ops.where(
math_ops.greater(
self.df,
array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
result_where_defined,
array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies(
[
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.df,
message="variance not defined for components of df <= 1"),
],
result_where_defined)
def _mode(self):
return array_ops.identity(self.loc)
class StudentTWithAbsDfSoftplusScale(StudentT):
"""StudentT with `df = floor(abs(df))` and `scale = softplus(scale)`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.StudentT(tf.floor(tf.abs(df)), loc, "
"tf.nn.softplus(scale)) instead.",
warn_once=True)
def __init__(self,
df,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="StudentTWithAbsDfSoftplusScale"):
parameters = dict(locals())
with ops.name_scope(name, values=[df, scale]) as name:
super(StudentTWithAbsDfSoftplusScale, self).__init__(
df=math_ops.floor(math_ops.abs(df)),
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
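# A quick NumPy sketch (not part of the module above) of the sampling
# identity used in `_sample_n`: if X ~ Normal(0, 1) and Z ~ Chi2(df), then
# X / sqrt(Z / df) ~ StudentT(df).
import numpy as np
rng = np.random.default_rng(0)
df = 5.
x = rng.standard_normal(100000)
z = rng.chisquare(df, size=100000)  # Chi2(df) == Gamma(0.5 df, rate=0.5)
t = x / np.sqrt(z / df)
# The sample variance should approach df / (df - 2) ~= 1.667 for df = 5.
print(t.var())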
|
tensorflow-master
|
tensorflow/python/ops/distributions/student_t.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops.distributions.bijector_impl import Bijector
# pylint: enable=wildcard-import,unused-import
|
tensorflow-master
|
tensorflow/python/ops/distributions/bijector.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Normal",
"NormalWithSoftplusScale",
]
@tf_export(v1=["distributions.Normal"])
class Normal(distribution.Distribution):
"""The Normal distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z
Z = (2 pi sigma**2)**0.5
```
  where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and `Z`
is the normalization constant.
The Normal distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Normal(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Normal distribution.
dist = tfd.Normal(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Normals.
# The first has mean 1 and standard deviation 11, the second 2 and 22.
dist = tfd.Normal(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Normals.
# Both have mean 1, but different standard deviations.
dist = tfd.Normal(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Normal"):
"""Construct Normal distributions with mean and stddev `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor; the means of the distribution(s).
scale: Floating point tensor; the stddevs of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` have different `dtype`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Normal, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the mean."""
return self._loc
@property
def scale(self):
"""Distribution parameter for standard deviation."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc),
array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(),
self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
sampled = random_ops.random_normal(
shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed)
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_cdf(self, x):
return special_math.log_ndtr(self._z(x))
def _cdf(self, x):
return special_math.ndtr(self._z(x))
def _log_survival_function(self, x):
return special_math.log_ndtr(-self._z(x))
def _survival_function(self, x):
return special_math.ndtr(-self._z(x))
def _log_unnormalized_prob(self, x):
return -0.5 * math_ops.square(self._z(x))
def _log_normalization(self):
return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast scale.
scale = self.scale * array_ops.ones_like(self.loc)
return 0.5 * math.log(2. * math.pi * math.e) + math_ops.log(scale)
def _mean(self):
return self.loc * array_ops.ones_like(self.scale)
def _quantile(self, p):
return self._inv_z(special_math.ndtri(p))
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc)
def _mode(self):
return self._mean()
def _z(self, x):
"""Standardize input `x` to a unit normal."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
def _inv_z(self, z):
"""Reconstruct input `x` from a its normalized version."""
with ops.name_scope("reconstruct", values=[z]):
return z * self.scale + self.loc
class NormalWithSoftplusScale(Normal):
"""Normal with softplus applied to `scale`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Normal(loc, tf.nn.softplus(scale)) "
"instead.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="NormalWithSoftplusScale"):
parameters = dict(locals())
with ops.name_scope(name, values=[scale]) as name:
super(NormalWithSoftplusScale, self).__init__(
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
"""Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
"""
with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]):
one = constant_op.constant(1, dtype=n_a.dtype)
two = constant_op.constant(2, dtype=n_a.dtype)
half = constant_op.constant(0.5, dtype=n_a.dtype)
s_a_squared = math_ops.square(n_a.scale)
s_b_squared = math_ops.square(n_b.scale)
ratio = s_a_squared / s_b_squared
return (math_ops.squared_difference(n_a.loc, n_b.loc) / (two * s_b_squared)
+ half * (ratio - one - math_ops.log(ratio)))
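# A standalone NumPy check (not part of the module above) of the closed form
# implemented by `_kl_normal_normal`:
#   KL(N(mu_a, s_a) || N(mu_b, s_b))
#     = (mu_a - mu_b)**2 / (2 s_b**2)
#       + 0.5 * (s_a**2 / s_b**2 - 1 - log(s_a**2 / s_b**2))
# `kl_normal_normal_numpy` is an illustrative name.
import numpy as np
def kl_normal_normal_numpy(mu_a, s_a, mu_b, s_b):
  ratio = (s_a / s_b)**2
  return ((mu_a - mu_b)**2 / (2. * s_b**2)
          + 0.5 * (ratio - 1. - np.log(ratio)))
assert kl_normal_normal_numpy(0., 1., 0., 1.) == 0.  # KL(p || p) == 0
print(kl_normal_normal_numpy(0., 1., 1., 2.))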
|
tensorflow-master
|
tensorflow/python/ops/distributions/normal.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import hashlib
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util import tf_inspect
def assert_integer_form(x,
data=None,
summarize=None,
message=None,
int_dtype=None,
name="assert_integer_form"):
"""Assert that x has integer components (or floats equal to integers).
Args:
x: Floating-point `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
implies the smallest possible signed int will be used for casting.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
"""
with ops.name_scope(name, values=[x, data]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return control_flow_ops.no_op()
message = message or "{} has non-integer components".format(x)
if int_dtype is None:
try:
int_dtype = {
dtypes.float16: dtypes.int16,
dtypes.float32: dtypes.int32,
dtypes.float64: dtypes.int64,
}[x.dtype.base_dtype]
except KeyError:
raise TypeError("Unrecognized type {}".format(x.dtype.name))
return check_ops.assert_equal(
x,
math_ops.cast(math_ops.cast(x, int_dtype), x.dtype),
data=data,
summarize=summarize,
message=message,
name=name)
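# A NumPy illustration (not part of this module) of the round-trip test
# embedded above: a float tensor is integer-valued iff casting to int and
# back to float preserves every entry.
import numpy as np
x = np.array([1.0, 2.5])
print(x.astype(np.int64).astype(np.float64) == x)  # [ True False]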
def assert_symmetric(matrix):
matrix_t = array_ops.matrix_transpose(matrix)
return control_flow_ops.with_dependencies(
[check_ops.assert_equal(matrix, matrix_t)], matrix)
def embed_check_nonnegative_integer_form(
x, name="embed_check_nonnegative_integer_form"):
"""Assert x is a non-negative tensor, and optionally of integers."""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
assertions = [
check_ops.assert_non_negative(
x, message="'{}' must be non-negative.".format(x)),
]
if not x.dtype.is_integer:
assertions += [
assert_integer_form(
x,
message="'{}' cannot contain fractional components.".format(x)),
]
return control_flow_ops.with_dependencies(assertions, x)
def same_dynamic_shape(a, b):
"""Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape.
"""
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
# Here we can't just do math_ops.equal(a.shape, b.shape), since
# static shape inference may break the equality comparison between
# shape(a) and shape(b) in math_ops.equal.
def all_shapes_equal():
return math_ops.reduce_all(
math_ops.equal(
array_ops.concat(
[array_ops.shape(a), array_ops.shape(b)], 0),
array_ops.concat(
[array_ops.shape(b), array_ops.shape(a)], 0)))
# One of the shapes isn't fully defined, so we need to use the dynamic
# shape.
return control_flow_ops.cond(
math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
all_shapes_equal, lambda: constant_op.constant(False))
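# A NumPy illustration (not part of this module) of the concat trick above:
# for equal-rank a and b, the shapes match elementwise iff
# concat([shape(a), shape(b)]) == concat([shape(b), shape(a)]) everywhere.
import numpy as np
sa, sb = np.array([2, 3]), np.array([2, 3])
print(bool(np.all(np.concatenate([sa, sb]) == np.concatenate([sb, sa]))))  # True
sb = np.array([2, 4])
print(bool(np.all(np.concatenate([sa, sb]) == np.concatenate([sb, sa]))))  # False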
def maybe_get_static_value(x, dtype=None):
"""Helper which tries to return a static value.
  Given `x`, extract its value statically, optionally casting to a specific
dtype. If this is not possible, None is returned.
Args:
x: `Tensor` for which to extract a value statically.
dtype: Optional dtype to cast to.
Returns:
Statically inferred value if possible, otherwise None.
"""
if x is None:
return x
try:
# This returns an np.ndarray.
x_ = tensor_util.constant_value(x)
except TypeError:
x_ = x
if x_ is None or dtype is None:
return x_
return np.array(x_, dtype)
def get_logits_and_probs(logits=None,
probs=None,
multidimensional=False,
validate_args=False,
name="get_logits_and_probs",
dtype=None):
"""Converts logit to probabilities (or vice-versa), and returns both.
Args:
logits: Floating-point `Tensor` representing log-odds.
probs: Floating-point `Tensor` representing probabilities.
    multidimensional: Python `bool`, default `False`. If `True`, the last
      dimension of `logits` or `probs`, a `[N1, N2, ..., k]`-shaped tensor,
      indexes `k` classes, i.e., each entry along it is the logit or
      probability of one of `shape[-1]` classes.
validate_args: Python `bool`, default `False`. When `True`, either assert `0
<= probs <= 1` (if not `multidimensional`) or that the last dimension of
`probs` sums to one.
name: A name for this operation (optional).
dtype: `tf.DType` to prefer when converting args to `Tensor`s.
Returns:
logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
`1`, then the corresponding entry in the returned logit will be `-Inf` and
`Inf` respectively.
Raises:
ValueError: if neither `probs` nor `logits` were passed in, or both were.
"""
with ops.name_scope(name, values=[probs, logits]):
if (probs is None) == (logits is None):
raise ValueError("Must pass probs or logits, but not both.")
if probs is None:
logits = ops.convert_to_tensor(logits, name="logits", dtype=dtype)
if not logits.dtype.is_floating:
raise TypeError("logits must having floating type.")
# We can early return since we constructed probs and therefore know
# they're valid.
if multidimensional:
if validate_args:
logits = embed_check_categorical_event_shape(logits)
return logits, nn.softmax(logits, name="probs")
return logits, math_ops.sigmoid(logits, name="probs")
probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype)
if not probs.dtype.is_floating:
raise TypeError("probs must having floating type.")
if validate_args:
with ops.name_scope("validate_probs"):
one = constant_op.constant(1., probs.dtype)
dependencies = [check_ops.assert_non_negative(probs)]
if multidimensional:
probs = embed_check_categorical_event_shape(probs)
dependencies += [
check_ops.assert_near(
math_ops.reduce_sum(probs, -1),
one,
message="probs does not sum to 1.")
]
else:
dependencies += [
check_ops.assert_less_equal(
probs, one, message="probs has components greater than 1.")
]
probs = control_flow_ops.with_dependencies(dependencies, probs)
with ops.name_scope("logits"):
if multidimensional:
        # Here we don't compute the multidimensional case in a manner
        # consistent with the unidimensional case; instead we follow the TF
        # convention. Typically, you might expect to see
        # logits = log(probs) - log(probs[pivot]). A side-effect of
        # being consistent with the TF approach is that the unidimensional case
        # implicitly handles the second dimension but the multidimensional case
        # explicitly keeps the pivot dimension.
return math_ops.log(probs), probs
return math_ops.log(probs) - math_ops.log1p(-1. * probs), probs
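# A small NumPy sketch (not part of this module) of the binary round trip
# implemented above: probs = sigmoid(logits) while
# logits = log(probs) - log1p(-probs).
import numpy as np
probs = np.array([0.1, 0.5, 0.9])
logits = np.log(probs) - np.log1p(-probs)
recovered = 1. / (1. + np.exp(-logits))
print(np.allclose(recovered, probs))  # True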
def _is_known_unsigned_by_dtype(dt):
"""Helper returning True if dtype is known to be unsigned."""
return {
dtypes.bool: True,
dtypes.uint8: True,
dtypes.uint16: True,
}.get(dt.base_dtype, False)
def _is_known_signed_by_dtype(dt):
"""Helper returning True if dtype is known to be signed."""
return {
dtypes.float16: True,
dtypes.float32: True,
dtypes.float64: True,
dtypes.int8: True,
dtypes.int16: True,
dtypes.int32: True,
dtypes.int64: True,
}.get(dt.base_dtype, False)
def _is_known_dtype(dt):
"""Helper returning True if dtype is known."""
return _is_known_unsigned_by_dtype(dt) or _is_known_signed_by_dtype(dt)
def _largest_integer_by_dtype(dt):
"""Helper returning the largest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if dt.is_floating:
return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1))
if dt.is_integer:
return np.iinfo(dt.as_numpy_dtype).max
if dt.base_dtype == dtypes.bool:
return int(1)
# We actually can't land here but keep the case for completeness.
raise TypeError("Unrecognized dtype: {}".format(dt.name))
def _smallest_integer_by_dtype(dt):
"""Helper returning the smallest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if _is_known_unsigned_by_dtype(dt):
return 0
return -1 * _largest_integer_by_dtype(dt)
def _is_integer_like_by_dtype(dt):
"""Helper returning True if dtype.is_integer or is `bool`."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
return dt.is_integer or dt.base_dtype == dtypes.bool
def embed_check_categorical_event_shape(
categorical_param, name="embed_check_categorical_event_shape"):
"""Embeds checks that categorical distributions don't have too many classes.
A categorical-type distribution is one which, e.g., returns the class label
rather than a one-hot encoding. E.g., `Categorical(probs)`.
Since distributions output samples in the same dtype as the parameters, we
must ensure that casting doesn't lose precision. That is, the
`parameter.dtype` implies a maximum number of classes. However, since shape is
`int32` and categorical variables are presumed to be indexes into a `Tensor`,
we must also ensure that the number of classes is no larger than the largest
possible `int32` index, i.e., `2**31-1`.
In other words the number of classes, `K`, must satisfy the following
condition:
```python
K <= min(
      int(2**31 - 1),  # Largest int32 index.
{
dtypes.float16: int(2**11), # Largest int as a float16.
dtypes.float32: int(2**24),
dtypes.float64: int(2**53),
}.get(categorical_param.dtype.base_dtype, 0))
```
Args:
categorical_param: Floating-point `Tensor` representing parameters of
distribution over categories. The rightmost shape is presumed to be the
number of categories.
name: A name for this operation (optional).
Returns:
categorical_param: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `categorical_param` has an unknown `dtype`.
ValueError: if we can statically identify `categorical_param` as being too
large (for being closed under int32/float casting).
"""
with ops.name_scope(name, values=[categorical_param]):
x = ops.convert_to_tensor(categorical_param, name="categorical_param")
# The size must not exceed both of:
# - The largest possible int32 (since categorical values are presumed to be
# indexes into a Tensor).
# - The largest possible integer exactly representable under the given
# floating-point dtype (since we need to cast to/from).
#
# The chosen floating-point thresholds are 2**(1 + mantissa_bits).
# For more details, see:
# https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
x_dtype = x.dtype.base_dtype
max_event_size = (
_largest_integer_by_dtype(x_dtype) if x_dtype.is_floating else 0)
if max_event_size == 0:
raise TypeError("Unable to validate size of unrecognized dtype "
"({}).".format(x_dtype.name))
try:
x_shape_static = x.get_shape().with_rank_at_least(1)
except ValueError:
raise ValueError("A categorical-distribution parameter must have "
"at least 1 dimension.")
if tensor_shape.dimension_value(x_shape_static[-1]) is not None:
event_size = x_shape_static.dims[-1].value
if event_size < 2:
raise ValueError("A categorical-distribution parameter must have at "
"least 2 events.")
if event_size > max_event_size:
raise ValueError("Number of classes exceeds `dtype` precision, i.e., "
"{} implies shape ({}) cannot exceed {}.".format(
x_dtype.name, event_size, max_event_size))
return x
else:
event_size = array_ops.shape(x, name="x_shape")[-1]
return control_flow_ops.with_dependencies([
check_ops.assert_rank_at_least(
x,
1,
message=("A categorical-distribution parameter must have "
"at least 1 dimension.")),
check_ops.assert_greater_equal(
array_ops.shape(x)[-1],
2,
message=("A categorical-distribution parameter must have at "
"least 2 events.")),
check_ops.assert_less_equal(
event_size,
max_event_size,
message="Number of classes exceeds `dtype` precision, "
"i.e., {} dtype cannot exceed {} shape.".format(
x_dtype.name, max_event_size)),
], x)
def embed_check_integer_casting_closed(x,
target_dtype,
assert_nonnegative=True,
name="embed_check_casting_closed"):
"""Ensures integers remain unaffected despite casting to/from int/float types.
Example integer-types: `uint8`, `int32`, `bool`.
Example floating-types: `float32`, `float64`.
The largest possible integer representable by an IEEE754 floating-point is
`2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is
`2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have
integer-form values can be cast to some other type without loss of precision.
The smallest representable integer is the negative of the largest
representable integer, except for types: `uint8`, `uint16`, `bool`. For these
types, the smallest representable integer is `0`.
Args:
x: `Tensor` representing integer-form values.
target_dtype: TF `dtype` under which `x` should have identical values.
assert_nonnegative: `bool` indicating `x` should contain nonnegative values.
name: A name for this operation (optional).
Returns:
x: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `x` is neither integer- nor floating-type.
TypeError: if `target_dtype` is neither integer- nor floating-type.
TypeError: if neither `x` nor `target_dtype` are integer-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if (not _is_integer_like_by_dtype(x.dtype) and not x.dtype.is_floating):
raise TypeError("{}.dtype must be floating- or "
"integer-type.".format(x.dtype.name))
if (not _is_integer_like_by_dtype(target_dtype) and
not target_dtype.is_floating):
raise TypeError("target_dtype ({}) must be floating- or "
"integer-type.".format(target_dtype.name))
if (not _is_integer_like_by_dtype(x.dtype) and
not _is_integer_like_by_dtype(target_dtype)):
raise TypeError("At least one of {}.dtype ({}) and target_dtype ({}) "
"must be integer-type.".format(x, x.dtype.name,
target_dtype.name))
assertions = []
if assert_nonnegative:
assertions += [
check_ops.assert_non_negative(
x, message="Elements must be non-negative."),
]
if x.dtype.is_floating:
# Being here means _is_integer_like_by_dtype(target_dtype) = True.
# Since this check subsumes the magnitude checks below, it is the only one we need.
assertions += [
assert_integer_form(
x,
int_dtype=target_dtype,
message="Elements must be {}-equivalent.".format(
target_dtype.name)),
]
else:
if (_largest_integer_by_dtype(x.dtype) >
_largest_integer_by_dtype(target_dtype)):
# Cast may lose integer precision.
assertions += [
check_ops.assert_less_equal(
x,
_largest_integer_by_dtype(target_dtype),
message=("Elements cannot exceed {}.".format(
_largest_integer_by_dtype(target_dtype)))),
]
if (not assert_nonnegative and (_smallest_integer_by_dtype(
x.dtype) < _smallest_integer_by_dtype(target_dtype))):
assertions += [
check_ops.assert_greater_equal(
x,
_smallest_integer_by_dtype(target_dtype),
message=("Elements cannot be smaller than {}.".format(
_smallest_integer_by_dtype(target_dtype)))),
]
if not assertions:
return x
return control_flow_ops.with_dependencies(assertions, x)
def log_combinations(n, counts, name="log_combinations"):
"""Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / prod_i n_i!```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
"""
# First a bit about the number of ways counts could have come in:
# E.g. if counts = [1, 2], then this is 3 choose 2.
# In general, this is (sum counts)! / prod(counts!)
# The sum should be along the last dimension of counts. This is the
# "distribution" dimension. Here n a priori represents the sum of counts.
with ops.name_scope(name, values=[n, counts]):
n = ops.convert_to_tensor(n, name="n")
counts = ops.convert_to_tensor(counts, name="counts")
total_permutations = math_ops.lgamma(n + 1)
counts_factorial = math_ops.lgamma(counts + 1)
redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1])
return total_permutations - redundant_permutations
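# A quick numpy/scipy sanity check of the lgamma formulation above (a hedged
# sketch, not executed as part of this module):
#   from scipy.special import gammaln
#   n, counts = 3., np.array([1., 2.])
#   gammaln(n + 1) - gammaln(counts + 1).sum()  # ==> log(3!/(1!*2!)) = log(3)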
def matrix_diag_transform(matrix, transform=None, name=None):
"""Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.
Create a trainable covariance defined by a Cholesky factor:
```python
# Transform network layer into 2 x 2 array.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
# Make the diagonal positive. If the upper triangle were zero, this would be a
# valid Cholesky factor.
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# LinearOperatorLowerTriangular ignores the upper triangle.
operator = LinearOperatorLowerTriangular(chol)
```
Example of heteroskedastic 2-D linear regression.
```python
tfd = tfp.distributions
# Get a trainable Cholesky factor.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# Get a trainable mean.
mu = tf.contrib.layers.fully_connected(activations, 2)
# This is a fully trainable multivariate normal!
dist = tfd.MultivariateNormalTriL(mu, chol)
# Standard log loss. Minimizing this will "train" mu and chol, and then dist
# will be a distribution predicting labels as multivariate Gaussians.
loss = -1 * tf.reduce_mean(dist.log_prob(labels))
```
Args:
matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
equal.
transform: Element-wise function mapping `Tensors` to `Tensors`. To be
applied to the diagonal of `matrix`. If `None`, `matrix` is returned
unchanged. Defaults to `None`.
name: A name to give created ops. Defaults to "matrix_diag_transform".
Returns:
A `Tensor` with same shape and `dtype` as `matrix`.
"""
with ops.name_scope(name, "matrix_diag_transform", [matrix]):
matrix = ops.convert_to_tensor(matrix, name="matrix")
if transform is None:
return matrix
# Replace the diag with transformed diag.
diag = array_ops.matrix_diag_part(matrix)
transformed_diag = transform(diag)
transformed_mat = array_ops.matrix_set_diag(matrix, transformed_diag)
return transformed_mat
def rotate_transpose(x, shift, name="rotate_transpose"):
"""Circularly moves dims left or right.
Effectively identical to:
```python
numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))
```
When `validate_args=False`, additional graph-runtime checks are
performed. These checks entail moving data from the GPU to the CPU.
Example:
```python
x = tf.random.normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4].
rotate_transpose(x, -1).shape == [2, 3, 4, 1]
rotate_transpose(x, -2).shape == [3, 4, 1, 2]
rotate_transpose(x, 1).shape == [4, 1, 2, 3]
rotate_transpose(x, 2).shape == [3, 4, 1, 2]
rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1]
rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3]
```
Args:
x: `Tensor`.
shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
transpose right (shift>0).
name: Python `str`. The name to give this op.
Returns:
rotated_x: Input `Tensor` with dimensions circularly rotated by shift.
Raises:
TypeError: if shift is not integer type.
"""
with ops.name_scope(name, values=[x, shift]):
x = ops.convert_to_tensor(x, name="x")
shift = ops.convert_to_tensor(shift, name="shift")
# We do not assign back to preserve constant-ness.
check_ops.assert_integer(shift)
shift_value_static = tensor_util.constant_value(shift)
ndims = x.get_shape().ndims
if ndims is not None and shift_value_static is not None:
if ndims < 2:
return x
shift_value_static = np.sign(shift_value_static) * (
abs(shift_value_static) % ndims)
if shift_value_static == 0:
return x
perm = np.roll(np.arange(ndims), shift_value_static)
return array_ops.transpose(x, perm=perm)
else:
# Consider if we always had a positive shift, and some specified
# direction.
# When shifting left we want the new array:
# last(x, n-shift) + first(x, shift)
# and if shifting right then we want:
# last(x, shift) + first(x, n-shift)
# Observe that last(a) == slice(a, n) and first(a) == slice(0, a).
# Also, we can encode direction and shift as one: direction * shift.
# Combining these facts, we have:
# a = cond(shift<0, -shift, n-shift)
# last(x, n-a) + first(x, a) == x[a:n] + x[0:a]
# Finally, we transform shift by modulo length so it can be specified
# independently from the array upon which it operates (like python).
ndims = array_ops.rank(x)
shift = array_ops.where(
math_ops.less(shift, 0), math_ops.mod(-shift, ndims),
ndims - math_ops.mod(shift, ndims))
first = math_ops.range(0, shift)
last = math_ops.range(shift, ndims)
perm = array_ops.concat([last, first], 0)
return array_ops.transpose(x, perm=perm)
def pick_vector(cond, true_vector, false_vector, name="pick_vector"):
"""Picks possibly different length row `Tensor`s based on condition.
Value `Tensor`s should have exactly one dimension.
If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
`false_vector` is immediately returned. I.e., no graph nodes are created and
no validation happens.
Args:
cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
true_vector: `Tensor` of one dimension. Returned when cond is `True`.
false_vector: `Tensor` of one dimension. Returned when cond is `False`.
name: Python `str`. The name to give this op.
Example:
```python
pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))  # [10, 11]
pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))  # [15, 16, 17]
```
Returns:
true_or_false_vector: `Tensor`.
Raises:
TypeError: if `cond.dtype != tf.bool`
TypeError: if `cond` is not a constant and
`true_vector.dtype != false_vector.dtype`
"""
with ops.name_scope(name, values=(cond, true_vector, false_vector)):
cond = ops.convert_to_tensor(cond, name="cond")
if cond.dtype != dtypes.bool:
raise TypeError("%s.dtype=%s which is not %s" %
(cond, cond.dtype, dtypes.bool))
cond_value_static = tensor_util.constant_value(cond)
if cond_value_static is not None:
return true_vector if cond_value_static else false_vector
true_vector = ops.convert_to_tensor(true_vector, name="true_vector")
false_vector = ops.convert_to_tensor(false_vector, name="false_vector")
if true_vector.dtype != false_vector.dtype:
raise TypeError(
"%s.dtype=%s does not match %s.dtype=%s" %
(true_vector, true_vector.dtype, false_vector, false_vector.dtype))
n = array_ops.shape(true_vector)[0]
return array_ops.slice(
array_ops.concat([true_vector, false_vector], 0),
[array_ops.where(cond, 0, n)], [array_ops.where(cond, n, -1)])
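# How the concat/slice trick above works (hedged walkthrough): both vectors
# are concatenated, then a single dynamic slice selects one of them.
#   true_vector = [10, 11], false_vector = [15, 16, 17], n = 2
#   concat ==> [10, 11, 15, 16, 17]
#   cond=True  ==> slice(begin=[0], size=[2])  ==> [10, 11]
#   cond=False ==> slice(begin=[2], size=[-1]) ==> [15, 16, 17]
# (A size of -1 means "everything from `begin` to the end".)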
def prefer_static_broadcast_shape(shape1,
shape2,
name="prefer_static_broadcast_shape"):
"""Convenience function which statically broadcasts shape when possible.
Args:
shape1: `1-D` integer `Tensor`. Already converted to tensor!
shape2: `1-D` integer `Tensor`. Already converted to tensor!
name: A string name to prepend to created ops.
Returns:
The broadcast shape, either as `TensorShape` (if broadcast can be done
statically), or as a `Tensor`.
"""
with ops.name_scope(name, values=[shape1, shape2]):
def make_shape_tensor(x):
return ops.convert_to_tensor(x, name="shape", dtype=dtypes.int32)
def get_tensor_shape(s):
if isinstance(s, tensor_shape.TensorShape):
return s
s_ = tensor_util.constant_value(make_shape_tensor(s))
if s_ is not None:
return tensor_shape.TensorShape(s_)
return None
def get_shape_tensor(s):
if not isinstance(s, tensor_shape.TensorShape):
return make_shape_tensor(s)
if s.is_fully_defined():
return make_shape_tensor(s.as_list())
raise ValueError("Cannot broadcast from partially "
"defined `TensorShape`.")
shape1_ = get_tensor_shape(shape1)
shape2_ = get_tensor_shape(shape2)
if shape1_ is not None and shape2_ is not None:
return array_ops.broadcast_static_shape(shape1_, shape2_)
shape1_ = get_shape_tensor(shape1)
shape2_ = get_shape_tensor(shape2)
return array_ops.broadcast_dynamic_shape(shape1_, shape2_)
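# Quick example (hedged): when both shapes are statically known, the result is
# a `TensorShape` and no graph ops are created.
#   prefer_static_broadcast_shape(
#       tensor_shape.TensorShape([2, 1]),
#       tensor_shape.TensorShape([3]))  # ==> TensorShape([2, 3])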
def prefer_static_rank(x):
"""Return static rank of tensor `x` if available, else `tf.rank(x)`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static rank is obtainable), else `Tensor`.
"""
return prefer_static_value(array_ops.rank(x))
def prefer_static_shape(x):
"""Return static shape of tensor `x` if available, else `tf.shape(x)`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static shape is obtainable), else `Tensor`.
"""
return prefer_static_value(array_ops.shape(x))
def prefer_static_value(x):
"""Return static value of tensor `x` if available, else `x`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static value is obtainable), else `Tensor`.
"""
static_x = tensor_util.constant_value(x)
if static_x is not None:
return static_x
return x
def gen_new_seed(seed, salt):
"""Generate a new seed, from the given seed and salt."""
if seed is None:
return None
string = (str(seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
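# Usage sketch (hedged): since the new seed depends only on md5 of the
# stringified inputs, it is deterministic across processes and platforms.
#   gen_new_seed(None, salt="beta")  # ==> None (no seed stays no seed)
#   gen_new_seed(42, salt="beta")    # ==> a fixed int in [0, 2**31)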
def fill_triangular(x, upper=False, name=None):
"""Creates a (batch of) triangular matrix from a vector of inputs.
Created matrix can be lower- or upper-triangular. (It is more efficient to
create the matrix as upper or lower, rather than transpose.)
Triangular matrix elements are filled in a clockwise spiral. See example,
below.
If `x.get_shape()` is `[b1, b2, ..., bB, d]` then the output shape is
`[b1, b2, ..., bB, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
`n = int(np.sqrt(0.25 + 2. * d) - 0.5)`.
Example:
```python
fill_triangular([1, 2, 3, 4, 5, 6])
# ==> [[4, 0, 0],
# [6, 5, 0],
# [3, 2, 1]]
fill_triangular([1, 2, 3, 4, 5, 6], upper=True)
# ==> [[1, 2, 3],
# [0, 5, 6],
# [0, 0, 4]]
```
For comparison, a pure numpy version of this function can be found in
`util_test.py`, function `_fill_triangular`.
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
tril: `Tensor` with lower (or upper) triangular elements filled from `x`.
Raises:
ValueError: if `x` cannot be mapped to a triangular matrix.
"""
with ops.name_scope(name, "fill_triangular", values=[x]):
x = ops.convert_to_tensor(x, name="x")
if tensor_shape.dimension_value(
x.shape.with_rank_at_least(1)[-1]) is not None:
# Formula derived by solving for n: m = n(n+1)/2.
m = np.int32(x.shape.dims[-1].value)
n = np.sqrt(0.25 + 2. * m) - 0.5
if n != np.floor(n):
raise ValueError("Input right-most shape ({}) does not "
"correspond to a triangular matrix.".format(m))
n = np.int32(n)
static_final_shape = x.shape[:-1].concatenate([n, n])
else:
m = array_ops.shape(x)[-1]
# For derivation, see above. Casting automatically lops off the 0.5, so we
# omit it. We don't validate n is an integer because this has
# graph-execution cost; an error will be thrown from the reshape, below.
n = math_ops.cast(
math_ops.sqrt(0.25 + math_ops.cast(2 * m, dtype=dtypes.float32)),
dtype=dtypes.int32)
static_final_shape = x.shape.with_rank_at_least(1)[:-1].concatenate(
[None, None])
# We now concatenate the "tail" of `x` to `x` (and reverse one of them).
#
# We do this based on the insight that the input `x` provides `ceil(n/2)`
# rows of an `n x n` matrix, some of which will get zeroed out being on the
# wrong side of the diagonal. The first row will not get zeroed out at all,
# and we need `floor(n/2)` more rows, so the first is what we omit from
# `x_tail`. If we then stack those `ceil(n/2)` rows with the `floor(n/2)`
# rows provided by a reversed tail, it is exactly the other set of elements
# of the reversed tail which will be zeroed out for being on the wrong side
# of the diagonal further up/down the matrix. And, in doing-so, we've filled
# the triangular matrix in a clock-wise spiral pattern. Neat!
#
# Try it out in numpy:
# n = 3
# x = np.arange(n * (n + 1) / 2)
# m = x.shape[0]
# n = np.int32(np.sqrt(.25 + 2 * m) - .5)
# x_tail = x[(m - (n**2 - m)):]
# np.concatenate([x_tail, x[::-1]], 0).reshape(n, n) # lower
# # ==> array([[3, 4, 5],
# [5, 4, 3],
# [2, 1, 0]])
# np.concatenate([x, x_tail[::-1]], 0).reshape(n, n) # upper
# # ==> array([[0, 1, 2],
# [3, 4, 5],
# [5, 4, 3]])
#
# Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't
# correctly handle `m == n == 1`. Hence, we do nonnegative indexing.
# Furthermore observe that:
# m - (n**2 - m)
# = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2)
# = 2 (n**2 / 2 + n / 2) - n**2
# = n**2 + n - n**2
# = n
ndims = prefer_static_rank(x)
if upper:
x_list = [x, array_ops.reverse(x[..., n:], axis=[ndims - 1])]
else:
x_list = [x[..., n:], array_ops.reverse(x, axis=[ndims - 1])]
new_shape = (
static_final_shape.as_list() if static_final_shape.is_fully_defined()
else array_ops.concat([array_ops.shape(x)[:-1], [n, n]], axis=0))
x = array_ops.reshape(array_ops.concat(x_list, axis=-1), new_shape)
x = array_ops.matrix_band_part(
x, num_lower=(0 if upper else -1), num_upper=(-1 if upper else 0))
x.set_shape(static_final_shape)
return x
def fill_triangular_inverse(x, upper=False, name=None):
"""Creates a vector from a (batch of) triangular matrix.
The vector is created from the lower-triangular or upper-triangular portion
depending on the value of the parameter `upper`.
If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is
`[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`.
Example:
```python
fill_triangular_inverse(
[[4, 0, 0],
[6, 5, 0],
[3, 2, 1]])
# ==> [1, 2, 3, 4, 5, 6]
fill_triangular_inverse(
[[1, 2, 3],
[0, 5, 6],
[0, 0, 4]], upper=True)
# ==> [1, 2, 3, 4, 5, 6]
```
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower
(or upper) triangular elements from `x`.
"""
with ops.name_scope(name, "fill_triangular_inverse", values=[x]):
x = ops.convert_to_tensor(x, name="x")
if tensor_shape.dimension_value(
x.shape.with_rank_at_least(2)[-1]) is not None:
n = np.int32(x.shape.dims[-1].value)
m = np.int32((n * (n + 1)) // 2)
static_final_shape = x.shape[:-2].concatenate([m])
else:
n = array_ops.shape(x)[-1]
m = (n * (n + 1)) // 2
static_final_shape = x.shape.with_rank_at_least(2)[:-2].concatenate(
[None])
ndims = prefer_static_rank(x)
if upper:
initial_elements = x[..., 0, :]
triangular_portion = x[..., 1:, :]
else:
initial_elements = array_ops.reverse(x[..., -1, :], axis=[ndims - 2])
triangular_portion = x[..., :-1, :]
rotated_triangular_portion = array_ops.reverse(
array_ops.reverse(triangular_portion, axis=[ndims - 1]),
axis=[ndims - 2])
consolidated_matrix = triangular_portion + rotated_triangular_portion
end_sequence = array_ops.reshape(
consolidated_matrix,
array_ops.concat([array_ops.shape(x)[:-2], [n * (n - 1)]], axis=0))
y = array_ops.concat([initial_elements, end_sequence[..., :m - n]], axis=-1)
y.set_shape(static_final_shape)
return y
def tridiag(below=None, diag=None, above=None, name=None):
"""Creates a matrix with values set above, below, and on the diagonal.
Example:
```python
tridiag(below=[1., 2., 3.],
diag=[4., 5., 6., 7.],
above=[8., 9., 10.])
# ==> array([[ 4., 8., 0., 0.],
# [ 1., 5., 9., 0.],
# [ 0., 2., 6., 10.],
# [ 0., 0., 3., 7.]], dtype=float32)
```
Warning: This Op is intended for convenience, not efficiency.
Args:
below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below
diagonal part. `None` is logically equivalent to `below = 0`.
diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal
part. `None` is logically equivalent to `diag = 0`.
above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above
diagonal part. `None` is logically equivalent to `above = 0`.
name: Python `str`. The name to give this op.
Returns:
tridiag: `Tensor` with values set above, below and on the diagonal.
Raises:
ValueError: if all inputs are `None`.
"""
def _pad(x):
"""Prepends and appends a zero to every vector in a batch of vectors."""
shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0)
z = array_ops.zeros(shape, dtype=x.dtype)
return array_ops.concat([z, x, z], axis=-1)
def _add(*x):
"""Adds list of Tensors, ignoring `None`."""
s = None
for y in x:
if y is None:
continue
elif s is None:
s = y
else:
s += y
if s is None:
raise ValueError("Must specify at least one of `below`, `diag`, `above`.")
return s
with ops.name_scope(name, "tridiag", [below, diag, above]):
if below is not None:
below = ops.convert_to_tensor(below, name="below")
below = array_ops.matrix_diag(_pad(below))[..., :-1, 1:]
if diag is not None:
diag = ops.convert_to_tensor(diag, name="diag")
diag = array_ops.matrix_diag(diag)
if above is not None:
above = ops.convert_to_tensor(above, name="above")
above = array_ops.matrix_diag(_pad(above))[..., 1:, :-1]
# TODO(jvdillon): Consider using scatter_nd instead of creating three full
# matrices.
return _add(below, diag, above)
def reduce_weighted_logsumexp(logx,
w=None,
axis=None,
keep_dims=False,
return_sign=False,
name=None):
"""Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.
If all weights `w` are known to be positive, it is more efficient to directly
use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.math.log(w))` is
more efficient than `du.reduce_weighted_logsumexp(logx, w)`.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(w * exp(input))). It
avoids overflows caused by taking the exp of large inputs and underflows
caused by taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0, 0],
[0, 0, 0]])
w = tf.constant([[-1., 1, 1],
[1, 1, 1]])
du.reduce_weighted_logsumexp(x, w)
# ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)
du.reduce_weighted_logsumexp(x, w, axis=0)
# ==> [log(-1+1), log(1+1), log(1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1)
# ==> [log(-1+1+1), log(1+1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)
# ==> [[log(-1+1+1)], [log(1+1+1)]]
du.reduce_weighted_logsumexp(x, w, axis=[0, 1])
# ==> log(-1+5)
```
Args:
logx: The tensor to reduce. Should have numeric type.
w: The weight tensor. Should have numeric type identical to `logx`.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
return_sign: If `True`, returns the sign of the result.
name: A name for the operation (optional).
Returns:
lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.
sign: (Optional) The sign of `sum(weight * exp(x))`.
"""
with ops.name_scope(name, "reduce_weighted_logsumexp", [logx, w]):
logx = ops.convert_to_tensor(logx, name="logx")
if w is None:
lswe = math_ops.reduce_logsumexp(logx, axis=axis, keepdims=keep_dims)
if return_sign:
sgn = array_ops.ones_like(lswe)
return lswe, sgn
return lswe
w = ops.convert_to_tensor(w, dtype=logx.dtype, name="w")
log_absw_x = logx + math_ops.log(math_ops.abs(w))
max_log_absw_x = math_ops.reduce_max(log_absw_x, axis=axis, keepdims=True)
# If the largest element is `-inf` or `inf` then we don't bother subtracting
# off the max. We do this because otherwise we'd get `inf - inf = NaN`. That
# this is ok follows from the fact that we're actually free to subtract any
# value we like, so long as we add it back after taking the `log(sum(...))`.
max_log_absw_x = array_ops.where(
math_ops.is_inf(max_log_absw_x), array_ops.zeros_like(max_log_absw_x),
max_log_absw_x)
wx_over_max_absw_x = (
math_ops.sign(w) * math_ops.exp(log_absw_x - max_log_absw_x))
sum_wx_over_max_absw_x = math_ops.reduce_sum(
wx_over_max_absw_x, axis=axis, keepdims=keep_dims)
if not keep_dims:
max_log_absw_x = array_ops.squeeze(max_log_absw_x, axis)
sgn = math_ops.sign(sum_wx_over_max_absw_x)
lswe = max_log_absw_x + math_ops.log(sgn * sum_wx_over_max_absw_x)
if return_sign:
return lswe, sgn
return lswe
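# A pure-numpy reference of the stabilization above, reducing over all
# elements for brevity (a hedged sketch, not part of this module): subtract
# the max of logx + log|w|, sum the signed contributions, then add it back.
#   def np_weighted_lse(logx, w):
#     log_absw_x = logx + np.log(np.abs(w))
#     m = log_absw_x.max()
#     m = 0. if np.isinf(m) else m
#     s = np.sum(np.sign(w) * np.exp(log_absw_x - m))
#     return m + np.log(np.sign(s) * s), np.sign(s)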
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/ops/softplus_op_test.py
# once TF core is accepting new ops.
def softplus_inverse(x, name=None):
"""Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).
Mathematically this op is equivalent to:
```none
softplus_inverse = log(exp(x) - 1.)
```
Args:
x: `Tensor`. Non-negative (not enforced), floating-point.
name: A name for the operation (optional).
Returns:
`Tensor`. Has the same type/shape as input `x`.
"""
with ops.name_scope(name, "softplus_inverse", values=[x]):
x = ops.convert_to_tensor(x, name="x")
# We begin by deriving a more numerically stable softplus_inverse:
# x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
# ==> exp{x} = 1 + exp{y} (1)
# ==> y = Log[exp{x} - 1] (2)
# = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
# = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
# = Log[1 - exp{-x}] + x (3)
# (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
# For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
# be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.
#
# In addition to the numerically stable derivation above, we clamp
# small/large values to be congruent with the logic in:
# tensorflow/core/kernels/softplus_op.h
#
# Finally, we set the input to one whenever the input is too large or too
# small. This ensures that no unchosen codepath is +/- inf. This is
# necessary to ensure the gradient doesn't get NaNs. Recall that the
# gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
# thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
# to overwrite `x` with ones only when we will never actually use this
# value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
is_too_small = math_ops.less(x, np.exp(threshold))
is_too_large = math_ops.greater(x, -threshold)
too_small_value = math_ops.log(x)
too_large_value = x
# This `where` will ultimately be a NOP because we won't select this
# codepath whenever we used the surrogate `ones_like`.
x = array_ops.where(
math_ops.logical_or(is_too_small, is_too_large), array_ops.ones_like(x),
x)
y = x + math_ops.log(-math_ops.expm1(-x)) # == log(expm1(x))
return array_ops.where(is_too_small, too_small_value,
array_ops.where(is_too_large, too_large_value, y))
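# Numeric sanity check of identity (3) above, in plain numpy (a hedged
# sketch, not executed as part of this module):
#   x = np.array([1e-6, 1., 20.])
#   y = x + np.log(-np.expm1(-x))  # softplus_inverse via form (3)
#   np.log1p(np.exp(y))            # softplus(y) ==> recovers x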
# TODO(b/35290280): Add unit-tests.
def dimension_size(x, axis):
"""Returns the size of a specific dimension."""
# Since tf.gather isn't "constant-in, constant-out", we must first check the
# static shape or fallback to dynamic shape.
s = tensor_shape.dimension_value(
x.shape.with_rank_at_least(np.abs(axis))[axis])
if s is not None:
return s
return array_ops.shape(x)[axis]
def process_quadrature_grid_and_probs(quadrature_grid_and_probs,
dtype,
validate_args,
name=None):
"""Validates quadrature grid, probs or computes them as necessary.
Args:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight. When `None`, defaults to:
`np.polynomial.hermite.hermgauss(deg=8)`.
dtype: The expected `dtype` of `grid` and `probs`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight.
Raises:
ValueError: if `quadrature_grid_and_probs is not None` and
`len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`
"""
with ops.name_scope(name, "process_quadrature_grid_and_probs",
[quadrature_grid_and_probs]):
if quadrature_grid_and_probs is None:
grid, probs = np.polynomial.hermite.hermgauss(deg=8)
grid = grid.astype(dtype.as_numpy_dtype)
probs = probs.astype(dtype.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype)
probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype)
return grid, probs
grid, probs = tuple(quadrature_grid_and_probs)
grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype)
probs = ops.convert_to_tensor(probs, name="unnormalized_probs", dtype=dtype)
probs /= linalg_ops.norm(probs, ord=1, axis=-1, keepdims=True, name="probs")
def _static_event_size(x):
"""Returns the static size of a specific dimension or `None`."""
return tensor_shape.dimension_value(x.shape.with_rank_at_least(1)[-1])
m, n = _static_event_size(probs), _static_event_size(grid)
if m is not None and n is not None:
if m != n:
raise ValueError("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s "
"(saw lengths {}, {})".format(m, n))
elif validate_args:
assertions = [
check_ops.assert_equal(
dimension_size(probs, axis=-1),
dimension_size(grid, axis=-1),
message=("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s")),
]
with ops.control_dependencies(assertions):
grid = array_ops.identity(grid)
probs = array_ops.identity(probs)
return grid, probs
def pad(x, axis, front=False, back=False, value=0, count=1, name=None):
"""Pads `value` to the front and/or back of a `Tensor` dim, `count` times.
Args:
x: `Tensor` input.
axis: Scalar `int`-like `Tensor` representing the single dimension to pad.
(Negative indexing is supported.)
front: Python `bool`; if `True` the beginning of the `axis` dimension is
padded with `value`, `count` times. If `False` no front padding is made.
back: Python `bool`; if `True` the end of the `axis` dimension is padded
with `value`, `count` times. If `False` no end padding is made.
value: Scalar `int`-like `Tensor` representing the actual value added to the
front and/or back of the `axis` dimension of `x`.
count: Scalar `int`-like `Tensor` representing number of elements added to
the front and/or back of the `axis` dimension of `x`. E.g., if `front =
back = True` then `2 * count` elements are added.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pad: The padded version of input `x`.
Raises:
ValueError: if both `front` and `back` are `False`.
TypeError: if `count` is not `int`-like.
"""
with ops.name_scope(name, "pad", [x, value, count]):
x = ops.convert_to_tensor(x, name="x")
value = ops.convert_to_tensor(value, dtype=x.dtype, name="value")
count = ops.convert_to_tensor(count, name="count")
if not count.dtype.is_integer:
raise TypeError("`count.dtype` (`{}`) must be `int`-like.".format(
count.dtype.name))
if not front and not back:
raise ValueError("At least one of `front`, `back` must be `True`.")
ndims = (
x.shape.ndims if x.shape.ndims is not None else array_ops.rank(
x, name="ndims"))
axis = ops.convert_to_tensor(axis, name="axis")
axis_ = tensor_util.constant_value(axis)
if axis_ is not None:
axis = axis_
if axis < 0:
axis = ndims + axis
count_ = tensor_util.constant_value(count)
if axis_ >= 0 or x.shape.ndims is not None:
head = x.shape[:axis]
middle = tensor_shape.TensorShape(None if count_ is None else (
tensor_shape.dimension_at_index(x.shape, axis) + count_ *
(front + back)))
tail = x.shape[axis + 1:]
final_shape = head.concatenate(middle.concatenate(tail))
else:
final_shape = None
else:
axis = array_ops.where(axis < 0, ndims + axis, axis)
final_shape = None
x = array_ops.pad(
x,
paddings=array_ops.one_hot(
indices=array_ops.stack(
[axis if front else -1, axis if back else -1]),
depth=ndims,
axis=0,
on_value=count,
dtype=dtypes.int32),
constant_values=value)
if final_shape is not None:
x.set_shape(final_shape)
return x
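# Worked example of the `one_hot` paddings construction above (hedged): with
# ndims=3, axis=1, front=True, back=False, count=2, the stacked indices are
# [1, -1], so `one_hot` yields the [ndims, 2] paddings matrix
#   [[0, 0],
#    [2, 0],
#    [0, 0]]
# i.e., pad 2 elements at the front of dimension 1 and nothing elsewhere.
# (The -1 index matches no row, producing the all-zero "no padding" column.)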
def parent_frame_arguments():
"""Returns parent frame arguments.
When called inside a function, returns a dictionary with the caller's function
arguments. These are positional arguments and keyword arguments (**kwargs),
while variable arguments (*varargs) are excluded.
When called at global scope, this will return an empty dictionary, since there
are no arguments.
WARNING: If the caller's function arguments are reassigned before invoking
this method, the returned values will reflect the reassigned values. For this
reason, we recommend calling `parent_frame_arguments` at the beginning of the
function.
"""
# All arguments and the names used for *varargs, and **kwargs
arg_names, variable_arg_name, keyword_arg_name, local_vars = (
tf_inspect._inspect.getargvalues( # pylint: disable=protected-access
# Get the first frame of the caller of this method.
tf_inspect._inspect.stack()[1][0])) # pylint: disable=protected-access
# Drop the *varargs entry and pull out the **kwargs dict so its items can
# be merged into the result below.
local_vars.pop(variable_arg_name, {})
keyword_args = local_vars.pop(keyword_arg_name, {})
final_args = {}
# Copy over arguments and their values. In general, local_vars
# may contain more than just the arguments, since this method
# can be called anywhere in a function.
for arg_name in arg_names:
final_args[arg_name] = local_vars.pop(arg_name)
final_args.update(keyword_args)
return final_args
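# Usage sketch (hedged): calling from inside a function returns that
# function's bound arguments, with **kwargs flattened in.
#   def f(a, b=2, *args, **kwargs):
#     return parent_frame_arguments()
#   f(1, c=3)  # ==> {'a': 1, 'b': 2, 'c': 3}  (*args excluded)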
class AppendDocstring(object):
"""Helper class to promote private subclass docstring to public counterpart.
Example:
```python
class TransformedDistribution(Distribution):
@distribution_util.AppendDocstring(
additional_note="A special note!",
kwargs_dict={"foo": "An extra arg."})
def _prob(self, y, foo=None):
pass
```
In this case, the `AppendDocstring` decorator appends the `additional_note` to
the docstring of `prob` (not `_prob`) and adds a new `kwargs`
section with each dictionary item as a bullet-point.
For a more detailed example, see `TransformedDistribution`.
"""
def __init__(self, additional_note="", kwargs_dict=None):
"""Initializes the AppendDocstring object.
Args:
additional_note: Python string added as additional docstring to public
version of function.
kwargs_dict: Python string/string dictionary representing specific kwargs
expanded from the **kwargs input.
Raises:
ValueError: if kwargs_dict.key contains whitespace.
ValueError: if kwargs_dict.value contains newlines.
"""
self._additional_note = additional_note
if kwargs_dict:
bullets = []
for key in sorted(kwargs_dict.keys()):
value = kwargs_dict[key]
if any(x.isspace() for x in key):
raise ValueError("Parameter name \"%s\" contains whitespace." % key)
value = value.lstrip()
if "\n" in value:
raise ValueError(
"Parameter description for \"%s\" contains newlines." % key)
bullets.append("* `%s`: %s" % (key, value))
self._additional_note += ("\n\n##### `kwargs`:\n\n" + "\n".join(bullets))
def __call__(self, fn):
@functools.wraps(fn)
def _fn(*args, **kwargs):
return fn(*args, **kwargs)
if _fn.__doc__ is None:
_fn.__doc__ = self._additional_note
else:
_fn.__doc__ += "\n%s" % self._additional_note
return _fn
|
tensorflow-master
|
tensorflow/python/ops/distributions/util.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Beta distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Beta",
"BetaWithSoftplusConcentration",
]
_beta_sample_note = """Note: `x` must have dtype `self.dtype` and be in
`[0, 1].` It must have a shape compatible with `self.batch_shape()`."""
@tf_export(v1=["distributions.Beta"])
class Beta(distribution.Distribution):
"""Beta distribution.
The Beta distribution is defined over the `(0, 1)` interval using parameters
`concentration1` (aka "alpha") and `concentration0` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
```
where:
* `concentration1 = alpha`,
* `concentration0 = beta`,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The concentration parameters represent mean total counts of a `1` or a `0`,
i.e.,
```none
concentration1 = alpha = mean * total_concentration
concentration0 = beta = (1. - mean) * total_concentration
```
where `mean` in `(0, 1)` and `total_concentration` is a positive real number
representing a mean `total_count = concentration1 + concentration0`.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: The samples can be zero due to finite precision.
This happens more often when some of the concentrations are very small.
Make sure to round the samples to `np.finfo(dtype).tiny` before computing the
density.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create a batch of three Beta distributions.
alpha = [1, 2, 3]
beta = [1, 2, 3]
dist = tfd.Beta(alpha, beta)
dist.sample([4, 5]) # Shape [4, 5, 3]
# `x` has three batch entries, each with two samples.
x = [[.1, .4, .5],
[.2, .3, .5]]
# Calculate the probability of each pair of samples under the corresponding
# distribution in `dist`.
dist.prob(x) # Shape [2, 3]
```
```python
# Create batch_shape=[2, 3] via parameter broadcast:
alpha = [[1.], [2]] # Shape [2, 1]
beta = [3., 4, 5] # Shape [3]
dist = tfd.Beta(alpha, beta)
# alpha broadcast as: [[1., 1, 1,],
# [2, 2, 2]]
# beta broadcast as: [[3., 4, 5],
# [3, 4, 5]]
# batch_shape [2, 3]
dist.sample([4, 5]) # Shape [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # Shape [2, 3]
```
Compute the gradients of samples w.r.t. the parameters:
```python
alpha = tf.constant(1.0)
beta = tf.constant(2.0)
dist = tfd.Beta(alpha, beta)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [alpha, beta])
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
concentration1=None,
concentration0=None,
validate_args=False,
allow_nan_stats=True,
name="Beta"):
"""Initialize a batch of Beta distributions.
Args:
concentration1: Positive floating-point `Tensor` indicating mean
number of successes; aka "alpha". Implies `self.dtype` and
`self.batch_shape`, i.e.,
`concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
concentration0: Positive floating-point `Tensor` indicating mean
number of failures; aka "beta". Otherwise has same semantics as
`concentration1`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration1, concentration0]) as name:
self._concentration1 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration1, name="concentration1"),
validate_args)
self._concentration0 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration0, name="concentration0"),
validate_args)
check_ops.assert_same_float_dtype([
self._concentration1, self._concentration0])
self._total_concentration = self._concentration1 + self._concentration0
super(Beta, self).__init__(
dtype=self._total_concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration1,
self._concentration0,
self._total_concentration],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(zip(
["concentration1", "concentration0"],
[ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2))
@property
def concentration1(self):
"""Concentration parameter associated with a `1` outcome."""
return self._concentration1
@property
def concentration0(self):
"""Concentration parameter associated with a `0` outcome."""
return self._concentration0
@property
def total_concentration(self):
"""Sum of concentration parameters."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
expanded_concentration1 = array_ops.ones_like(
self.total_concentration, dtype=self.dtype) * self.concentration1
expanded_concentration0 = array_ops.ones_like(
self.total_concentration, dtype=self.dtype) * self.concentration0
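# Hedged note on the construction below: if X ~ Gamma(concentration1, 1)
# and Y ~ Gamma(concentration0, 1) are independent, then X / (X + Y) is
# Beta(concentration1, concentration0) distributed, which is what the two
# `random_gamma` draws implement.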
gamma1_sample = random_ops.random_gamma(
shape=[n],
alpha=expanded_concentration1,
dtype=self.dtype,
seed=seed)
gamma2_sample = random_ops.random_gamma(
shape=[n],
alpha=expanded_concentration0,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, "beta"))
beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
return beta_sample
@distribution_util.AppendDocstring(_beta_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_beta_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
@distribution_util.AppendDocstring(_beta_sample_note)
def _log_cdf(self, x):
return math_ops.log(self._cdf(x))
@distribution_util.AppendDocstring(_beta_sample_note)
def _cdf(self, x):
return math_ops.betainc(self.concentration1, self.concentration0, x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return (math_ops.xlogy(self.concentration1 - 1., x) +
(self.concentration0 - 1.) * math_ops.log1p(-x))
def _log_normalization(self):
return (math_ops.lgamma(self.concentration1)
+ math_ops.lgamma(self.concentration0)
- math_ops.lgamma(self.total_concentration))
def _entropy(self):
return (
self._log_normalization()
- (self.concentration1 - 1.) * math_ops.digamma(self.concentration1)
- (self.concentration0 - 1.) * math_ops.digamma(self.concentration0)
+ ((self.total_concentration - 2.) *
math_ops.digamma(self.total_concentration)))
def _mean(self):
return self._concentration1 / self._total_concentration
def _variance(self):
return self._mean() * (1. - self._mean()) / (1. + self.total_concentration)
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when `concentration1 <= 1` or
`concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`
is used for undefined modes. If `self.allow_nan_stats` is `False` an
exception is raised when one or more modes are undefined.""")
def _mode(self):
mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
is_defined = math_ops.logical_and(self.concentration1 > 1.,
self.concentration0 > 1.)
return array_ops.where(is_defined, mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.concentration1,
message="Mode undefined for concentration1 <= 1."),
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.concentration0,
message="Mode undefined for concentration0 <= 1.")
], mode)
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of a concentration parameter."""
if not validate_args:
return concentration
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
], concentration)
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x, message="sample must be positive"),
check_ops.assert_less(
x,
array_ops.ones([], self.dtype),
message="sample must be less than `1`."),
], x)
class BetaWithSoftplusConcentration(Beta):
"""Beta with softplus transform of `concentration1` and `concentration0`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Beta(tf.nn.softplus(concentration1), "
"tf.nn.softplus(concentration2))` instead.",
warn_once=True)
def __init__(self,
concentration1,
concentration0,
validate_args=False,
allow_nan_stats=True,
name="BetaWithSoftplusConcentration"):
parameters = dict(locals())
with ops.name_scope(name, values=[concentration1,
concentration0]) as name:
super(BetaWithSoftplusConcentration, self).__init__(
concentration1=nn.softplus(concentration1,
name="softplus_concentration1"),
concentration0=nn.softplus(concentration0,
name="softplus_concentration0"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Beta, Beta)
def _kl_beta_beta(d1, d2, name=None):
"""Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.
Args:
d1: instance of a Beta distribution object.
d2: instance of a Beta distribution object.
name: (optional) Name to use for created operations.
default is "kl_beta_beta".
Returns:
Batchwise KL(d1 || d2)
"""
def delta(fn, is_property=True):
fn1 = getattr(d1, fn)
fn2 = getattr(d2, fn)
return (fn2 - fn1) if is_property else (fn2() - fn1())
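# Closed form computed below (hedged restatement, with a_i = concentration1
# and b_i = concentration0 of d_i, psi = digamma, B = beta function):
#   KL(d1 || d2) = log B(a2, b2) - log B(a1, b1)
#                  - (a2 - a1) psi(a1) - (b2 - b1) psi(b1)
#                  + ((a2 + b2) - (a1 + b1)) psi(a1 + b1)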
with ops.name_scope(name, "kl_beta_beta", values=[
d1.concentration1,
d1.concentration0,
d1.total_concentration,
d2.concentration1,
d2.concentration0,
d2.total_concentration,
]):
return (delta("_log_normalization", is_property=False)
- math_ops.digamma(d1.concentration1) * delta("concentration1")
- math_ops.digamma(d1.concentration0) * delta("concentration0")
+ (math_ops.digamma(d1.total_concentration)
* delta("total_concentration")))
|
tensorflow-master
|
tensorflow/python/ops/distributions/beta.py
|
tensorflow-master
|
tensorflow/python/ops/distributions/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Functions "ndtr" and "ndtri" are derived from calculations made in:
# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html
# In the following email exchange, the author gives his consent to redistribute
# derived works under an Apache 2.0 license.
#
# From: Stephen Moshier <steve@moshier.net>
# Date: Sat, Jun 9, 2018 at 2:36 PM
# Subject: Re: Licensing cephes under Apache (BSD-like) license.
# To: rif <rif@google.com>
#
#
#
# Hello Rif,
#
# Yes, Google may distribute Cephes files under the Apache 2 license.
#
# If clarification is needed, I do not favor BSD over other free licenses.
# I would agree that Apache 2 seems to cover the concern you mentioned
# about sublicensees.
#
# Best wishes for good luck with your projects!
# Steve Moshier
#
#
#
# On Thu, 31 May 2018, rif wrote:
#
# > Hello Steve.
# > My name is Rif. I work on machine learning software at Google.
# >
# > Your cephes software continues to be incredibly useful and widely used. I
# > was wondering whether it would be permissible for us to use the Cephes code
# > under the Apache 2.0 license, which is extremely similar in permissions to
# > the BSD license (Wikipedia comparisons). This would be quite helpful to us
# > in terms of avoiding multiple licenses on software.
# >
# > I'm sorry to bother you with this (I can imagine you're sick of hearing
# > about this by now), but I want to be absolutely clear we're on the level and
# > not misusing your important software. In former conversation with Eugene
# > Brevdo (ebrevdo@google.com), you wrote "If your licensing is similar to BSD,
# > the formal way that has been handled is simply to add a statement to the
# > effect that you are incorporating the Cephes software by permission of the
# > author." I wanted to confirm that (a) we could use the Apache license, (b)
# > that we don't need to (and probably you don't want to) keep getting
# > contacted about individual uses, because your intent is generally to allow
# > this software to be reused under "BSD-like" license, and (c) you're OK
# > letting incorporators decide whether a license is sufficiently BSD-like?
# >
# > Best,
# >
# > rif
# >
# >
# >
"""Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = [
"erfinv",
"ndtr",
"ndtri",
"log_ndtr",
"log_cdf_laplace",
]
# log_ndtr uses different functions over the ranges
# (-infty, lower], (lower, upper], (upper, infty).
# Lower bound values were chosen by examining where the support of ndtr
# appears to be zero, relative to scipy's (which is always 64bit). They were
# then made more conservative just to be safe. (Conservative means use the
# expansion more than we probably need to.) See `NdtrTest` in
# special_math_test.py.
LOGNDTR_FLOAT64_LOWER = np.array(-20, np.float64)
LOGNDTR_FLOAT32_LOWER = np.array(-10, np.float32)
# Upper bound values were chosen by examining for which values of 'x'
# Log[cdf(x)] is 0, after which point we need to use the approximation
# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly
# conservative, meaning we use the approximation earlier than needed.
LOGNDTR_FLOAT64_UPPER = np.array(8, np.float64)
LOGNDTR_FLOAT32_UPPER = np.array(5, np.float32)
def ndtr(x, name="ndtr"):
"""Normal distribution function.
Returns the area under the Gaussian probability density function, integrated
from minus infinity to x:
```
1 / x
ndtr(x) = ---------- | exp(-0.5 t**2) dt
sqrt(2 pi) /-inf
= 0.5 (1 + erf(x / sqrt(2)))
= 0.5 erfc(-x / sqrt(2))
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtr").
Returns:
ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return _ndtr(x)
def _ndtr(x):
"""Implements ndtr core logic."""
half_sqrt_2 = constant_op.constant(
0.5 * np.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
w = x * half_sqrt_2
z = math_ops.abs(w)
y = array_ops.where(math_ops.less(z, half_sqrt_2),
1. + math_ops.erf(w),
array_ops.where(math_ops.greater(w, 0.),
2. - math_ops.erfc(z),
math_ops.erfc(z)))
return 0.5 * y
def ndtri(p, name="ndtri"):
"""The inverse of the CDF of the Normal distribution function.
Returns x such that the area under the pdf from minus infinity to x is equal
to p.
A piece-wise rational approximation is done for the function.
This is a port of the implementation in netlib.
Args:
p: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtri").
Returns:
x: `Tensor` with `dtype=p.dtype`.
Raises:
TypeError: if `p` is not floating-type.
"""
with ops.name_scope(name, values=[p]):
p = ops.convert_to_tensor(p, name="p")
if p.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"p.dtype=%s is not handled, see docstring for supported types."
% p.dtype)
return _ndtri(p)
def _ndtri(p):
"""Implements ndtri core logic."""
# Constants used in piece-wise rational approximations. Taken from the cephes
# library:
# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html
p0 = list(reversed([-5.99633501014107895267E1,
9.80010754185999661536E1,
-5.66762857469070293439E1,
1.39312609387279679503E1,
-1.23916583867381258016E0]))
q0 = list(reversed([1.0,
1.95448858338141759834E0,
4.67627912898881538453E0,
8.63602421390890590575E1,
-2.25462687854119370527E2,
2.00260212380060660359E2,
-8.20372256168333339912E1,
1.59056225126211695515E1,
-1.18331621121330003142E0]))
p1 = list(reversed([4.05544892305962419923E0,
3.15251094599893866154E1,
5.71628192246421288162E1,
4.40805073893200834700E1,
1.46849561928858024014E1,
2.18663306850790267539E0,
-1.40256079171354495875E-1,
-3.50424626827848203418E-2,
-8.57456785154685413611E-4]))
q1 = list(reversed([1.0,
1.57799883256466749731E1,
4.53907635128879210584E1,
4.13172038254672030440E1,
1.50425385692907503408E1,
2.50464946208309415979E0,
-1.42182922854787788574E-1,
-3.80806407691578277194E-2,
-9.33259480895457427372E-4]))
p2 = list(reversed([3.23774891776946035970E0,
6.91522889068984211695E0,
3.93881025292474443415E0,
1.33303460815807542389E0,
2.01485389549179081538E-1,
1.23716634817820021358E-2,
3.01581553508235416007E-4,
2.65806974686737550832E-6,
6.23974539184983293730E-9]))
q2 = list(reversed([1.0,
6.02427039364742014255E0,
3.67983563856160859403E0,
1.37702099489081330271E0,
2.16236993594496635890E-1,
1.34204006088543189037E-2,
3.28014464682127739104E-4,
2.89247864745380683936E-6,
6.79019408009981274425E-9]))
def _create_polynomial(var, coeffs):
"""Compute n_th order polynomial via Horner's method."""
coeffs = np.array(coeffs, var.dtype.as_numpy_dtype)
if not coeffs.size:
return array_ops.zeros_like(var)
return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var
maybe_complement_p = array_ops.where(p > -np.expm1(-2.), 1. - p, p)
# Write in an arbitrary value in place of 0 for p since 0 will cause NaNs
# later on. The result from the computation when p == 0 is not used so any
# number that doesn't result in NaNs is fine.
sanitized_mcp = array_ops.where(
maybe_complement_p <= 0.,
array_ops.fill(array_ops.shape(p), np.array(0.5, p.dtype.as_numpy_dtype)),
maybe_complement_p)
# Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2).
w = sanitized_mcp - 0.5
ww = w ** 2
x_for_big_p = w + w * ww * (_create_polynomial(ww, p0)
/ _create_polynomial(ww, q0))
x_for_big_p *= -np.sqrt(2. * np.pi)
# Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z),
# where z = sqrt(-2. * log(p)), and P/Q are chosen between two different
# arrays based on whether p < exp(-32).
z = math_ops.sqrt(-2. * math_ops.log(sanitized_mcp))
first_term = z - math_ops.log(z) / z
second_term_small_p = (
_create_polynomial(1. / z, p2) /
_create_polynomial(1. / z, q2) / z)
second_term_otherwise = (
_create_polynomial(1. / z, p1) /
_create_polynomial(1. / z, q1) / z)
x_for_small_p = first_term - second_term_small_p
x_otherwise = first_term - second_term_otherwise
x = array_ops.where(sanitized_mcp > np.exp(-2.),
x_for_big_p,
array_ops.where(z >= 8.0, x_for_small_p, x_otherwise))
x = array_ops.where(p > 1. - np.exp(-2.), x, -x)
infinity_scalar = constant_op.constant(np.inf, dtype=p.dtype)
infinity = array_ops.fill(array_ops.shape(p), infinity_scalar)
x_nan_replaced = array_ops.where(
p <= 0.0, -infinity, array_ops.where(p >= 1.0, infinity, x))
return x_nan_replaced
def log_ndtr(x, series_order=3, name="log_ndtr"):
"""Log Normal distribution function.
For details of the Normal distribution function see `ndtr`.
This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or
using an asymptotic series. Specifically:
- For `x > upper_segment`, use the approximation `-ndtr(-x)` based on
`log(1-x) ~= -x, x << 1`.
- For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique
and take a log.
- For `x <= lower_segment`, we use the series approximation of erf to compute
the log CDF directly.
The `lower_segment` is set based on the precision of the input:
```
lower_segment = { -20,  x.dtype=float64
                { -10,  x.dtype=float32
upper_segment = {   8,  x.dtype=float64
                {   5,  x.dtype=float32
```
When `x < lower_segment`, the `ndtr` asymptotic series approximation is:
```
ndtr(x) = scale * (1 + sum) + R_N
scale = exp(-0.5 x**2) / (-x sqrt(2 pi))
sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N}
R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3})
```
where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a
[double-factorial](https://en.wikipedia.org/wiki/Double_factorial).
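As an illustrative sketch of why the series branch matters (values
approximate): a naive `log(ndtr(x))` underflows for very negative `x`,
while this function stays finite:
```python
import tensorflow as tf
from tensorflow.python.ops.distributions import special_math

x = tf.constant([-40., 0.], dtype=tf.float64)
special_math.log_ndtr(x)      # ~ [-804.6, log(0.5)]; finite everywhere.
tf.log(special_math.ndtr(x))  # -inf at x = -40: ndtr(-40) underflows to 0.
```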
Args:
x: `Tensor` of type `float32`, `float64`.
series_order: Positive Python `integer`. Maximum depth to
evaluate the asymptotic expansion. This is the `N` above.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
log_ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
TypeError: if `series_order` is not a Python `integer`.
ValueError: if `series_order` is not in `[0, 30]`.
"""
if not isinstance(series_order, int):
raise TypeError("series_order must be a Python integer.")
if series_order < 0:
raise ValueError("series_order must be non-negative.")
if series_order > 30:
raise ValueError("series_order must be <= 30.")
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype == np.float64:
lower_segment = LOGNDTR_FLOAT64_LOWER
upper_segment = LOGNDTR_FLOAT64_UPPER
elif x.dtype.as_numpy_dtype == np.float32:
lower_segment = LOGNDTR_FLOAT32_LOWER
upper_segment = LOGNDTR_FLOAT32_UPPER
else:
raise TypeError("x.dtype=%s is not supported." % x.dtype)
# The basic idea here was ported from:
# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html
# We copy the main idea, with a few changes
# * For x >> 1, and X ~ Normal(0, 1),
# Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],
# which extends the range of validity of this function.
# * We use one fixed series_order for all of 'x', rather than adaptive.
# * Our docstring properly reflects that this is an asymptotic series, not a
# Taylor series. We also provided a correct bound on the remainder.
# * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when
# x=0. This happens even though the branch is unchosen because when x=0
# the gradient of a select involves the calculation 1*dy+0*(-inf)=nan
# regardless of whether dy is finite. Note that the minimum is a NOP if
# the branch is chosen.
return array_ops.where(
math_ops.greater(x, upper_segment),
-_ndtr(-x), # log(1-x) ~= -x, x << 1
array_ops.where(math_ops.greater(x, lower_segment),
math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))),
_log_ndtr_lower(math_ops.minimum(x, lower_segment),
series_order)))
def _log_ndtr_lower(x, series_order):
"""Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`."""
x_2 = math_ops.square(x)
# Log of the term multiplying (1 + sum)
log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * np.log(2. * np.pi)
return log_scale + math_ops.log(_log_ndtr_asymptotic_series(x, series_order))
def _log_ndtr_asymptotic_series(x, series_order):
"""Calculates the asymptotic series used in log_ndtr."""
dtype = x.dtype.as_numpy_dtype
if series_order <= 0:
return np.array(1, dtype)
x_2 = math_ops.square(x)
even_sum = array_ops.zeros_like(x)
odd_sum = array_ops.zeros_like(x)
x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.
for n in range(1, series_order + 1):
y = np.array(_double_factorial(2 * n - 1), dtype) / x_2n
if n % 2:
odd_sum += y
else:
even_sum += y
x_2n *= x_2
return 1. + even_sum - odd_sum
def erfinv(x, name="erfinv"):
"""The inverse function for erf, the error function.
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="erfinv").
Returns:
x: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return ndtri((x + 1.0) / 2.0) / np.sqrt(2)
def _double_factorial(n):
"""The double factorial function for small Python integer `n`."""
return np.prod(np.arange(n, 1, -2))
def log_cdf_laplace(x, name="log_cdf_laplace"):
"""Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
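For example, a minimal sketch (values approximate):
```python
import tensorflow as tf
from tensorflow.python.ops.distributions import special_math

x = tf.constant([-10., 0., 10.], dtype=tf.float64)
special_math.log_cdf_laplace(x)
# ~ [log(0.5) - 10, log(0.5), -0.5 * exp(-10)]
```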
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="log_cdf_laplace").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
lower_solution = -np.log(2.) + x
# safe_exp_neg_x = exp{-x} for x > 0, but is
# bounded above by 1, which avoids
# log[1 - 1] = -inf for x = log(1/2), AND
# exp{-x} --> inf, for x << -1
safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))
# log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
# internally by log1p, rather than being done explicitly here.
upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)
return array_ops.where(x < 0., lower_solution, upper_solution)
|
tensorflow-master
|
tensorflow/python/ops/distributions/special_math.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Multinomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Multinomial",
]
_multinomial_sample_note = """For each batch of counts, `value = [n_0, ...
,n_{k-1}]`, `P[value]` is the probability that after sampling `self.total_count`
draws from this Multinomial distribution, the number of draws falling in class
`j` is `n_j`. Since this definition is [exchangeable](
https://en.wikipedia.org/wiki/Exchangeable_random_variables), different
sequences have the same counts, so the probability includes a combinatorial
coefficient.
Note: `value` must be a non-negative tensor with dtype `self.dtype`, have no
fractional components, and such that
`tf.reduce_sum(value, -1) = self.total_count`. Its shape must be broadcastable
with `self.probs` and `self.total_count`."""
@tf_export(v1=["distributions.Multinomial"])
class Multinomial(distribution.Distribution):
"""Multinomial distribution.
This Multinomial distribution is parameterized by `probs`, a (batch of)
length-`K` `prob` (probability) vectors (`K > 1`) such that
`tf.reduce_sum(probs, -1) = 1`, and a `total_count` number of trials, i.e.,
the number of trials per draw from the Multinomial. It is defined over a
(batch of) length-`K` vector `counts` such that
`tf.reduce_sum(counts, -1) = total_count`. The Multinomial is identically the
Binomial distribution when `K = 2`.
#### Mathematical Details
The Multinomial is a distribution over `K`-class counts, i.e., a length-`K`
vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`.
The probability mass function (pmf) is,
```none
pmf(n; pi, N) = prod_j (pi_j)**n_j / Z
Z = (prod_j n_j!) / N!
```
where:
* `probs = pi = [pi_0, ..., pi_{K-1}]`, `pi_j > 0`, `sum_j pi_j = 1`,
* `total_count = N`, `N` a positive integer,
* `Z` is the normalization constant, and,
* `N!` denotes `N` factorial.
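A worked instance of this pmf (consistent with the examples below): with
`probs = [.2, .3, .5]`, `total_count = 4`, and `counts = [1., 0, 3]`,
```none
pmf = 4! / (1! 0! 3!) * .2**1 * .3**0 * .5**3
    = 4 * .2 * .125
    = .1
```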
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Pitfalls
The number of classes, `K`, must not exceed:
- the largest integer representable by `self.dtype`, i.e.,
`2**(mantissa_bits+1)` (IEEE 754),
- the maximum `Tensor` index, i.e., `2**31-1`.
In other words,
```python
K <= min(2**31-1, {
tf.float16: 2**11,
tf.float32: 2**24,
tf.float64: 2**53 }[param.dtype])
```
Note: This condition is validated only when `self.validate_args = True`.
#### Examples
Create a 3-class distribution in which the 3rd class is the most likely to be drawn,
using logits.
```python
logits = [-50., -43, 0]
dist = Multinomial(total_count=4., logits=logits)
```
Create a 3-class distribution in which the 3rd class is the most likely to be drawn.
```python
p = [.2, .3, .5]
dist = Multinomial(total_count=4., probs=p)
```
The distribution functions can be evaluated on counts.
```python
# counts same shape as p.
counts = [1., 0, 3]
dist.prob(counts) # Shape []
# p will be broadcast to [[.2, .3, .5], [.2, .3, .5]] to match counts.
counts = [[1., 2, 1], [2, 2, 0]]
dist.prob(counts) # Shape [2]
# p will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7]
```
Create a 2-batch of 3-class distributions.
```python
p = [[.1, .2, .7], [.3, .3, .4]] # Shape [2, 3]
dist = Multinomial(total_count=[4., 5], probs=p)
counts = [[2., 1, 1], [3, 1, 1]]
dist.prob(counts) # Shape [2]
dist.sample(5) # Shape [5, 2, 3]
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
total_count,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="Multinomial"):
"""Initialize a batch of Multinomial distributions.
Args:
total_count: Non-negative floating point tensor with shape broadcastable
to `[N1,..., Nm]` with `m >= 0`. Defines this as a batch of
`N1 x ... x Nm` different Multinomial distributions. Its components
should be equal to integer values.
logits: Floating point tensor representing unnormalized log-probabilities
of a positive event with shape broadcastable to
`[N1,..., Nm, K]` `m >= 0`, and the same dtype as `total_count`. Defines
this as a batch of `N1 x ... x Nm` different `K` class Multinomial
distributions. Only one of `logits` or `probs` should be passed in.
probs: Positive floating point tensor with shape broadcastable to
`[N1,..., Nm, K]` `m >= 0` and same dtype as `total_count`. Defines
this as a batch of `N1 x ... x Nm` different `K` class Multinomial
distributions. `probs`'s components in the last portion of its shape
should sum to `1`. Only one of `logits` or `probs` should be passed in.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[total_count, logits, probs]) as name:
self._total_count = ops.convert_to_tensor(total_count, name="total_count")
if validate_args:
self._total_count = (
distribution_util.embed_check_nonnegative_integer_form(
self._total_count))
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
multidimensional=True,
validate_args=validate_args,
name=name)
self._mean_val = self._total_count[..., array_ops.newaxis] * self._probs
super(Multinomial, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._total_count,
self._logits,
self._probs],
name=name)
@property
def total_count(self):
"""Number of trials used to construct a sample."""
return self._total_count
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Probability of drawing a `1` in that coordinate."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._mean_val)[:-1]
def _batch_shape(self):
return self._mean_val.get_shape().with_rank_at_least(1)[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self._mean_val)[-1:]
def _event_shape(self):
return self._mean_val.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
k = self.event_shape_tensor()[0]
# broadcast the total_count and logits to same shape
n_draws = array_ops.ones_like(
self.logits[..., 0], dtype=n_draws.dtype) * n_draws
logits = array_ops.ones_like(
n_draws[..., array_ops.newaxis], dtype=self.logits.dtype) * self.logits
# flatten the total_count and logits
flat_logits = array_ops.reshape(logits, [-1, k]) # [B1B2...Bm, k]
flat_ndraws = n * array_ops.reshape(n_draws, [-1]) # [B1B2...Bm]
# Compute a sample for each (total_count, logits) pair via map_fn.
def _sample_single(args):
logits, n_draw = args[0], args[1] # [K], []
x = random_ops.multinomial(logits[array_ops.newaxis, ...], n_draw,
seed) # [1, n*n_draw]
x = array_ops.reshape(x, shape=[n, -1]) # [n, n_draw]
x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k), axis=-2) # [n, k]
return x
x = map_fn.map_fn(
_sample_single, [flat_logits, flat_ndraws],
dtype=self.dtype) # [B1B2...Bm, n, k]
# reshape the results to proper shape
x = array_ops.transpose(x, perm=[1, 0, 2])
final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
x = array_ops.reshape(x, final_shape) # [n, B1, B2,..., Bm, k]
return x
@distribution_util.AppendDocstring(_multinomial_sample_note)
def _log_prob(self, counts):
return self._log_unnormalized_prob(counts) - self._log_normalization(counts)
def _log_unnormalized_prob(self, counts):
counts = self._maybe_assert_valid_sample(counts)
return math_ops.reduce_sum(counts * nn_ops.log_softmax(self.logits), -1)
def _log_normalization(self, counts):
counts = self._maybe_assert_valid_sample(counts)
return -distribution_util.log_combinations(self.total_count, counts)
def _mean(self):
return array_ops.identity(self._mean_val)
def _covariance(self):
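# Cov[n_i, n_j] = -N p_i p_j for i != j (built by the outer product below);
# the diagonal is then overwritten with Var[n_j] = N p_j (1 - p_j).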
p = self.probs * array_ops.ones_like(
self.total_count)[..., array_ops.newaxis]
return array_ops.matrix_set_diag(
-math_ops.matmul(self._mean_val[..., array_ops.newaxis],
p[..., array_ops.newaxis, :]), # outer product
self._variance())
def _variance(self):
p = self.probs * array_ops.ones_like(
self.total_count)[..., array_ops.newaxis]
return self._mean_val - self._mean_val * p
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return control_flow_ops.with_dependencies([
check_ops.assert_equal(
self.total_count, math_ops.reduce_sum(counts, -1),
message="counts must sum to `self.total_count`"),
], counts)
|
tensorflow-master
|
tensorflow/python/ops/distributions/multinomial.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Exponential distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import gamma
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Exponential",
"ExponentialWithSoftplusRate",
]
@tf_export(v1=["distributions.Exponential"])
class Exponential(gamma.Gamma):
"""Exponential distribution.
The Exponential distribution is parameterized by an event `rate` parameter.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; lambda, x > 0) = exp(-lambda x) / Z
Z = 1 / lambda
```
where `rate = lambda` and `Z` is the normalizing constant.
The Exponential distribution is a special case of the Gamma distribution,
i.e.,
```python
Exponential(rate) = Gamma(concentration=1., rate)
```
The Exponential distribution uses a `rate` parameter, or "inverse scale",
which can be intuited as,
```none
X ~ Exponential(rate=1)
Y = X / rate
```
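A brief usage sketch (illustrative only):
```python
dist = Exponential(rate=2.)
dist.prob(1.)    # == 2 * exp(-2.), the density at x = 1.
dist.sample(3)   # Three draws; shape [3] for scalar `rate`.
```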
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
rate,
validate_args=False,
allow_nan_stats=True,
name="Exponential"):
"""Construct Exponential distribution with parameter `rate`.
Args:
rate: Floating point tensor, equivalent to `1 / mean`. Must contain only
positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
# Even though all statistics of an Exponential are defined for valid inputs,
# this is not true in the parent class "Gamma". Therefore, passing
# allow_nan_stats=True through to the parent class results in unnecessary
# asserts.
with ops.name_scope(name, values=[rate]) as name:
self._rate = ops.convert_to_tensor(rate, name="rate")
super(Exponential, self).__init__(
concentration=array_ops.ones([], dtype=self._rate.dtype),
rate=self._rate,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
self._parameters = parameters
self._graph_parents += [self._rate]
@staticmethod
def _param_shapes(sample_shape):
return {"rate": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def rate(self):
return self._rate
def _log_survival_function(self, value):
return self._log_prob(value) - math_ops.log(self._rate)
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], array_ops.shape(self._rate)], 0)
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
sampled = random_ops.random_uniform(
shape,
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
seed=seed,
dtype=self.dtype)
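# Inverse-transform sampling: if U ~ Uniform(0, 1), then -log(U) / rate
# follows an Exponential(rate) distribution.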
return -math_ops.log(sampled) / self._rate
class ExponentialWithSoftplusRate(Exponential):
"""Exponential with softplus transform on `rate`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Exponential(tf.nn.softplus(rate)).",
warn_once=True)
def __init__(self,
rate,
validate_args=False,
allow_nan_stats=True,
name="ExponentialWithSoftplusRate"):
parameters = dict(locals())
with ops.name_scope(name, values=[rate]) as name:
super(ExponentialWithSoftplusRate, self).__init__(
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-master
|
tensorflow/python/ops/distributions/exponential.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import re
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = [
"Bijector",
]
class _Mapping(collections.namedtuple(
"_Mapping", ["x", "y", "ildj_map", "kwargs"])):
"""Helper class to make it easier to manage caching in `Bijector`."""
def __new__(cls, x=None, y=None, ildj_map=None, kwargs=None):
"""Custom __new__ so namedtuple items have defaults.
Args:
x: `Tensor`. Forward.
y: `Tensor`. Inverse.
ildj_map: `Dictionary`. This is a mapping from event_ndims to a `Tensor`
representing the inverse log det jacobian.
kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
Returns:
mapping: New instance of _Mapping.
"""
return super(_Mapping, cls).__new__(cls, x, y, ildj_map, kwargs)
@property
def x_key(self):
"""Returns key used for caching Y=g(X)."""
return (self.x,) + self._deep_tuple(tuple(sorted(self.kwargs.items())))
@property
def y_key(self):
"""Returns key used for caching X=g^{-1}(Y)."""
return (self.y,) + self._deep_tuple(tuple(sorted(self.kwargs.items())))
def merge(self, x=None, y=None, ildj_map=None, kwargs=None, mapping=None):
"""Returns new _Mapping with args merged with self.
Args:
x: `Tensor`. Forward.
y: `Tensor`. Inverse.
ildj_map: `Dictionary`. This is a mapping from event_ndims to a `Tensor`
representing the inverse log det jacobian.
kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
mapping: Instance of _Mapping to merge. Can only be specified if no other
arg is specified.
Returns:
mapping: New instance of `_Mapping` which has inputs merged with self.
Raises:
ValueError: if mapping and any other arg is not `None`.
"""
if mapping is None:
mapping = _Mapping(x=x, y=y, ildj_map=ildj_map, kwargs=kwargs)
elif any(arg is not None for arg in [x, y, ildj_map, kwargs]):
raise ValueError("Cannot simultaneously specify mapping and individual "
"arguments.")
return _Mapping(
x=self._merge(self.x, mapping.x),
y=self._merge(self.y, mapping.y),
ildj_map=self._merge_dicts(self.ildj_map, mapping.ildj_map),
kwargs=self._merge(self.kwargs, mapping.kwargs))
def _merge_dicts(self, old=None, new=None):
"""Helper to merge two dictionaries."""
old = {} if old is None else old
new = {} if new is None else new
for k, v in six.iteritems(new):
val = old.get(k, None)
if val is not None and val != v:
raise ValueError("Found different value for existing key "
"(key:{} old_value:{} new_value:{}".format(
k, old[k], v))
old[k] = v
return old
def _merge(self, old, new):
"""Helper to merge which handles merging one value."""
if old is None:
return new
elif new is not None and old != new:
raise ValueError("Incompatible values: %s != %s" % (old, new))
return old
def _deep_tuple(self, x):
"""Converts lists of lists to tuples of tuples."""
return (tuple(map(self._deep_tuple, x))
if isinstance(x, (list, tuple)) else x)
@six.add_metaclass(abc.ABCMeta)
class Bijector(object):
r"""Interface for transformations of a `Distribution` sample.
Bijectors can be used to represent any differentiable and injective
(one to one) function defined on an open subset of `R^n`. Some non-injective
transformations are also supported (see "Non Injective Transforms" below).
#### Mathematical Details
A `Bijector` implements a [smooth covering map](
https://en.wikipedia.org/wiki/Local_diffeomorphism), i.e., a local
diffeomorphism such that every point in the target has a neighborhood evenly
covered by a map ([see also](
https://en.wikipedia.org/wiki/Covering_space#Covering_of_a_manifold)).
A `Bijector` is used by `TransformedDistribution` but can be generally used
for transforming a `Distribution` generated `Tensor`. A `Bijector` is
characterized by three operations:
1. Forward
Useful for turning one random outcome into another random outcome from a
different distribution.
2. Inverse
Useful for "reversing" a transformation to compute one probability in
terms of another.
3. `log_det_jacobian(x)`
"The log of the absolute value of the determinant of the matrix of all
first-order partial derivatives of the inverse function."
Useful for inverting a transformation to compute one probability in terms
of another. Geometrically, the Jacobian determinant is the volume of the
transformation and is used to scale the probability.
We take the absolute value of the determinant before log to avoid NaN
values. Geometrically, a negative determinant corresponds to an
orientation-reversing transformation. It is ok for us to discard the sign
of the determinant because we only integrate everywhere-nonnegative
functions (probability densities) and the correct orientation is always the
one that produces a nonnegative integrand.
By convention, transformations of random variables are named in terms of the
forward transformation. The forward transformation creates samples, the
inverse is useful for computing probabilities.
#### Example Uses
- Basic properties:
```python
x = ... # A tensor.
# Evaluate forward transformation.
fwd_x = my_bijector.forward(x)
x == my_bijector.inverse(fwd_x)
x != my_bijector.forward(fwd_x) # Not equal because x != g(g(x)).
```
- Computing a log-likelihood:
```python
def transformed_log_prob(bijector, log_prob, x):
return (bijector.inverse_log_det_jacobian(x, event_ndims=0) +
log_prob(bijector.inverse(x)))
```
- Transforming a random outcome:
```python
def transformed_sample(bijector, x):
return bijector.forward(x)
```
#### Example Bijectors
- "Exponential"
```none
Y = g(X) = exp(X)
X ~ Normal(0, 1) # Univariate.
```
Implies:
```none
g^{-1}(Y) = log(Y)
|Jacobian(g^{-1})(y)| = 1 / y
Y ~ LogNormal(0, 1), i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= (1 / y) Normal(log(y); 0, 1)
```
Here is an example of how one might implement the `Exp` bijector:
```python
class Exp(Bijector):
def __init__(self, validate_args=False, name="exp"):
super(Exp, self).__init__(
validate_args=validate_args,
forward_min_event_ndims=0,
name=name)
def _forward(self, x):
return math_ops.exp(x)
def _inverse(self, y):
return math_ops.log(y)
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jacobian(self._inverse(y))
def _forward_log_det_jacobian(self, x):
# Notice that we needn't do any reducing, even when `event_ndims > 0`.
# The base Bijector class will handle reducing for us; it knows how
# to do so because we called `super` `__init__` with
# `forward_min_event_ndims = 0`.
return x
```
- "Affine"
```none
Y = g(X) = sqrtSigma * X + mu
X ~ MultivariateNormal(0, I_d)
```
Implies:
```none
g^{-1}(Y) = inv(sqrtSigma) * (Y - mu)
|Jacobian(g^{-1})(y)| = det(inv(sqrtSigma))
Y ~ MultivariateNormal(mu, sqrtSigma) , i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= det(sqrtSigma)^(-d) *
MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d)
```
#### Min_event_ndims and Naming
Bijectors are named for the dimensionality of data they act on (i.e. without
broadcasting). We can think of bijectors as having an intrinsic
`min_event_ndims`, which is the minimum number of dimensions the bijector
acts on. For
instance, a Cholesky decomposition requires a matrix, and hence
`min_event_ndims=2`.
Some examples:
`AffineScalar: min_event_ndims=0`
`Affine: min_event_ndims=1`
`Cholesky: min_event_ndims=2`
`Exp: min_event_ndims=0`
`Sigmoid: min_event_ndims=0`
`SoftmaxCentered: min_event_ndims=1`
Note the difference between `Affine` and `AffineScalar`. `AffineScalar`
operates on scalar events, whereas `Affine` operates on vector-valued events.
More generally, there is a `forward_min_event_ndims` and an
`inverse_min_event_ndims`. In most cases, these will be the same.
However, for some shape changing bijectors, these will be different
(e.g. a bijector which pads an extra dimension at the end might have
`forward_min_event_ndims=0` and `inverse_min_event_ndims=1`).
#### Jacobian Determinant
The Jacobian determinant is a reduction over `event_ndims - min_event_ndims`
(`forward_min_event_ndims` for `forward_log_det_jacobian` and
`inverse_min_event_ndims` for `inverse_log_det_jacobian`).
To see this, consider the `Exp` `Bijector` applied to a `Tensor` which has
sample, batch, and event (S, B, E) shape semantics. Suppose the `Tensor`'s
partitioned-shape is `(S=[4], B=[2], E=[3, 3])`. The shape of the `Tensor`
returned by `forward` and `inverse` is unchanged, i.e., `[4, 2, 3, 3]`.
However the shape returned by `inverse_log_det_jacobian` is `[4, 2]` because
the Jacobian determinant is a reduction over the event dimensions.
Another example is the `Affine` `Bijector`. Because `min_event_ndims = 1`, the
Jacobian determinant reduction is over `event_ndims - 1`.
It is sometimes useful to implement the inverse Jacobian determinant as the
negative forward Jacobian determinant. For example,
```python
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jac(self._inverse(y)) # Note negation.
```
The correctness of this approach can be seen from the following claim.
- Claim:
Assume `Y = g(X)` is a bijection whose derivative exists and is nonzero
for its domain, i.e., `dY/dX = d/dX g(X) != 0`. Then:
```none
(log o det o jacobian o g^{-1})(Y) = -(log o det o jacobian o g)(X)
```
- Proof:
From the bijective, nonzero differentiability of `g`, the
[inverse function theorem](
https://en.wikipedia.org/wiki/Inverse_function_theorem)
implies `g^{-1}` is differentiable in the image of `g`.
Applying the chain rule to `y = g(x) = g(g^{-1}(y))` yields
`I = g'(g^{-1}(y))*g^{-1}'(y)`.
The same theorem also implies `g^{-1}'` is non-singular therefore:
`inv[ g'(g^{-1}(y)) ] = g^{-1}'(y)`.
The claim follows from [properties of determinant](
https://en.wikipedia.org/wiki/Determinant#Multiplicativity_and_matrix_groups).
Generally it's preferable to directly implement the inverse Jacobian
determinant. This should have superior numerical stability and will often
share subgraphs with the `_inverse` implementation.
#### Is_constant_jacobian
Certain bijectors have constant jacobian matrices. For instance, the
`Affine` bijector encodes multiplication by a matrix plus a shift, and its
jacobian matrix is that same matrix.
`is_constant_jacobian` encodes the fact that the jacobian matrix is constant.
The semantics of this argument are the following:
* Repeated calls to "log_det_jacobian" functions with the same
`event_ndims` (but not necessarily same input), will return the first
computed jacobian (because the matrix is constant, and hence is input
independent).
* `log_det_jacobian` implementations are merely broadcastable to the true
`log_det_jacobian` (because, again, the jacobian matrix is input
independent). Specifically, `log_det_jacobian` is implemented as the
log jacobian determinant for a single input.
```python
class Identity(Bijector):
def __init__(self, validate_args=False, name="identity"):
super(Identity, self).__init__(
is_constant_jacobian=True,
validate_args=validate_args,
forward_min_event_ndims=0,
name=name)
def _forward(self, x):
return x
def _inverse(self, y):
return y
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jacobian(self._inverse(y))
def _forward_log_det_jacobian(self, x):
# The full log jacobian determinant would be array_ops.zero_like(x).
# However, we circumvent materializing that, since the jacobian
# calculation is input independent, and we specify it for one input.
return constant_op.constant(0., x.dtype.base_dtype)
```
#### Subclass Requirements
- Subclasses typically implement:
- `_forward`,
- `_inverse`,
- `_inverse_log_det_jacobian`,
- `_forward_log_det_jacobian` (optional).
The `_forward_log_det_jacobian` is called when the bijector is inverted via
the `Invert` bijector. If undefined, a slightly less efficient
calculation, `-1 * _inverse_log_det_jacobian`, is used.
If the bijector changes the shape of the input, you must also implement:
- _forward_event_shape_tensor,
- _forward_event_shape (optional),
- _inverse_event_shape_tensor,
- _inverse_event_shape (optional).
By default the event-shape is assumed unchanged from input.
- If the `Bijector`'s use is limited to `TransformedDistribution` (or friends
like `QuantizedDistribution`) then depending on your use, you may not need
to implement all of `_forward` and `_inverse` functions.
Examples:
1. Sampling (e.g., `sample`) only requires `_forward`.
2. Probability functions (e.g., `prob`, `cdf`, `survival`) only require
`_inverse` (and related).
3. Only calling probability functions on the output of `sample` means
`_inverse` can be implemented as a cache lookup.
See "Example Uses" [above] which shows how these functions are used to
transform a distribution. (Note: `_forward` could theoretically be
implemented as a cache lookup but this would require controlling the
underlying sample generation mechanism.)
#### Non Injective Transforms
**WARNING** Handling of non-injective transforms is subject to change.
Non injective maps `g` are supported, provided their domain `D` can be
partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that,
ignoring sets of measure zero, the restriction of `g` to each subset is a
differentiable bijection onto `g(D)`. In particular, this implies that for
`y in g(D)`, the set inverse, i.e. `g^{-1}(y) = {x in D : g(x) = y}`, always
contains exactly `k` distinct points.
The property, `_is_injective` is set to `False` to indicate that the bijector
is not injective, yet satisfies the above condition.
The usual bijector API is modified in the case `_is_injective is False` (see
method docstrings for specifics). Here we show by example the `AbsoluteValue`
bijector. In this case, the domain `D = (-inf, inf)`, can be partitioned
into `D1 = (-inf, 0)`, `D2 = {0}`, and `D3 = (0, inf)`. Let `gi` be the
restriction of `g` to `Di`, then both `g1` and `g3` are bijections onto
`(0, inf)`, with `g1^{-1}(y) = -y`, and `g3^{-1}(y) = y`. We will use
`g1` and `g3` to define bijector methods over `D1` and `D3`. `D2 = {0}` is
an oddball in that `g2` is one to one, and the derivative is not well defined.
Fortunately, when considering transformations of probability densities
(e.g. in `TransformedDistribution`), sets of measure zero have no effect in
theory, and only a small effect in 32 or 64 bit precision. For that reason,
we define `inverse(0)` and `inverse_log_det_jacobian(0)` both as `[0, 0]`,
which is convenient and results in a left-semicontinuous pdf.
```python
abs = tfp.distributions.bijectors.AbsoluteValue()
abs.forward(-1.)
==> 1.
abs.forward(1.)
==> 1.
abs.inverse(1.)
==> (-1., 1.)
# The |dX/dY| is constant, == 1. So Log|dX/dY| == 0.
abs.inverse_log_det_jacobian(1., event_ndims=0)
==> (0., 0.)
# Special case handling of 0.
abs.inverse(0.)
==> (0., 0.)
abs.inverse_log_det_jacobian(0., event_ndims=0)
==> (0., 0.)
```
"""
@abc.abstractmethod
def __init__(self,
graph_parents=None,
is_constant_jacobian=False,
validate_args=False,
dtype=None,
forward_min_event_ndims=None,
inverse_min_event_ndims=None,
name=None):
"""Constructs Bijector.
A `Bijector` transforms random variables into new random variables.
Examples:
```python
# Create the Y = g(X) = X transform.
identity = Identity()
# Create the Y = g(X) = exp(X) transform.
exp = Exp()
```
See `Bijector` subclass docstring for more details and specific examples.
Args:
graph_parents: Python list of graph prerequisites of this `Bijector`.
is_constant_jacobian: Python `bool` indicating that the Jacobian matrix is
not a function of the input.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
enforced.
forward_min_event_ndims: Python `integer` indicating the minimum number of
dimensions `forward` operates on.
inverse_min_event_ndims: Python `integer` indicating the minimum number of
dimensions `inverse` operates on. Will be set to
`forward_min_event_ndims` by default, if no value is provided.
name: The name to give Ops created by the initializer.
Raises:
ValueError: If neither `forward_min_event_ndims` and
`inverse_min_event_ndims` are specified, or if either of them is
negative.
ValueError: If a member of `graph_parents` is not a `Tensor`.
"""
self._graph_parents = graph_parents or []
if forward_min_event_ndims is None and inverse_min_event_ndims is None:
raise ValueError("Must specify at least one of `forward_min_event_ndims` "
"and `inverse_min_event_ndims`.")
elif inverse_min_event_ndims is None:
inverse_min_event_ndims = forward_min_event_ndims
elif forward_min_event_ndims is None:
forward_min_event_ndims = inverse_min_event_ndims
if not isinstance(forward_min_event_ndims, int):
raise TypeError("Expected forward_min_event_ndims to be of "
"type int, got {}".format(
type(forward_min_event_ndims).__name__))
if not isinstance(inverse_min_event_ndims, int):
raise TypeError("Expected inverse_min_event_ndims to be of "
"type int, got {}".format(
type(inverse_min_event_ndims).__name__))
if forward_min_event_ndims < 0:
raise ValueError("forward_min_event_ndims must be a non-negative "
"integer.")
if inverse_min_event_ndims < 0:
raise ValueError("inverse_min_event_ndims must be a non-negative "
"integer.")
self._forward_min_event_ndims = forward_min_event_ndims
self._inverse_min_event_ndims = inverse_min_event_ndims
self._is_constant_jacobian = is_constant_jacobian
self._constant_ildj_map = {}
self._validate_args = validate_args
self._dtype = dtype
self._from_y = {}
self._from_x = {}
if name:
self._name = name
else:
# We want the default convention to be snake_case rather than CamelCase
# since `Chain` uses bijector.name as the kwargs dictionary key.
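# E.g. "SoftmaxCentered" becomes "softmax_centered".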
def camel_to_snake(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
self._name = camel_to_snake(type(self).__name__.lstrip("_"))
for i, t in enumerate(self._graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
@property
def graph_parents(self):
"""Returns this `Bijector`'s graph_parents as a Python list."""
return self._graph_parents
@property
def forward_min_event_ndims(self):
"""Returns the minimal number of dimensions bijector.forward operates on."""
return self._forward_min_event_ndims
@property
def inverse_min_event_ndims(self):
"""Returns the minimal number of dimensions bijector.inverse operates on."""
return self._inverse_min_event_ndims
@property
def is_constant_jacobian(self):
"""Returns true iff the Jacobian matrix is not a function of x.
Note: the Jacobian matrix is constant either for both forward and inverse,
or for neither.
Returns:
is_constant_jacobian: Python `bool`.
"""
return self._is_constant_jacobian
@property
def _is_injective(self):
"""Returns true iff the forward map `g` is injective (one-to-one function).
**WARNING** This hidden property and its behavior are subject to change.
Note: Non-injective maps `g` are supported, provided their domain `D` can
be partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that,
ignoring sets of measure zero, the restriction of `g` to each subset is a
differentiable bijection onto `g(D)`.
Returns:
is_injective: Python `bool`.
"""
return True
@property
def validate_args(self):
"""Returns True if Tensor arguments will be validated."""
return self._validate_args
@property
def dtype(self):
"""dtype of `Tensor`s transformable by this distribution."""
return self._dtype
@property
def name(self):
"""Returns the string name of this `Bijector`."""
return self._name
def _forward_event_shape_tensor(self, input_shape):
"""Subclass implementation for `forward_event_shape_tensor` function."""
# By default, we assume event_shape is unchanged.
return input_shape
def forward_event_shape_tensor(self,
input_shape,
name="forward_event_shape_tensor"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
input_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `forward` function.
name: name to give to the op
Returns:
forward_event_shape_tensor: `Tensor`, `int32` vector indicating
event-portion shape after applying `forward`.
"""
with self._name_scope(name, [input_shape]):
input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32,
name="input_shape")
return self._forward_event_shape_tensor(input_shape)
def _forward_event_shape(self, input_shape):
"""Subclass implementation for `forward_event_shape` public function."""
# By default, we assume event_shape is unchanged.
return input_shape
def forward_event_shape(self, input_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `forward_event_shape_tensor`. May be only partially defined.
Args:
input_shape: `TensorShape` indicating event-portion shape passed into
`forward` function.
Returns:
forward_event_shape_tensor: `TensorShape` indicating event-portion shape
after applying `forward`. Possibly unknown.
"""
return self._forward_event_shape(tensor_shape.TensorShape(input_shape))
def _inverse_event_shape_tensor(self, output_shape):
"""Subclass implementation for `inverse_event_shape_tensor` function."""
# By default, we assume event_shape is unchanged.
return output_shape
def inverse_event_shape_tensor(self,
output_shape,
name="inverse_event_shape_tensor"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
output_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `inverse` function.
name: name to give to the op
Returns:
inverse_event_shape_tensor: `Tensor`, `int32` vector indicating
event-portion shape after applying `inverse`.
"""
with self._name_scope(name, [output_shape]):
output_shape = ops.convert_to_tensor(output_shape, dtype=dtypes.int32,
name="output_shape")
return self._inverse_event_shape_tensor(output_shape)
def _inverse_event_shape(self, output_shape):
"""Subclass implementation for `inverse_event_shape` public function."""
# By default, we assume event_shape is unchanged.
return tensor_shape.TensorShape(output_shape)
def inverse_event_shape(self, output_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `inverse_event_shape_tensor`. May be only partially defined.
Args:
output_shape: `TensorShape` indicating event-portion shape passed into
`inverse` function.
Returns:
inverse_event_shape_tensor: `TensorShape` indicating event-portion shape
after applying `inverse`. Possibly unknown.
"""
return self._inverse_event_shape(output_shape)
def _forward(self, x):
"""Subclass implementation for `forward` public function."""
raise NotImplementedError("forward not implemented.")
def _call_forward(self, x, name, **kwargs):
with self._name_scope(name, [x]):
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
if not self._is_injective: # No caching for non-injective
return self._forward(x, **kwargs)
mapping = self._lookup(x=x, kwargs=kwargs)
if mapping.y is not None:
return mapping.y
mapping = mapping.merge(y=self._forward(x, **kwargs))
self._cache(mapping)
return mapping.y
def forward(self, x, name="forward"):
"""Returns the forward `Bijector` evaluation, i.e., X = g(Y).
Args:
x: `Tensor`. The input to the "forward" evaluation.
name: The name to give this op.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
NotImplementedError: if `_forward` is not implemented.
"""
return self._call_forward(x, name)
def _inverse(self, y):
"""Subclass implementation for `inverse` public function."""
raise NotImplementedError("inverse not implemented")
def _call_inverse(self, y, name, **kwargs):
with self._name_scope(name, [y]):
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
if not self._is_injective: # No caching for non-injective
return self._inverse(y, **kwargs)
mapping = self._lookup(y=y, kwargs=kwargs)
if mapping.x is not None:
return mapping.x
mapping = mapping.merge(x=self._inverse(y, **kwargs))
self._cache(mapping)
return mapping.x
def inverse(self, y, name="inverse"):
"""Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).
Args:
y: `Tensor`. The input to the "inverse" evaluation.
name: The name to give this op.
Returns:
`Tensor`, if this bijector is injective.
If not injective, returns the k-tuple containing the unique
`k` points `(x1, ..., xk)` such that `g(xi) = y`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if `_inverse` is not implemented.
"""
return self._call_inverse(y, name)
def _inverse_log_det_jacobian(self, y):
"""Subclass implementation of `inverse_log_det_jacobian` public function.
In particular, this method differs from the public function, in that it
does not take `event_ndims`. Thus, this implements the minimal Jacobian
determinant calculation (i.e. over `inverse_min_event_ndims`).
Args:
y: `Tensor`. The input to the "inverse_log_det_jacobian" evaluation.
Returns:
inverse_log_det_jacobian: `Tensor`, if this bijector is injective.
If not injective, returns the k-tuple containing jacobians for the
unique `k` points `(x1, ..., xk)` such that `g(xi) = y`.
"""
raise NotImplementedError("inverse_log_det_jacobian not implemented.")
def _call_inverse_log_det_jacobian(self, y, event_ndims, name, **kwargs):
with self._name_scope(name, [y]):
if event_ndims in self._constant_ildj_map:
return self._constant_ildj_map[event_ndims]
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
with ops.control_dependencies(self._check_valid_event_ndims(
min_event_ndims=self.inverse_min_event_ndims,
event_ndims=event_ndims)):
if not self._is_injective: # No caching for non-injective
try:
ildjs = self._inverse_log_det_jacobian(y, **kwargs)
return tuple(self._reduce_jacobian_det_over_event(
y, ildj, self.inverse_min_event_ndims, event_ndims)
for ildj in ildjs)
except NotImplementedError as original_exception:
try:
x = self._inverse(y, **kwargs)
fldjs = self._forward_log_det_jacobian(x, **kwargs)
return tuple(self._reduce_jacobian_det_over_event(
x, -fldj, self.forward_min_event_ndims, event_ndims)
for fldj in fldjs)
except NotImplementedError:
raise original_exception
mapping = self._lookup(y=y, kwargs=kwargs)
if mapping.ildj_map is not None and event_ndims in mapping.ildj_map:
return mapping.ildj_map[event_ndims]
try:
x = None # Not needed; leave cache as is.
ildj = self._inverse_log_det_jacobian(y, **kwargs)
ildj = self._reduce_jacobian_det_over_event(
y, ildj, self.inverse_min_event_ndims, event_ndims)
except NotImplementedError as original_exception:
try:
x = (mapping.x if mapping.x is not None
else self._inverse(y, **kwargs))
ildj = -self._forward_log_det_jacobian(x, **kwargs)
ildj = self._reduce_jacobian_det_over_event(
x, ildj, self.forward_min_event_ndims, event_ndims)
except NotImplementedError:
raise original_exception
mapping = mapping.merge(x=x, ildj_map={event_ndims: ildj})
self._cache(mapping)
if self.is_constant_jacobian:
self._constant_ildj_map[event_ndims] = ildj
return ildj
def inverse_log_det_jacobian(
self, y, event_ndims, name="inverse_log_det_jacobian"):
"""Returns the (log o det o Jacobian o inverse)(y).
Mathematically, returns: `log(det(dX/dY))(Y)`. (Recall that: `X=g^{-1}(Y)`.)
Note that `forward_log_det_jacobian` is the negative of this function,
evaluated at `g^{-1}(y)`.
Args:
y: `Tensor`. The input to the "inverse" Jacobian determinant evaluation.
event_ndims: Number of dimensions in the probabilistic events being
transformed. Must be greater than or equal to
`self.inverse_min_event_ndims`. The result is summed over the final
dimensions to produce a scalar Jacobian determinant for each event,
i.e. it has shape `y.shape.ndims - event_ndims` dimensions.
name: The name to give this op.
Returns:
`Tensor`, if this bijector is injective.
If not injective, returns the tuple of local log det
Jacobians, `log(det(Dg_i^{-1}(y)))`, where `g_i` is the restriction
of `g` to the `ith` partition `Di`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if `_inverse_log_det_jacobian` is not implemented.
"""
return self._call_inverse_log_det_jacobian(y, event_ndims, name)
def _forward_log_det_jacobian(self, x):
"""Subclass implementation of `forward_log_det_jacobian` public function.
In particular, this method differs from the public function, in that it
does not take `event_ndims`. Thus, this implements the minimal Jacobian
determinant calculation (i.e. over `forward_min_event_ndims`).
Args:
x: `Tensor`. The input to the "forward_log_det_jacobian" evaluation.
Returns:
forward_log_det_jacobian: `Tensor`, if this bijector is injective.
If not injective, returns the k-tuple containing jacobians for the
unique `k` points `(x1, ..., xk)` such that `g(xi) = y`.
"""
raise NotImplementedError(
"forward_log_det_jacobian not implemented.")
def _call_forward_log_det_jacobian(self, x, event_ndims, name, **kwargs):
if not self._is_injective:
raise NotImplementedError(
"forward_log_det_jacobian cannot be implemented for non-injective "
"transforms.")
with self._name_scope(name, [x]):
with ops.control_dependencies(self._check_valid_event_ndims(
min_event_ndims=self.forward_min_event_ndims,
event_ndims=event_ndims)):
if event_ndims in self._constant_ildj_map:
# Need "-1. *" to avoid invalid-unary-operand-type linter warning.
return -1. * self._constant_ildj_map[event_ndims]
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
if not self._is_injective: # No caching for non-injective
try:
fldjs = self._forward_log_det_jacobian(x, **kwargs) # No caching.
return tuple(self._reduce_jacobian_det_over_event(
x, fldj, self.forward_min_event_ndims, event_ndims)
for fldj in fldjs)
except NotImplementedError as original_exception:
try:
y = self._forward(x, **kwargs)
ildjs = self._inverse_log_det_jacobian(y, **kwargs)
return tuple(self._reduce_jacobian_det_over_event(
y, -ildj, self.inverse_min_event_ndims, event_ndims)
for ildj in ildjs)
except NotImplementedError:
raise original_exception
mapping = self._lookup(x=x, kwargs=kwargs)
if mapping.ildj_map is not None and event_ndims in mapping.ildj_map:
return -mapping.ildj_map[event_ndims]
try:
y = None # Not needed; leave cache as is.
ildj = -self._forward_log_det_jacobian(x, **kwargs)
ildj = self._reduce_jacobian_det_over_event(
x, ildj, self.forward_min_event_ndims, event_ndims)
except NotImplementedError as original_exception:
try:
y = (mapping.y if mapping.y is not None
else self._forward(x, **kwargs))
ildj = self._inverse_log_det_jacobian(y, **kwargs)
ildj = self._reduce_jacobian_det_over_event(
y, ildj, self.inverse_min_event_ndims, event_ndims)
except NotImplementedError:
raise original_exception
mapping = mapping.merge(y=y, ildj_map={event_ndims: ildj})
self._cache(mapping)
if self.is_constant_jacobian:
self._constant_ildj_map[event_ndims] = ildj
return -ildj
def forward_log_det_jacobian(
self, x, event_ndims, name="forward_log_det_jacobian"):
"""Returns both the forward_log_det_jacobian.
Args:
x: `Tensor`. The input to the "forward" Jacobian determinant evaluation.
event_ndims: Number of dimensions in the probabilistic events being
transformed. Must be greater than or equal to
`self.forward_min_event_ndims`. The result is summed over the final
dimensions to produce a scalar Jacobian determinant for each event,
i.e. it has shape `x.shape.ndims - event_ndims` dimensions.
name: The name to give this op.
Returns:
`Tensor`, if this bijector is injective.
If not injective this is not implemented.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_forward_log_det_jacobian`
nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented, or
this is a non-injective bijector.
"""
return self._call_forward_log_det_jacobian(x, event_ndims, name)
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(
name, values=(values or []) + self.graph_parents) as scope:
yield scope
def _maybe_assert_dtype(self, x):
"""Helper to check dtype when self.dtype is known."""
if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype:
raise TypeError("Input had dtype %s but expected %s." %
(self.dtype, x.dtype))
def _cache(self, mapping):
"""Helper which stores mapping info in forward/inverse dicts."""
# Merging from lookup is an added check that we're not overwriting anything
# which is not None.
mapping = mapping.merge(mapping=self._lookup(
mapping.x, mapping.y, mapping.kwargs))
if mapping.x is None and mapping.y is None:
raise ValueError("Caching expects at least one of (x,y) to be known, "
"i.e., not None.")
self._from_x[mapping.x_key] = mapping
self._from_y[mapping.y_key] = mapping
def _lookup(self, x=None, y=None, kwargs=None):
"""Helper which retrieves mapping info from forward/inverse dicts."""
mapping = _Mapping(x=x, y=y, kwargs=kwargs)
# Since _cache requires both x,y to be set, we only need to do one cache
# lookup since the mapping is always in both or neither.
if mapping.x is not None:
return self._from_x.get(mapping.x_key, mapping)
if mapping.y is not None:
return self._from_y.get(mapping.y_key, mapping)
return mapping
def _reduce_jacobian_det_over_event(
self, y, ildj, min_event_ndims, event_ndims):
"""Reduce jacobian over event_ndims - min_event_ndims."""
# In this case, we need to tile the Jacobian over the event and reduce.
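    # Illustrative example: with min_event_ndims=0, event_ndims=2, and a
    # scalar `ildj`, `ones` takes the shape of the final two dimensions of
    # `y`, so `ones * ildj` tiles the scalar before it is summed over those
    # dimensions below.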
y_rank = array_ops.rank(y)
y_shape = array_ops.shape(y)[
y_rank - event_ndims : y_rank - min_event_ndims]
ones = array_ops.ones(y_shape, ildj.dtype)
reduced_ildj = math_ops.reduce_sum(
ones * ildj,
axis=self._get_event_reduce_dims(min_event_ndims, event_ndims))
# The multiplication by ones can change the inferred static shape so we try
# to recover as much as possible.
event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)
if (event_ndims_ is not None and
y.shape.ndims is not None and
ildj.shape.ndims is not None):
y_shape = y.shape[y.shape.ndims - event_ndims_ :
y.shape.ndims - min_event_ndims]
broadcast_shape = array_ops.broadcast_static_shape(ildj.shape, y_shape)
reduced_ildj.set_shape(
broadcast_shape[: broadcast_shape.ndims - (
event_ndims_ - min_event_ndims)])
return reduced_ildj
def _get_event_reduce_dims(self, min_event_ndims, event_ndims):
"""Compute the reduction dimensions given event_ndims."""
event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)
if event_ndims_ is not None:
return [-index for index in range(1, event_ndims_ - min_event_ndims + 1)]
else:
reduce_ndims = event_ndims - min_event_ndims
return math_ops.range(-reduce_ndims, 0)
def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
"""Check whether event_ndims is atleast min_event_ndims."""
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
event_ndims_ = tensor_util.constant_value(event_ndims)
assertions = []
if not event_ndims.dtype.is_integer:
raise ValueError("Expected integer dtype, got dtype {}".format(
event_ndims.dtype))
if event_ndims_ is not None:
if event_ndims.shape.ndims != 0:
raise ValueError("Expected scalar event_ndims, got shape {}".format(
event_ndims.shape))
if min_event_ndims > event_ndims_:
raise ValueError("event_ndims ({}) must be larger than "
"min_event_ndims ({})".format(
event_ndims_, min_event_ndims))
elif self.validate_args:
assertions += [
check_ops.assert_greater_equal(event_ndims, min_event_ndims)]
if event_ndims.shape.is_fully_defined():
if event_ndims.shape.ndims != 0:
raise ValueError("Expected scalar shape, got ndims {}".format(
event_ndims.shape.ndims))
elif self.validate_args:
assertions += [
check_ops.assert_rank(event_ndims, 0, message="Expected scalar.")]
return assertions
def _maybe_get_static_event_ndims(self, event_ndims):
"""Helper which returns tries to return an integer static value."""
event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
if isinstance(event_ndims_, (np.generic, np.ndarray)):
if event_ndims_.dtype not in (np.int32, np.int64):
raise ValueError("Expected integer dtype, got dtype {}".format(
event_ndims_.dtype))
if isinstance(event_ndims_, np.ndarray) and len(event_ndims_.shape):
raise ValueError("Expected a scalar integer, got {}".format(
event_ndims_))
event_ndims_ = int(event_ndims_)
return event_ndims_
|
tensorflow-master
|
tensorflow/python/ops/distributions/bijector_impl.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import types
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"ReparameterizationType",
"FULLY_REPARAMETERIZED",
"NOT_REPARAMETERIZED",
"Distribution",
]
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape",
"batch_shape_tensor",
"cdf",
"covariance",
"cross_entropy",
"entropy",
"event_shape",
"event_shape_tensor",
"kl_divergence",
"log_cdf",
"log_prob",
"log_survival_function",
"mean",
"mode",
"prob",
"sample",
"stddev",
"survival_function",
"variance",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
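  For example, a minimal sketch (illustrative only):
  ```python
  f = lambda: None
  g = _copy_fn(f)
  g.__doc__ = "New docstring."  # Leaves `f.__doc__` unchanged.
  ```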
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
def _convert_to_tensor(value, name=None, preferred_dtype=None):
"""Converts to tensor avoiding an eager bug that loses float precision."""
# TODO(b/116672045): Remove this function.
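  # In eager mode, a float value converted with an integer/bool
  # `preferred_dtype` could lose precision, so convert without the
  # preference first and keep the result if it is already floating point.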
if (context.executing_eagerly() and preferred_dtype is not None and
(preferred_dtype.is_integer or preferred_dtype.is_bool)):
v = ops.convert_to_tensor(value, name=name)
if v.dtype.is_floating:
return v
return ops.convert_to_tensor(
value, name=name, preferred_dtype=preferred_dtype)
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = tf_inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@tf_export(v1=["distributions.ReparameterizationType"])
class ReparameterizationType(object):
"""Instances of this class represent how sampling is reparameterized.
Two static instances exist in the distributions library, signifying
one of two possible properties for samples from a distribution:
`FULLY_REPARAMETERIZED`: Samples from the distribution are fully
reparameterized, and straight-through gradients are supported.
`NOT_REPARAMETERIZED`: Samples from the distribution are not fully
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
sample results in a `stop_gradients` call and use policy
gradients / surrogate loss instead.
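  For example, a minimal sketch of branching on this property (illustrative;
  `dist` is any `Distribution` instance):
  ```python
  if dist.reparameterization_type == tf.distributions.FULLY_REPARAMETERIZED:
    samples = dist.sample(10)  # Gradients can flow to dist's parameters.
  else:
    samples = tf.stop_gradient(dist.sample(10))  # Use a surrogate loss.
  ```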
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self, rep_type):
self._rep_type = rep_type
def __repr__(self):
return "<Reparameteriation Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
    Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
other: Object to compare against.
Returns:
`self is other`.
"""
return self is other
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
tf_export(v1=["distributions.FULLY_REPARAMETERIZED"]).export_constant(
__name__, "FULLY_REPARAMETERIZED")
# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
tf_export(v1=["distributions.NOT_REPARAMETERIZED"]).export_constant(
__name__, "NOT_REPARAMETERIZED")
@six.add_metaclass(_DistributionMeta)
@tf_export(v1=["distributions.Distribution"])
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
  (e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
  The shapes of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
  `log_prob` reflect this broadcasting, as do the return values of `sample`
  and `sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Shapes
There are three important concepts associated with TensorFlow Distributions
shapes:
- Event shape describes the shape of a single draw from the distribution;
it may be dependent across dimensions. For scalar distributions, the event
shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is
`[5]`.
- Batch shape describes independent, not identically distributed draws, aka a
"collection" or "bunch" of distributions.
- Sample shape describes independent, identically distributed draws of batches
from the distribution family.
The event shape and the batch shape are properties of a Distribution object,
whereas the sample shape is associated with a specific call to `sample` or
`log_prob`.
For detailed usage examples of TensorFlow Distributions shapes, see
[this tutorial](
https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb)
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
if not name or name[-1] != "/": # `name` is not a name scope
non_unique_name = name or type(self).__name__
with ops.name_scope(non_unique_name) as name:
pass
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name
@property
def _parameters(self):
return self._parameter_dict
@_parameters.setter
def _parameters(self, value):
"""Intercept assignments to self._parameters to avoid reference cycles.
Parameters are often created using locals(), so we need to clean out any
references to `self` before assigning it to an attribute.
Args:
value: A dictionary of parameters to assign to the `_parameters` property.
"""
if "self" in value:
del value["self"]
self._parameter_dict = value
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
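    For example (illustrative; the exact keys depend on the subclass):
    ```python
    # A scalar distribution parameterized by `loc` and `scale`, e.g. Normal:
    Normal.param_shapes([100])
    # ==> {"loc": [100], "scale": [100]}  (as int32 shape `Tensor`s)
    ```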
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used:
# `parameters = dict(locals())`.
return {k: v for k, v in self._parameters.items()
if not k.startswith("__") and k != "self"}
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
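    For example (illustrative):
    ```python
    n1 = Normal(loc=0., scale=1.)
    n2 = n1.copy(scale=2.)  # Same loc, new scale.
    ```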
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
def _batch_shape_tensor(self):
raise NotImplementedError(
"batch_shape_tensor is not implemented: {}".format(type(self).__name__))
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._batch_shape())
def _event_shape_tensor(self):
raise NotImplementedError(
"event_shape_tensor is not implemented: {}".format(type(self).__name__))
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._event_shape())
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented: {}".format(
type(self).__name__))
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
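    For example (illustrative), for a distribution with `batch_shape=[2]`
    and `event_shape=[]`:
    ```python
    dist.sample()        # Shape [2].
    dist.sample(5)       # Shape [5, 2].
    dist.sample([3, 4])  # Shape [3, 4, 2].
    ```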
"""
return self._call_sample_n(sample_shape, seed, name)
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented: {}".format(
type(self).__name__))
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
def _prob(self, value):
raise NotImplementedError("prob is not implemented: {}".format(
type(self).__name__))
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob"):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented: {}".format(
type(self).__name__))
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented: {}".format(
type(self).__name__))
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError(
"log_survival_function is not implemented: {}".format(
type(self).__name__))
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log1p(-self.cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented: {}".format(
type(self).__name__))
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **kwargs)
except NotImplementedError:
raise original_exception
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
def _entropy(self):
raise NotImplementedError("entropy is not implemented: {}".format(
type(self).__name__))
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented: {}".format(
type(self).__name__))
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _quantile(self, value):
raise NotImplementedError("quantile is not implemented: {}".format(
type(self).__name__))
def _call_quantile(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
return self._quantile(value, **kwargs)
def quantile(self, value, name="quantile"):
"""Quantile function. Aka "inverse cdf" or "percent point function".
Given random variable `X` and `p in [0, 1]`, the `quantile` is:
```none
quantile(p) := x such that P[X <= x] == p
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_quantile(value, name)
def _variance(self):
raise NotImplementedError("variance is not implemented: {}".format(
type(self).__name__))
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError as original_exception:
try:
return math_ops.square(self._stddev())
except NotImplementedError:
raise original_exception
def _stddev(self):
raise NotImplementedError("stddev is not implemented: {}".format(
type(self).__name__))
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError as original_exception:
try:
return math_ops.sqrt(self._variance())
except NotImplementedError:
raise original_exception
def _covariance(self):
raise NotImplementedError("covariance is not implemented: {}".format(
type(self).__name__))
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
def _mode(self):
raise NotImplementedError("mode is not implemented: {}".format(
type(self).__name__))
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def _cross_entropy(self, other):
return kullback_leibler.cross_entropy(
self, other, allow_nan_stats=self.allow_nan_stats)
def cross_entropy(self, other, name="cross_entropy"):
"""Computes the (Shannon) cross entropy.
Denote this distribution (`self`) by `P` and the `other` distribution by
`Q`. Assuming `P, Q` are absolutely continuous with respect to
    one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)
cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
        representing `n` different calculations of (Shannon) cross entropy.
"""
with self._name_scope(name):
return self._cross_entropy(other)
def _kl_divergence(self, other):
return kullback_leibler.kl_divergence(
self, other, allow_nan_stats=self.allow_nan_stats)
def kl_divergence(self, other, name="kl_divergence"):
"""Computes the Kullback--Leibler divergence.
Denote this distribution (`self`) by `p` and the `other` distribution by
`q`. Assuming `p, q` are absolutely continuous with respect to reference
measure `r`, the KL divergence is defined as:
```none
KL[p, q] = E_p[log(p(X)/q(X))]
= -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x)
= H[p, q] - H[p]
```
where `F` denotes the support of the random variable `X ~ p`, `H[., .]`
    denotes (Shannon) cross entropy, and `H[.]` denotes (Shannon) entropy.
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of the Kullback-Leibler
divergence.
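    For example, a minimal sketch (illustrative; relies on the registered
    Normal-Normal KL):
    ```python
    p = Normal(loc=0., scale=1.)
    q = Normal(loc=1., scale=2.)
    kl = p.kl_divergence(q)  # Scalar `Tensor`: analytic KL[p || q].
    ```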
"""
with self._name_scope(name):
return self._kl_divergence(other)
def __str__(self):
return ("tfp.distributions.{type_name}("
"\"{self_name}\""
"{maybe_batch_shape}"
"{maybe_event_shape}"
", dtype={dtype})".format(
type_name=type(self).__name__,
self_name=self.name,
maybe_batch_shape=(", batch_shape={}".format(self.batch_shape)
if self.batch_shape.ndims is not None
else ""),
maybe_event_shape=(", event_shape={}".format(self.event_shape)
if self.event_shape.ndims is not None
else ""),
dtype=self.dtype.name))
def __repr__(self):
return ("<tfp.distributions.{type_name} "
"'{self_name}'"
" batch_shape={batch_shape}"
" event_shape={event_shape}"
" dtype={dtype}>".format(
type_name=type(self).__name__,
self_name=self.name,
batch_shape=self.batch_shape,
event_shape=self.event_shape,
dtype=self.dtype.name))
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32), array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape().dims[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
|
tensorflow-master
|
tensorflow/python/ops/distributions/distribution.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core module for TensorFlow distribution objects and helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import deprecation
# pylint: disable=wildcard-import,unused-import,g-import-not-at-top
with deprecation.silence():
from tensorflow.python.ops.distributions.bernoulli import Bernoulli
from tensorflow.python.ops.distributions.beta import Beta
from tensorflow.python.ops.distributions.categorical import Categorical
from tensorflow.python.ops.distributions.dirichlet import Dirichlet
from tensorflow.python.ops.distributions.dirichlet_multinomial import DirichletMultinomial
from tensorflow.python.ops.distributions.distribution import *
from tensorflow.python.ops.distributions.exponential import Exponential
from tensorflow.python.ops.distributions.gamma import Gamma
from tensorflow.python.ops.distributions.kullback_leibler import *
from tensorflow.python.ops.distributions.laplace import Laplace
from tensorflow.python.ops.distributions.multinomial import Multinomial
from tensorflow.python.ops.distributions.normal import Normal
from tensorflow.python.ops.distributions.student_t import StudentT
from tensorflow.python.ops.distributions.uniform import Uniform
# pylint: enable=wildcard-import,unused-import
del deprecation
|
tensorflow-master
|
tensorflow/python/ops/distributions/distributions.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The DirichletMultinomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"DirichletMultinomial",
]
_dirichlet_multinomial_sample_note = """For each batch of counts,
`value = [n_0, ..., n_{K-1}]`, `P[value]` is the probability that after
sampling `self.total_count` draws from this Dirichlet-Multinomial distribution,
the number of draws falling in class `j` is `n_j`. Since this definition is
[exchangeable](https://en.wikipedia.org/wiki/Exchangeable_random_variables),
different sequences have the same counts, so the probability includes a
combinatorial coefficient.
Note: `value` must be a non-negative tensor with dtype `self.dtype`, have no
fractional components, and such that
`tf.reduce_sum(value, -1) = self.total_count`. Its shape must be broadcastable
with `self.concentration` and `self.total_count`."""
@tf_export(v1=["distributions.DirichletMultinomial"])
class DirichletMultinomial(distribution.Distribution):
"""Dirichlet-Multinomial compound distribution.
The Dirichlet-Multinomial distribution is parameterized by a (batch of)
length-`K` `concentration` vectors (`K > 1`) and a `total_count` number of
trials, i.e., the number of trials per draw from the DirichletMultinomial. It
is defined over a (batch of) length-`K` vector `counts` such that
`tf.reduce_sum(counts, -1) = total_count`. The Dirichlet-Multinomial is
identically the Beta-Binomial distribution when `K = 2`.
#### Mathematical Details
The Dirichlet-Multinomial is a distribution over `K`-class counts, i.e., a
length-`K` vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`.
The probability mass function (pmf) is,
```none
pmf(n; alpha, N) = Beta(alpha + n) / (prod_j n_j!) / Z
Z = Beta(alpha) / N!
```
where:
* `concentration = alpha = [alpha_0, ..., alpha_{K-1}]`, `alpha_j > 0`,
* `total_count = N`, `N` a positive integer,
* `N!` is `N` factorial, and,
* `Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the
[multivariate beta function](
https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),
and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
Dirichlet-Multinomial is a [compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e., its
samples are generated as follows.
1. Choose class probabilities:
`probs = [p_0,...,p_{K-1}] ~ Dir(concentration)`
2. Draw integers:
`counts = [n_0,...,n_{K-1}] ~ Multinomial(total_count, probs)`
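  A minimal NumPy sketch of these two steps (illustrative only; use the
  class's `sample` method in practice):
  ```python
  import numpy as np
  alpha, n = [1., 2., 3.], 10
  probs = np.random.dirichlet(alpha)        # 1. Choose class probabilities.
  counts = np.random.multinomial(n, probs)  # 2. Draw integer counts.
  ```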
The last `concentration` dimension parametrizes a single Dirichlet-Multinomial
distribution. When calling distribution functions (e.g., `dist.prob(counts)`),
`concentration`, `total_count` and `counts` are broadcast to the same shape.
  The last dimension of `counts` corresponds to a single Dirichlet-Multinomial
  distribution.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Pitfalls
The number of classes, `K`, must not exceed:
- the largest integer representable by `self.dtype`, i.e.,
    `2**(mantissa_bits+1)` (IEEE 754),
- the maximum `Tensor` index, i.e., `2**31-1`.
In other words,
```python
K <= min(2**31-1, {
tf.float16: 2**11,
tf.float32: 2**24,
tf.float64: 2**53 }[param.dtype])
```
Note: This condition is validated only when `self.validate_args = True`.
#### Examples
```python
alpha = [1., 2., 3.]
n = 2.
dist = DirichletMultinomial(n, alpha)
```
  Creates a 3-class distribution in which the 3rd class is the most likely to
  be drawn.
The distribution functions can be evaluated on counts.
```python
# counts same shape as alpha.
counts = [0., 0., 2.]
dist.prob(counts) # Shape []
# alpha will be broadcast to [[1., 2., 3.], [1., 2., 3.]] to match counts.
counts = [[1., 1., 0.], [1., 0., 1.]]
dist.prob(counts) # Shape [2]
# alpha will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7]
```
Creates a 2-batch of 3-class distributions.
```python
alpha = [[1., 2., 3.], [4., 5., 6.]] # Shape [2, 3]
n = [3., 3.]
dist = DirichletMultinomial(n, alpha)
# counts will be broadcast to [[2., 1., 0.], [2., 1., 0.]] to match alpha.
counts = [2., 1., 0.]
dist.prob(counts) # Shape [2]
```
"""
# TODO(b/27419586) Change docstring for dtype of concentration once int
# allowed.
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
total_count,
concentration,
validate_args=False,
allow_nan_stats=True,
name="DirichletMultinomial"):
"""Initialize a batch of DirichletMultinomial distributions.
Args:
total_count: Non-negative floating point tensor, whose dtype is the same
as `concentration`. The shape is broadcastable to `[N1,..., Nm]` with
`m >= 0`. Defines this as a batch of `N1 x ... x Nm` different
Dirichlet multinomial distributions. Its components should be equal to
integer values.
      concentration: Positive floating point tensor, whose dtype is the
        same as `total_count`, with shape broadcastable to
        `[N1,..., Nm, K]`, `m >= 0`.
Defines this as a batch of `N1 x ... x Nm` different `K` class Dirichlet
multinomial distributions.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[total_count, concentration]) as name:
# Broadcasting works because:
# * The broadcasting convention is to prepend dimensions of size [1], and
# we use the last dimension for the distribution, whereas
# the batch dimensions are the leading dimensions, which forces the
# distribution dimension to be defined explicitly (i.e. it cannot be
# created automatically by prepending). This forces enough explicitness.
# * All calls involving `counts` eventually require a broadcast between
# `counts` and concentration.
self._total_count = ops.convert_to_tensor(total_count, name="total_count")
if validate_args:
self._total_count = (
distribution_util.embed_check_nonnegative_integer_form(
self._total_count))
self._concentration = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration,
name="concentration"),
validate_args)
self._total_concentration = math_ops.reduce_sum(self._concentration, -1)
super(DirichletMultinomial, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._total_count,
self._concentration],
name=name)
@property
def total_count(self):
"""Number of trials used to construct a sample."""
return self._total_count
@property
def concentration(self):
"""Concentration parameter; expected prior counts for that coordinate."""
return self._concentration
@property
def total_concentration(self):
"""Sum of last dim of concentration parameter."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return array_ops.shape(self.concentration)[-1:]
def _event_shape(self):
    # Event shape depends only on the last dim of `concentration`, not on
    # `total_count`.
return self.concentration.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
k = self.event_shape_tensor()[0]
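    # Note: the logs of Gamma(concentration) draws are, up to a shared
    # constant, the logits of a Dirichlet sample, so feeding them to
    # `multinomial` below realizes the two-step compound sampling scheme.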
unnormalized_logits = array_ops.reshape(
math_ops.log(random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
dtype=self.dtype,
seed=seed)),
shape=[-1, k])
draws = random_ops.multinomial(
logits=unnormalized_logits,
num_samples=n_draws,
seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2)
final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
x = array_ops.reshape(x, final_shape)
return math_ops.cast(x, self.dtype)
@distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note)
def _log_prob(self, counts):
counts = self._maybe_assert_valid_sample(counts)
ordered_prob = (
special_math_ops.lbeta(self.concentration + counts)
- special_math_ops.lbeta(self.concentration))
return ordered_prob + distribution_util.log_combinations(
self.total_count, counts)
@distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note)
def _prob(self, counts):
return math_ops.exp(self._log_prob(counts))
def _mean(self):
return self.total_count * (self.concentration /
self.total_concentration[..., array_ops.newaxis])
@distribution_util.AppendDocstring(
"""The covariance for each batch member is defined as the following:
```none
Var(X_j) = n * alpha_j / alpha_0 * (1 - alpha_j / alpha_0) *
(n + alpha_0) / (1 + alpha_0)
```
where `concentration = alpha` and
`total_concentration = alpha_0 = sum_j alpha_j`.
The covariance between elements in a batch is defined as:
```none
Cov(X_i, X_j) = -n * alpha_i * alpha_j / alpha_0 ** 2 *
(n + alpha_0) / (1 + alpha_0)
```
""")
def _covariance(self):
x = self._variance_scale_term() * self._mean()
return array_ops.matrix_set_diag(
-math_ops.matmul(x[..., array_ops.newaxis],
x[..., array_ops.newaxis, :]), # outer prod
self._variance())
def _variance(self):
scale = self._variance_scale_term()
x = scale * self._mean()
return x * (self.total_count * scale - x)
def _variance_scale_term(self):
"""Helper to `_covariance` and `_variance` which computes a shared scale."""
# We must take care to expand back the last dim whenever we use the
# total_concentration.
c0 = self.total_concentration[..., array_ops.newaxis]
return math_ops.sqrt((1. + c0 / self.total_count) / (1. + c0))
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
concentration = distribution_util.embed_check_categorical_event_shape(
concentration)
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
], concentration)
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return control_flow_ops.with_dependencies([
check_ops.assert_equal(
self.total_count, math_ops.reduce_sum(counts, -1),
message="counts last-dimension must sum to `self.total_count`"),
], counts)
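if __name__ == "__main__":
  # A minimal, illustrative smoke test of the DirichletMultinomial class
  # above; parameter values are hypothetical and a TF1-style graph/session
  # environment is assumed.
  import tensorflow.compat.v1 as tf1
  tf1.disable_eager_execution()
  dist = DirichletMultinomial(total_count=4., concentration=[1., 2., 3.])
  with tf1.Session() as sess:
    # The counts below sum to `total_count`, as `_log_prob` requires.
    mean, prob = sess.run([dist.mean(), dist.prob([1., 1., 2.])])
    print("mean:", mean, "prob:", prob)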
|
tensorflow-master
|
tensorflow/python/ops/distributions/dirichlet_multinomial.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Uniform distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["distributions.Uniform"])
class Uniform(distribution.Distribution):
"""Uniform distribution with `low` and `high` parameters.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; a, b) = I[a <= x < b] / Z
Z = b - a
```
where
- `low = a`,
- `high = b`,
- `Z` is the normalizing constant, and
- `I[predicate]` is the [indicator function](
https://en.wikipedia.org/wiki/Indicator_function) for `predicate`.
The parameters `low` and `high` must be shaped in a way that supports
broadcasting (e.g., `high - low` is a valid operation).
#### Examples
```python
# Without broadcasting:
u1 = Uniform(low=3.0, high=4.0) # a single uniform distribution [3, 4]
u2 = Uniform(low=[1.0, 2.0],
high=[3.0, 4.0]) # 2 distributions [1, 3], [2, 4]
u3 = Uniform(low=[[1.0, 2.0],
[3.0, 4.0]],
high=[[1.5, 2.5],
[3.5, 4.5]]) # 4 distributions
```
```python
# With broadcasting:
u1 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) # 3 distributions
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
low=0.,
high=1.,
validate_args=False,
allow_nan_stats=True,
name="Uniform"):
"""Initialize a batch of Uniform distributions.
Args:
low: Floating point tensor, lower boundary of the output interval. Must
have `low < high`.
high: Floating point tensor, upper boundary of the output interval. Must
have `low < high`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
      InvalidArgumentError: if `low >= high` and `validate_args=True`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[low, high]) as name:
with ops.control_dependencies([
check_ops.assert_less(
low, high, message="uniform not defined when low >= high.")
] if validate_args else []):
self._low = array_ops.identity(low, name="low")
self._high = array_ops.identity(high, name="high")
check_ops.assert_same_float_dtype([self._low, self._high])
super(Uniform, self).__init__(
dtype=self._low.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._low,
self._high],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("low", "high"),
([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2)))
@property
def low(self):
"""Lower boundary of the output interval."""
return self._low
@property
def high(self):
"""Upper boundary of the output interval."""
return self._high
def range(self, name="range"):
"""`high - low`."""
with self._name_scope(name):
return self.high - self.low
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.low),
array_ops.shape(self.high))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.low.get_shape(),
self.high.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
samples = random_ops.random_uniform(shape=shape,
dtype=self.dtype,
seed=seed)
return self.low + self.range() * samples
def _prob(self, x):
broadcasted_x = x * array_ops.ones(
self.batch_shape_tensor(), dtype=x.dtype)
return array_ops.where(
math_ops.is_nan(broadcasted_x),
broadcasted_x,
array_ops.where(
math_ops.logical_or(broadcasted_x < self.low,
broadcasted_x >= self.high),
array_ops.zeros_like(broadcasted_x),
array_ops.ones_like(broadcasted_x) / self.range()))
def _cdf(self, x):
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(x), self.batch_shape_tensor())
zeros = array_ops.zeros(broadcast_shape, dtype=self.dtype)
ones = array_ops.ones(broadcast_shape, dtype=self.dtype)
broadcasted_x = x * ones
result_if_not_big = array_ops.where(
x < self.low, zeros, (broadcasted_x - self.low) / self.range())
return array_ops.where(x >= self.high, ones, result_if_not_big)
def _entropy(self):
return math_ops.log(self.range())
def _mean(self):
return (self.low + self.high) / 2.
def _variance(self):
return math_ops.square(self.range()) / 12.
def _stddev(self):
return self.range() / math.sqrt(12.)
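if __name__ == "__main__":
  # A minimal, illustrative sketch of the broadcasting behavior described in
  # the Uniform class docstring above; values are hypothetical and a
  # TF1-style graph/session environment is assumed.
  import tensorflow.compat.v1 as tf1
  tf1.disable_eager_execution()
  u = Uniform(low=3.0, high=[5.0, 6.0, 7.0])  # 3 broadcast distributions
  with tf1.Session() as sess:
    print(sess.run([u.mean(), u.stddev(), u.prob(4.0)]))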
|
tensorflow-master
|
tensorflow/python/ops/distributions/uniform.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Bernoulli distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["distributions.Bernoulli"])
class Bernoulli(distribution.Distribution):
"""Bernoulli distribution.
The Bernoulli distribution with `probs` parameter, i.e., the probability of a
`1` outcome (vs a `0` outcome).
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
logits=None,
probs=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="Bernoulli"):
"""Construct Bernoulli distributions.
Args:
logits: An N-D `Tensor` representing the log-odds of a `1` event. Each
entry in the `Tensor` parametrizes an independent Bernoulli distribution
where the probability of an event is sigmoid(logits). Only one of
`logits` or `probs` should be passed in.
probs: An N-D `Tensor` representing the probability of a `1`
event. Each entry in the `Tensor` parameterizes an independent
Bernoulli distribution. Only one of `logits` or `probs` should be passed
in.
dtype: The type of the event samples. Default: `int32`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
      ValueError: If both `probs` and `logits` are passed, or if neither is
        passed.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
validate_args=validate_args,
name=name)
super(Bernoulli, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits, self._probs],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def logits(self):
"""Log-odds of a `1` outcome (vs `0`)."""
return self._logits
@property
def probs(self):
"""Probability of a `1` outcome (vs `0`)."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._logits)
def _batch_shape(self):
return self._logits.get_shape()
def _event_shape_tensor(self):
return array_ops.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
new_shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
uniform = random_ops.random_uniform(
new_shape, seed=seed, dtype=self.probs.dtype)
sample = math_ops.less(uniform, self.probs)
return math_ops.cast(sample, self.dtype)
def _log_prob(self, event):
if self.validate_args:
event = distribution_util.embed_check_integer_casting_closed(
event, target_dtype=dtypes.bool)
# TODO(jaana): The current sigmoid_cross_entropy_with_logits has
# inconsistent behavior for logits = inf/-inf.
event = math_ops.cast(event, self.logits.dtype)
logits = self.logits
# sigmoid_cross_entropy_with_logits doesn't broadcast shape,
# so we do this here.
def _broadcast(logits, event):
return (array_ops.ones_like(event) * logits,
array_ops.ones_like(logits) * event)
if not (event.get_shape().is_fully_defined() and
logits.get_shape().is_fully_defined() and
event.get_shape() == logits.get_shape()):
logits, event = _broadcast(logits, event)
return -nn.sigmoid_cross_entropy_with_logits(labels=event, logits=logits)
def _entropy(self):
return (-self.logits * (math_ops.sigmoid(self.logits) - 1) +
nn.softplus(-self.logits))
def _mean(self):
return array_ops.identity(self.probs)
def _variance(self):
return self._mean() * (1. - self.probs)
def _mode(self):
"""Returns `1` if `prob > 0.5` and `0` otherwise."""
return math_ops.cast(self.probs > 0.5, self.dtype)
@kullback_leibler.RegisterKL(Bernoulli, Bernoulli)
def _kl_bernoulli_bernoulli(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.
Args:
a: instance of a Bernoulli distribution object.
b: instance of a Bernoulli distribution object.
name: (optional) Name to use for created operations.
default is "kl_bernoulli_bernoulli".
Returns:
Batchwise KL(a || b)
"""
with ops.name_scope(name, "kl_bernoulli_bernoulli",
values=[a.logits, b.logits]):
delta_probs0 = nn.softplus(-b.logits) - nn.softplus(-a.logits)
delta_probs1 = nn.softplus(b.logits) - nn.softplus(a.logits)
return (math_ops.sigmoid(a.logits) * delta_probs0
+ math_ops.sigmoid(-a.logits) * delta_probs1)
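if __name__ == "__main__":
  # A minimal, illustrative sketch of the Bernoulli/Bernoulli KL registered
  # above, evaluated for two hypothetical parameterizations in a TF1-style
  # graph/session environment.
  import tensorflow.compat.v1 as tf1
  tf1.disable_eager_execution()
  a = Bernoulli(probs=0.25)
  b = Bernoulli(probs=0.75)
  with tf1.Session() as sess:
    print("KL(a || b):", sess.run(kullback_leibler.kl_divergence(a, b)))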
|
tensorflow-master
|
tensorflow/python/ops/distributions/bernoulli.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector unit-test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import uniform as uniform_lib
def assert_finite(array):
if not np.isfinite(array).all():
raise AssertionError("array was not all finite. %s" % array[:15])
def assert_strictly_increasing(array):
np.testing.assert_array_less(0., np.diff(array))
def assert_strictly_decreasing(array):
np.testing.assert_array_less(np.diff(array), 0.)
def assert_strictly_monotonic(array):
if array[0] < array[-1]:
assert_strictly_increasing(array)
else:
assert_strictly_decreasing(array)
def assert_scalar_congruency(bijector,
lower_x,
upper_x,
n=int(10e3),
rtol=0.01,
sess=None):
"""Assert `bijector`'s forward/inverse/inverse_log_det_jacobian are congruent.
We draw samples `X ~ U(lower_x, upper_x)`, then feed these through the
`bijector` in order to check that:
1. the forward is strictly monotonic.
2. the forward/inverse methods are inverses of each other.
3. the jacobian is the correct change of measure.
This can only be used for a Bijector mapping open subsets of the real line
to themselves. This is due to the fact that this test compares the `prob`
before/after transformation with the Lebesgue measure on the line.
Args:
bijector: Instance of Bijector
lower_x: Python scalar.
upper_x: Python scalar. Must have `lower_x < upper_x`, and both must be in
the domain of the `bijector`. The `bijector` should probably not produce
huge variation in values in the interval `(lower_x, upper_x)`, or else
the variance based check of the Jacobian will require small `rtol` or
huge `n`.
n: Number of samples to draw for the checks.
rtol: Positive number. Used for the Jacobian check.
sess: `tf.compat.v1.Session`. Defaults to the default session.
Raises:
AssertionError: If tests fail.
"""
# Checks and defaults.
if sess is None:
sess = ops.get_default_session()
# Should be monotonic over this interval
ten_x_pts = np.linspace(lower_x, upper_x, num=10).astype(np.float32)
if bijector.dtype is not None:
ten_x_pts = ten_x_pts.astype(bijector.dtype.as_numpy_dtype)
forward_on_10_pts = bijector.forward(ten_x_pts)
# Set the lower/upper limits in the range of the bijector.
lower_y, upper_y = sess.run(
[bijector.forward(lower_x), bijector.forward(upper_x)])
if upper_y < lower_y: # If bijector.forward is a decreasing function.
lower_y, upper_y = upper_y, lower_y
# Uniform samples from the domain, range.
uniform_x_samps = uniform_lib.Uniform(
low=lower_x, high=upper_x).sample(n, seed=0)
uniform_y_samps = uniform_lib.Uniform(
low=lower_y, high=upper_y).sample(n, seed=1)
# These compositions should be the identity.
inverse_forward_x = bijector.inverse(bijector.forward(uniform_x_samps))
forward_inverse_y = bijector.forward(bijector.inverse(uniform_y_samps))
# For a < b, and transformation y = y(x),
# (b - a) = \int_a^b dx = \int_{y(a)}^{y(b)} |dx/dy| dy
# "change_measure_dy_dx" below is a Monte Carlo approximation to the right
# hand side, which should then be close to the left, which is (b - a).
# We assume event_ndims=0 because we assume scalar -> scalar. The log_det
# methods will handle whether they expect event_ndims > 0.
dy_dx = math_ops.exp(bijector.inverse_log_det_jacobian(
uniform_y_samps, event_ndims=0))
# E[|dx/dy|] under Uniform[lower_y, upper_y]
# = \int_{y(a)}^{y(b)} |dx/dy| dP(u), where dP(u) is the uniform measure
expectation_of_dy_dx_under_uniform = math_ops.reduce_mean(dy_dx)
# dy = dP(u) * (upper_y - lower_y)
change_measure_dy_dx = (
(upper_y - lower_y) * expectation_of_dy_dx_under_uniform)
# We'll also check that dy_dx = 1 / dx_dy.
dx_dy = math_ops.exp(
bijector.forward_log_det_jacobian(
bijector.inverse(uniform_y_samps), event_ndims=0))
[
forward_on_10_pts_v,
dy_dx_v,
dx_dy_v,
change_measure_dy_dx_v,
uniform_x_samps_v,
uniform_y_samps_v,
inverse_forward_x_v,
forward_inverse_y_v,
] = sess.run([
forward_on_10_pts,
dy_dx,
dx_dy,
change_measure_dy_dx,
uniform_x_samps,
uniform_y_samps,
inverse_forward_x,
forward_inverse_y,
])
assert_strictly_monotonic(forward_on_10_pts_v)
# Composition of forward/inverse should be the identity.
np.testing.assert_allclose(
inverse_forward_x_v, uniform_x_samps_v, atol=1e-5, rtol=1e-3)
np.testing.assert_allclose(
forward_inverse_y_v, uniform_y_samps_v, atol=1e-5, rtol=1e-3)
# Change of measure should be correct.
np.testing.assert_allclose(
upper_x - lower_x, change_measure_dy_dx_v, atol=0, rtol=rtol)
# Inverse Jacobian should be equivalent to the reciprocal of the forward
# Jacobian.
np.testing.assert_allclose(
dy_dx_v, np.divide(1., dx_dy_v), atol=1e-5, rtol=1e-3)
def assert_bijective_and_finite(
bijector, x, y, event_ndims, atol=0, rtol=1e-5, sess=None):
"""Assert that forward/inverse (along with jacobians) are inverses and finite.
  It is recommended to use x and y values that are very close to the edge
  of the Bijector's domain.
Args:
bijector: A Bijector instance.
x: np.array of values in the domain of bijector.forward.
y: np.array of values in the domain of bijector.inverse.
event_ndims: Integer describing the number of event dimensions this bijector
operates on.
atol: Absolute tolerance.
rtol: Relative tolerance.
sess: TensorFlow session. Defaults to the default session.
Raises:
AssertionError: If tests fail.
"""
sess = sess or ops.get_default_session()
  # These are the incoming points, but a wide range of values can make these
  # badly behaved (e.g. non-finite), especially in 16-bit precision.
assert_finite(x)
assert_finite(y)
f_x = bijector.forward(x)
g_y = bijector.inverse(y)
[
x_from_x,
y_from_y,
ildj_f_x,
fldj_x,
ildj_y,
fldj_g_y,
f_x_v,
g_y_v,
] = sess.run([
bijector.inverse(f_x),
bijector.forward(g_y),
bijector.inverse_log_det_jacobian(f_x, event_ndims=event_ndims),
bijector.forward_log_det_jacobian(x, event_ndims=event_ndims),
bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims),
bijector.forward_log_det_jacobian(g_y, event_ndims=event_ndims),
f_x,
g_y,
])
assert_finite(x_from_x)
assert_finite(y_from_y)
assert_finite(ildj_f_x)
assert_finite(fldj_x)
assert_finite(ildj_y)
assert_finite(fldj_g_y)
assert_finite(f_x_v)
assert_finite(g_y_v)
np.testing.assert_allclose(x_from_x, x, atol=atol, rtol=rtol)
np.testing.assert_allclose(y_from_y, y, atol=atol, rtol=rtol)
np.testing.assert_allclose(-ildj_f_x, fldj_x, atol=atol, rtol=rtol)
np.testing.assert_allclose(-ildj_y, fldj_g_y, atol=atol, rtol=rtol)
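if __name__ == "__main__":
  # A minimal, numpy-only sketch of the finiteness/monotonicity helpers
  # above; the arrays are illustrative and no TF session is required.
  xs = np.linspace(0., 1., num=5)
  assert_finite(xs)
  assert_strictly_monotonic(xs)        # strictly increasing case
  assert_strictly_monotonic(xs[::-1])  # strictly decreasing case
  print("monotonicity helpers passed")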
|
tensorflow-master
|
tensorflow/python/ops/distributions/bijector_test_util.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registration and usage mechanisms for KL-divergences."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
_DIVERGENCES = {}
__all__ = [
"RegisterKL",
"kl_divergence",
]
def _registered_kl(type_a, type_b):
"""Get the KL function registered for classes a and b."""
hierarchy_a = tf_inspect.getmro(type_a)
hierarchy_b = tf_inspect.getmro(type_b)
dist_to_children = None
kl_fn = None
for mro_to_a, parent_a in enumerate(hierarchy_a):
for mro_to_b, parent_b in enumerate(hierarchy_b):
candidate_dist = mro_to_a + mro_to_b
candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)
if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):
dist_to_children = candidate_dist
kl_fn = candidate_kl_fn
return kl_fn
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
@tf_export(v1=["distributions.kl_divergence"])
def kl_divergence(distribution_a, distribution_b,
allow_nan_stats=True, name=None):
"""Get the KL-divergence KL(distribution_a || distribution_b).
If there is no KL method registered specifically for `type(distribution_a)`
and `type(distribution_b)`, then the class hierarchies of these types are
searched.
If one KL method is registered between any pairs of classes in these two
parent hierarchies, it is used.
If more than one such registered method exists, the method whose registered
classes have the shortest sum MRO paths to the input types is used.
If more than one such shortest path exists, the first method
identified in the search is used (favoring a shorter MRO distance to
`type(distribution_a)`).
Args:
distribution_a: The first distribution.
distribution_b: The second distribution.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this function.
Returns:
A Tensor with the batchwise KL-divergence between `distribution_a`
and `distribution_b`.
Raises:
NotImplementedError: If no KL method is defined for distribution types
of `distribution_a` and `distribution_b`.
"""
kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
if kl_fn is None:
raise NotImplementedError(
"No KL(distribution_a || distribution_b) registered for distribution_a "
"type %s and distribution_b type %s"
% (type(distribution_a).__name__, type(distribution_b).__name__))
with ops.name_scope("KullbackLeibler"):
kl_t = kl_fn(distribution_a, distribution_b, name=name)
if allow_nan_stats:
return kl_t
# Check KL for NaNs
kl_t = array_ops.identity(kl_t, name="kl")
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_not(
math_ops.reduce_any(math_ops.is_nan(kl_t))),
["KL calculation between %s and %s returned NaN values "
"(and was called with allow_nan_stats=False). Values:"
% (distribution_a.name, distribution_b.name), kl_t])]):
return array_ops.identity(kl_t, name="checked_kl")
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def cross_entropy(ref, other,
allow_nan_stats=True, name=None):
"""Computes the (Shannon) cross entropy.
Denote two distributions by `P` (`ref`) and `Q` (`other`). Assuming `P, Q`
are absolutely continuous with respect to one another and permit densities
  `p(x) dr(x)` and `q(x) dr(x)`, (Shannon) cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
ref: `tfd.Distribution` instance.
other: `tfd.Distribution` instance.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `ref.dtype` `Tensor` with shape `[B1, ..., Bn]`
      representing `n` different calculations of (Shannon) cross entropy.
"""
with ops.name_scope(name, "cross_entropy"):
return ref.entropy() + kl_divergence(
ref, other, allow_nan_stats=allow_nan_stats)
@tf_export(v1=["distributions.RegisterKL"])
class RegisterKL(object):
"""Decorator to register a KL divergence implementation function.
Usage:
@distributions.RegisterKL(distributions.Normal, distributions.Normal)
def _kl_normal_mvn(norm_a, norm_b):
# Return KL(norm_a || norm_b)
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self, dist_cls_a, dist_cls_b):
"""Initialize the KL registrar.
Args:
dist_cls_a: the class of the first argument of the KL divergence.
dist_cls_b: the class of the second argument of the KL divergence.
"""
self._key = (dist_cls_a, dist_cls_b)
def __call__(self, kl_fn):
"""Perform the KL registration.
Args:
kl_fn: The function to use for the KL divergence.
Returns:
kl_fn
Raises:
TypeError: if kl_fn is not a callable.
ValueError: if a KL divergence function has already been registered for
the given argument classes.
"""
if not callable(kl_fn):
raise TypeError("kl_fn must be callable, received: %s" % kl_fn)
if self._key in _DIVERGENCES:
raise ValueError("KL(%s || %s) has already been registered to: %s"
% (self._key[0].__name__, self._key[1].__name__,
_DIVERGENCES[self._key]))
_DIVERGENCES[self._key] = kl_fn
return kl_fn
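if __name__ == "__main__":
  # A minimal, illustrative sketch of the registration/lookup mechanism
  # above, using two hypothetical placeholder classes; real usage registers
  # Distribution subclasses (see e.g. the Bernoulli module).
  class _ToyBase(object):
    pass
  class _ToyChild(_ToyBase):
    pass
  @RegisterKL(_ToyBase, _ToyBase)
  def _kl_toy_toy(a, b, name=None):  # pylint: disable=unused-argument
    return array_ops.zeros([])  # degenerate KL, for demonstration only
  # The MRO search resolves (_ToyChild, _ToyChild) to the (_ToyBase, _ToyBase)
  # registration, the closest registered pair in the class hierarchies.
  print(kl_divergence(_ToyChild(), _ToyChild()))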
|
tensorflow-master
|
tensorflow/python/ops/distributions/kullback_leibler.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Gamma",
"GammaWithSoftplusConcentrationRate",
]
@tf_export(v1=["distributions.Gamma"])
class Gamma(distribution.Distribution):
"""Gamma distribution.
The Gamma distribution is defined over positive real numbers using
parameters `concentration` (aka "alpha") and `rate` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta, x > 0) = x**(alpha - 1) exp(-x beta) / Z
Z = Gamma(alpha) beta**(-alpha)
```
where:
* `concentration = alpha`, `alpha > 0`,
* `rate = beta`, `beta > 0`,
* `Z` is the normalizing constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
  The cumulative distribution function (cdf) is,
```none
cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta x) / Gamma(alpha)
```
where `GammaInc` is the [lower incomplete Gamma function](
https://en.wikipedia.org/wiki/Incomplete_gamma_function).
The parameters can be intuited via their relationship to mean and stddev,
```none
concentration = alpha = (mean / stddev)**2
rate = beta = mean / stddev**2 = concentration / mean
```
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: The samples of this distribution are always non-negative. However,
the samples that are smaller than `np.finfo(dtype).tiny` are rounded
to this value, so it appears more often than it should.
This should only be noticeable when the `concentration` is very small, or the
`rate` is very large. See note in `tf.random.gamma` docstring.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
dist = tfd.Gamma(concentration=3.0, rate=2.0)
dist2 = tfd.Gamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
```
Compute the gradients of samples w.r.t. the parameters:
```python
concentration = tf.constant(3.0)
rate = tf.constant(2.0)
dist = tfd.Gamma(concentration, rate)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [concentration, rate])
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="Gamma"):
"""Construct Gamma with `concentration` and `rate` parameters.
The parameters `concentration` and `rate` must be shaped in a way that
supports broadcasting (e.g. `concentration + rate` is a valid operation).
Args:
concentration: Floating point tensor, the concentration params of the
distribution(s). Must contain only positive values.
rate: Floating point tensor, the inverse scale params of the
distribution(s). Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `concentration` and `rate` are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
with ops.control_dependencies([
check_ops.assert_positive(concentration),
check_ops.assert_positive(rate),
] if validate_args else []):
self._concentration = array_ops.identity(
concentration, name="concentration")
self._rate = array_ops.identity(rate, name="rate")
check_ops.assert_same_float_dtype(
[self._concentration, self._rate])
super(Gamma, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._rate],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("concentration", "rate"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
@property
def rate(self):
"""Rate parameter."""
return self._rate
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.concentration),
array_ops.shape(self.rate))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.concentration.get_shape(),
self.rate.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
@distribution_util.AppendDocstring(
"""Note: See `tf.random.gamma` docstring for sampling details and
caveats.""")
def _sample_n(self, n, seed=None):
return random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
beta=self.rate,
dtype=self.dtype,
seed=seed)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _cdf(self, x):
x = self._maybe_assert_valid_sample(x)
# Note that igamma returns the regularized incomplete gamma function,
# which is what we want for the CDF.
return math_ops.igamma(self.concentration, self.rate * x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return math_ops.xlogy(self.concentration - 1., x) - self.rate * x
def _log_normalization(self):
return (math_ops.lgamma(self.concentration)
- self.concentration * math_ops.log(self.rate))
def _entropy(self):
return (self.concentration
- math_ops.log(self.rate)
+ math_ops.lgamma(self.concentration)
+ ((1. - self.concentration) *
math_ops.digamma(self.concentration)))
def _mean(self):
return self.concentration / self.rate
def _variance(self):
return self.concentration / math_ops.square(self.rate)
def _stddev(self):
return math_ops.sqrt(self.concentration) / self.rate
@distribution_util.AppendDocstring(
"""The mode of a gamma distribution is `(shape - 1) / rate` when
`shape > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`,
an exception will be raised rather than returning `NaN`.""")
def _mode(self):
mode = (self.concentration - 1.) / self.rate
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(self.concentration > 1., mode, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype),
self.concentration,
message="mode not defined when any concentration <= 1"),
], mode)
def _maybe_assert_valid_sample(self, x):
check_ops.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x),
], x)
class GammaWithSoftplusConcentrationRate(Gamma):
"""`Gamma` with softplus of `concentration` and `rate`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Gamma(tf.nn.softplus(concentration), "
"tf.nn.softplus(rate))` instead.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="GammaWithSoftplusConcentrationRate"):
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
super(GammaWithSoftplusConcentrationRate, self).__init__(
concentration=nn.softplus(concentration,
name="softplus_concentration"),
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Gamma, Gamma)
def _kl_gamma_gamma(g0, g1, name=None):
"""Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.
Args:
g0: instance of a Gamma distribution object.
g1: instance of a Gamma distribution object.
name: (optional) Name to use for created operations.
Default is "kl_gamma_gamma".
Returns:
kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
"""
with ops.name_scope(name, "kl_gamma_gamma", values=[
g0.concentration, g0.rate, g1.concentration, g1.rate]):
# Result from:
# http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
# For derivation see:
# http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions pylint: disable=line-too-long
return (((g0.concentration - g1.concentration)
* math_ops.digamma(g0.concentration))
+ math_ops.lgamma(g1.concentration)
- math_ops.lgamma(g0.concentration)
+ g1.concentration * math_ops.log(g0.rate)
- g1.concentration * math_ops.log(g1.rate)
+ g0.concentration * (g1.rate / g0.rate - 1.))
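if __name__ == "__main__":
  # A minimal, illustrative sketch of the Gamma class and the Gamma/Gamma KL
  # registered above; parameter values are hypothetical and a TF1-style
  # graph/session environment is assumed.
  import tensorflow.compat.v1 as tf1
  tf1.disable_eager_execution()
  g0 = Gamma(concentration=3.0, rate=2.0)
  g1 = Gamma(concentration=4.0, rate=3.0)
  with tf1.Session() as sess:
    print(sess.run([g0.mean(), g0.variance(),
                    kullback_leibler.kl_divergence(g0, g1)]))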
|
tensorflow-master
|
tensorflow/python/ops/distributions/gamma.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging tensorflow::tfprof::OpLogProto.
OpLogProto is used to add extra model information for offline analysis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import six
from tensorflow.core.profiler import tfprof_log_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.profiler.internal import flops_registry # pylint: disable=unused-import
from tensorflow.python.util.tf_export import tf_export
TRAINABLE_VARIABLES = '_trainable_variables'
REGISTERED_FLOP_STATS = 'flops'
def _fill_missing_graph_shape(graph, run_meta):
"""Fill Tensor shapes in 'graph' with run time shape from 'run_meta'."""
for dev_stat in run_meta.step_stats.dev_stats:
for node_stat in dev_stat.node_stats:
if not node_stat.output:
continue
try:
op = graph.get_operation_by_name(node_stat.node_name)
      except KeyError:
        # Graph doesn't contain the node_stat, usually a RecvTensor.
continue
if len(node_stat.output) != len(op.outputs):
# For example, conditional op has only 1 output at run time.
continue
for (i, node_stat_out) in enumerate(node_stat.output):
if op.outputs[i].get_shape().is_fully_defined():
continue
node_stat_dims = node_stat_out.tensor_description.shape.dim
node_stat_shape = tensor_shape.TensorShape(
[d.size for d in node_stat_dims])
try:
op.outputs[i].set_shape(op.outputs[i].get_shape().merge_with(
node_stat_shape))
except ValueError as e:
sys.stderr.write('Node %s incompatible shapes: %s.\n' %
(node_stat.node_name, e))
return graph
def _str_id(s, str_to_id):
"""Maps string to id."""
num = str_to_id.get(s, None)
if num is None:
num = len(str_to_id)
str_to_id[s] = num
return num
def _get_logged_ops(graph, run_meta=None, add_trace=True,
add_trainable_var=True):
"""Extract trainable model parameters and FLOPs for ops from a Graph.
Args:
graph: tf.Graph.
run_meta: RunMetadata proto used to complete shape information.
add_trace: Whether to add op trace information.
add_trainable_var: Whether to assign tf.compat.v1.trainable_variables() op
type '_trainable_variables'.
Returns:
logged_ops: dict mapping from op_name to OpLogEntry.
string_to_id: dict mapping from string to id.
"""
if run_meta:
graph = _fill_missing_graph_shape(graph, run_meta)
op_missing_shape = 0
logged_ops = {}
string_to_id = {}
string_to_id['none'] = len(string_to_id)
# TODO(xpan): Work with Profiler more efficiently.
for op in graph.get_operations():
try:
stats = ops.get_stats_for_node_def(
graph, op.node_def, REGISTERED_FLOP_STATS)
except ValueError:
      # Catch the exception when the shape is incomplete and skip the op.
op_missing_shape += 1
stats = None
entry = tfprof_log_pb2.OpLogEntry()
entry.name = op.name
add_entry = False
if stats and stats.value:
entry.float_ops = int(stats.value)
add_entry = True
if add_trace:
for tb in op.traceback_with_start_lines:
trace = entry.code_def.traces.add()
trace.file_id = _str_id(tb[0], string_to_id) if tb[0] else 0
trace.lineno = tb[1] if tb[1] else -1
trace.function_id = _str_id(tb[2], string_to_id) if tb[2] else 0
trace.line_id = _str_id(tb[3], string_to_id) if tb[3] else 0
trace.func_start_line = tb[4] if tb[4] else -1
add_entry = True
if add_entry:
logged_ops[entry.name] = entry
if add_trainable_var:
for v in graph.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES):
if v.op.name not in logged_ops:
entry = tfprof_log_pb2.OpLogEntry()
entry.name = v.op.name
entry.types.append(TRAINABLE_VARIABLES)
logged_ops[entry.name] = entry
else:
logged_ops[v.op.name].types.append(TRAINABLE_VARIABLES)
if op_missing_shape > 0 and not run_meta:
    sys.stderr.write('%d ops have no flops stats due to incomplete shapes.\n'
                     % op_missing_shape)
return logged_ops, string_to_id
def merge_default_with_oplog(graph, op_log=None, run_meta=None,
add_trace=True, add_trainable_var=True):
"""Merge the tfprof default extra info with caller's op_log.
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
op_log: OpLogProto proto.
run_meta: RunMetadata proto used to complete shape information.
add_trace: Whether to add op trace information.
add_trainable_var: Whether to assign tf.compat.v1.trainable_variables() op
type '_trainable_variables'.
Returns:
tmp_op_log: Merged OpLogProto proto.
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
tmp_op_log = tfprof_log_pb2.OpLogProto()
if not graph:
return tmp_op_log
logged_ops, string_to_id = _get_logged_ops(
graph, run_meta, add_trace=add_trace, add_trainable_var=add_trainable_var)
if not op_log:
tmp_op_log.log_entries.extend(logged_ops.values())
else:
all_ops = {}
for entry in op_log.log_entries:
all_ops[entry.name] = entry
for op_name, entry in six.iteritems(logged_ops):
if op_name in all_ops:
all_ops[op_name].types.extend(entry.types)
if entry.float_ops > 0 and all_ops[op_name].float_ops == 0:
all_ops[op_name].float_ops = entry.float_ops
if entry.code_def.traces and not all_ops[op_name].code_def.traces:
all_ops[op_name].code_def.MergeFrom(entry.code_def)
else:
all_ops[op_name] = entry
tmp_op_log.log_entries.extend(all_ops.values())
for s, i in six.iteritems(string_to_id):
tmp_op_log.id_to_string[i] = s
return tmp_op_log
@tf_export(v1=['profiler.write_op_log'])
def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
"""Log provided 'op_log', and add additional model information below.
The API also assigns ops in tf.compat.v1.trainable_variables() an op type
called '_trainable_variables'.
The API also logs 'flops' statistics for ops with op.RegisterStatistics()
defined. flops calculation depends on Tensor shapes defined in 'graph',
which might not be complete. 'run_meta', if provided, completes the shape
information with best effort.
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
log_dir: directory to write the log file.
    op_log: (Optional) OpLogProto proto to be written. If not provided, a new
      one is created.
run_meta: (Optional) RunMetadata proto that helps flops computation using
run time shape information.
add_trace: Whether to add python code trace information.
Used to support "code" view.
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
op_log = merge_default_with_oplog(graph, op_log, run_meta, add_trace)
with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'w') as log:
log.write(op_log.SerializeToString())
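if __name__ == "__main__":
  # A tiny, pure-Python sketch of the string-interning helper above; ids are
  # assigned in first-seen order, so repeated strings map to a stable id.
  table = {}
  print(_str_id('MatMul', table),
        _str_id('Conv2D', table),
        _str_id('MatMul', table))  # -> 0 1 0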
|
tensorflow-master
|
tensorflow/python/profiler/tfprof_logger.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Context that captures profile and performs profiling/dumping.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import random
import sys
import threading
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.client import session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.util import compat
WARMUP_STEPS = 10
MAX_TRACED_STEPS = 100
def _profiled_init(self, target='', graph=None, config=None):
"""Overwrites the session.__init__."""
self._profiler_init_internal(target, graph, config) # pylint: disable=protected-access
def _profiled_run(self,
fetches,
feed_dict=None,
options=None,
run_metadata=None):
"""Overwrites the session.run()."""
# pylint: disable=protected-access
# Count the session steps.
with self.profile_context._new_step() as state:
step, locked = state
# Fast path if no need for profiling.
if locked and not self.profile_context._is_fast_path(step):
# Maybe trace this step.
if self.profile_context._should_trace(step, self.graph, fetches):
if self.profile_context._debug:
sys.stderr.write('debug: tracing step: %d\n' % step)
# Enable tracing, perform auto profiling or auto dump.
if not run_metadata:
run_metadata = config_pb2.RunMetadata()
if not options:
options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
old_trace_level = options.trace_level
else:
old_trace_level = options.trace_level
options.trace_level = config_pb2.RunOptions.FULL_TRACE
ret = self._profiler_run_internal(
fetches, feed_dict, options, run_metadata)
if self.profile_context._debug:
self.profile_context._dump_file(run_metadata, 'run_meta_%d' % step)
self.profile_context.profiler._graph = self.graph
self.profile_context.profiler.add_step(step, run_metadata)
options.trace_level = old_trace_level
else:
ret = self._profiler_run_internal(fetches, feed_dict, options)
# Maybe dump profile.
self.profile_context._maybe_dump(step)
# Maybe profile:
to_profiles = self.profile_context._profile_candidates()
for to_prof in to_profiles:
cmd, opts, _ = to_prof
saved_views = self.profile_context._views.setdefault(cmd, {})
if self.profile_context._debug:
sys.stderr.write('debug: profiling %s step: %d\n' % (cmd, step))
if cmd == 'graph':
saved_views[step] = self.profile_context.profiler.profile_graph(opts)
elif cmd == 'scope':
saved_views[step] = self.profile_context.profiler.profile_name_scope(
opts)
elif cmd == 'op':
saved_views[step] = self.profile_context.profiler.profile_operations(
opts)
elif cmd == 'code':
saved_views[step] = self.profile_context.profiler.profile_python(opts)
else:
raise ValueError('Unknown cmd: %s\n' % cmd)
return ret
# Fast no lock path.
return self._profiler_run_internal(
fetches, feed_dict, options, run_metadata)
# pylint: enable=protected-access
class ProfileContext(object):
"""A Context that captures RunMetadata and performs profiling.
```python
# Trace steps 100~200, profile at [150, 200] and dump profile at 200.
with tf.contrib.tfprof.ProfileContext('/tmp/train_dir',
trace_steps=range(100, 200, 3),
dump_steps=[200]) as pctx:
opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
pctx.add_auto_profiling('op', opts, [150, 200])
train_loop().
# Tracing only.
with tf.contrib.tfprof.ProfileContext('/tmp/train_dir') as pctx:
    # Run the train/eval loop for at least a few hundred steps. Profiles will
    # be dumped to train_dir. Use the web UI or command line to do profiling.
train_loop().
# When session object is available, do explicit trace, profile and dump.
with tf.contrib.tfprof.ProfileContext('/tmp/train_dir',
trace_steps=[],
dump_steps=[]) as pctx:
opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
pctx.trace_next_step()
_ = session.run(train_op)
pctx.profiler.profile_operations(options=opts)
```
Args:
profile_dir: Directory to store profiles.
trace_steps: A list of session run steps to trace. If None, use
pre-defined steps.
dump_steps: A list of steps to dump the profile to `profile_dir`. If None,
use pre-defined steps.
    enabled: If false, everything is disabled with minimal overhead. It allows
      the user to enable profiling only when needed.
    debug: If true, also dumps the raw trace RunMetadata text file to
      profile_dir and prints debugging messages. Useful for bug reports.
"""
def __init__(self,
profile_dir,
trace_steps=None,
dump_steps=None,
enabled=True,
debug=False):
self._enabled = enabled
if not self._enabled:
return
self._debug = debug
if not profile_dir:
raise ValueError('Must have a directory for profile.\n')
self._profiler_dir = profile_dir
if trace_steps is None:
self._trace_steps = set()
self._auto_tracing = True
else:
if len(trace_steps) > MAX_TRACED_STEPS:
        raise ValueError('Only supports tracing up to %d steps.\n'
                         % MAX_TRACED_STEPS)
self._trace_steps = set(trace_steps[:])
self._auto_tracing = False
if dump_steps is None:
self._dump_steps = set([MAX_TRACED_STEPS])
else:
self._dump_steps = set(dump_steps[:])
self._rng = random.Random(111)
self._fetched = set()
self._slow_path_steps = self._dump_steps | self._trace_steps
self._trace_next_step = False
self._dump_next_step = False
self._step = 0
self._traced_steps = 0
self._auto_profiles = []
self._profiler = None
self._views = {}
self._lock = threading.Lock()
def get_profiles(self, cmd):
"""Returns profiling results for each step at which `cmd` was run.
Args:
cmd: string, profiling command used in an `add_auto_profiling` call.
Returns:
dict[int: (MultiGraphNodeProto | GraphNodeProto)]. Keys are steps at which
the profiling command was run. Values are the outputs of profiling.
For "code" and "op" commands this will be a `MultiGraphNodeProto`, for
"scope" and "graph" commands this will be a `GraphNodeProto.
Raises:
      ValueError: if `cmd` was never run (either because no session.run call
        was made or because there was no `add_auto_profiling` call with the
        specified `cmd`).
"""
if cmd not in self._views:
      raise ValueError('No autoprofiler was run for command: {}'.format(cmd))
return self._views[cmd]
def add_auto_profiling(self, cmd, options, profile_steps):
"""Traces and profiles at some session run steps.
Args:
      cmd: The profiling command (i.e. scope, op, python, graph).
options: The profiling options.
profile_steps: A list/set of integers. The profiling command and options
will be run automatically at these integer steps. Each step is
a session.run.
"""
if not self._enabled:
return
self._auto_profiles.append((cmd, options, profile_steps[:]))
self._slow_path_steps |= set(profile_steps)
self._trace_steps |= set(profile_steps)
@property
def profiler(self):
"""Returns the current profiler object."""
if not self._enabled:
return None
if not self._profiler:
self._profiler = model_analyzer.Profiler(ops.get_default_graph())
return self._profiler
def trace_next_step(self):
"""Enables tracing and adds traces to profiler at next step."""
if not self._enabled:
return
self._trace_next_step = True
self._slow_path_steps.add(self._step)
def dump_next_step(self):
"""Enable tracing and dump profiles at next step."""
if not self._enabled:
return
self._dump_next_step = True
self._slow_path_steps.add(self._step)
def _is_fast_path(self, step):
if step in self._slow_path_steps:
return False
    # When the user doesn't set tracing steps explicitly, decide automatically.
if (self._auto_tracing and step > WARMUP_STEPS and
self._traced_steps <= MAX_TRACED_STEPS):
return False
return True
def _should_trace(self, step, graph, fetches):
"""Whether should do tracing at current step."""
if self._traced_steps > MAX_TRACED_STEPS:
return False
# Check user-set tracing steps.
if step in self._trace_steps or self._trace_next_step:
self._traced_steps += 1
return True
    # If no user-set tracing steps are set and we are past the warm-up steps,
    # auto-trace.
    if self._auto_tracing and step > WARMUP_STEPS:
      # If the fetches have not been seen before, trace them.
with graph.as_default():
fetch_names = [f.name for f in
session._FetchMapper.for_fetch(fetches).unique_fetches()] # pylint: disable=protected-access
fetch_name = '-'.join(sorted(fetch_names))
if self._debug:
sys.stderr.write('debug: trace fetches: %s\n' % fetch_name)
if fetch_name not in self._fetched:
self._fetched.add(fetch_name)
self._traced_steps += 1
return True
      # If the trace coverage is low, do some random tracing.
if (self.profiler._coverage < 0.5 and step < MAX_TRACED_STEPS and # pylint: disable=protected-access
self._rng.randint(0, 10) < 2):
self._traced_steps += 1
return True
return False
def _maybe_dump(self, step):
"""Maybe dump the profile file."""
if not (step in self._dump_steps or self._dump_next_step):
return
if self._debug:
sys.stderr.write('debug: dumping file at step: %d\n' % step)
if not gfile.Exists(self._profiler_dir):
gfile.MakeDirs(self._profiler_dir)
filename = os.path.join(compat.as_bytes(self._profiler_dir),
compat.as_bytes('profile_%d' % step))
self.profiler._write_profile(filename) # pylint: disable=protected-access
def _dump_file(self, pb, basename):
if not gfile.Exists(self._profiler_dir):
gfile.MakeDirs(self._profiler_dir)
with gfile.Open(os.path.join(self._profiler_dir, basename), 'w') as f:
f.write('%s' % pb)
@contextlib.contextmanager
def _new_step(self):
acquired = self._lock.acquire(False)
yield (self._step, acquired)
self._step += 1
self._trace_next_step = False
self._dump_next_step = False
if acquired:
self._lock.release()
def _profile_candidates(self):
to_profile = []
for auto_prof in self._auto_profiles:
_, _, prof_steps = auto_prof
if self._step in prof_steps:
to_profile.append(auto_prof)
return to_profile
def __enter__(self):
if self._enabled:
self.old_run = getattr(session.BaseSession, 'run', None)
self.old_init = getattr(session.BaseSession, '__init__', None)
if not self.old_run:
        raise errors.InternalError(None, None,
                                   'BaseSession is missing the run method.')
      elif not self.old_init:
        raise errors.InternalError(None, None,
                                   'BaseSession is missing the __init__ method.')
elif getattr(session.BaseSession, '_profiler_run_internal', None):
raise errors.InternalError(None, None,
'Already in context or context not cleaned.')
elif getattr(session.BaseSession, '_profiler_init_internal', None):
raise errors.InternalError(None, None,
'Already in context or context not cleaned.')
else:
setattr(session.BaseSession, 'run', _profiled_run)
setattr(session.BaseSession, '__init__', _profiled_init)
setattr(session.BaseSession, '_profiler_run_internal', self.old_run)
setattr(session.BaseSession, '_profiler_init_internal', self.old_init)
setattr(session.BaseSession, 'profile_context', self)
return self
else:
return self
def __exit__(self, exec_type, exec_value, exec_tb):
if not self._enabled:
return
print_mdl.DeleteProfiler()
setattr(session.BaseSession, 'run', self.old_run)
setattr(session.BaseSession, '__init__', self.old_init)
setattr(session.BaseSession, '_profiler_run_internal', None)
setattr(session.BaseSession, '_profiler_init_internal', None)
setattr(session.BaseSession, 'profile_context', None)
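A minimal usage sketch of the `ProfileContext` above, wrapping session runs so selected steps are traced and dumped automatically. The directory, step numbers, and `train_op` are illustrative assumptions, not from the file itself:

```python
from tensorflow.python.client import session
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import profile_context

# Trace steps 10-19, auto-profile the 'op' view at steps 15 and 20, and
# dump the accumulated profile at step 20.
opts = option_builder.ProfileOptionBuilder.time_and_memory()
with profile_context.ProfileContext('/tmp/train_dir/profile',  # assumed path
                                    trace_steps=range(10, 20),
                                    dump_steps=[20]) as pctx:
  pctx.add_auto_profiling('op', opts, profile_steps=[15, 20])
  with session.Session() as sess:
    for _ in range(21):
      sess.run(train_op)  # train_op is assumed to be defined elsewhere
```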
|
tensorflow-master
|
tensorflow/python/profiler/profile_context.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pprof_profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
from proto import profile_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.profiler import pprof_profiler
class PprofProfilerTest(test.TestCase):
def testDataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
graph.get_operations.return_value = []
profiles = pprof_profiler.get_profiles(graph, run_metadata)
    self.assertEqual(0, len(profiles))
    profile_files = pprof_profiler.profile(
        graph, run_metadata, output_dir)
    self.assertEqual(0, len(profile_files))
def testRunMetadataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [('a/b/file1', 10, 'some_var')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
profiles = pprof_profiler.get_profiles(graph, run_metadata)
    self.assertEqual(0, len(profiles))
    profile_files = pprof_profiler.profile(
        graph, run_metadata, output_dir)
    self.assertEqual(0, len(profile_files))
def testValidProfile(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
node1 = step_stats_pb2.NodeExecStats(
node_name='Add/123',
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = 'deviceA'
device1.node_stats.extend([node1])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [
('a/b/file1', 10, 'apply_op', 'abc'), ('a/c/file2', 12, 'my_op', 'def')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
expected_proto = """sample_type {
type: 5
unit: 5
}
sample_type {
type: 6
unit: 7
}
sample_type {
type: 8
unit: 7
}
sample {
value: 1
value: 4
value: 2
label {
key: 1
str: 2
}
label {
key: 3
str: 4
}
}
string_table: ""
string_table: "node_name"
string_table: "Add/123"
string_table: "op_type"
string_table: "add"
string_table: "count"
string_table: "all_time"
string_table: "nanoseconds"
string_table: "op_time"
string_table: "Device 1 of 1: deviceA"
comment: 9
"""
# Test with protos
profiles = pprof_profiler.get_profiles(graph, run_metadata)
    self.assertEqual(1, len(profiles))
    self.assertTrue('deviceA' in profiles)
    self.assertEqual(expected_proto, str(profiles['deviceA']))
# Test with files
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
    self.assertEqual(1, len(profile_files))
    with gzip.open(profile_files[0]) as profile_file:
      profile_contents = profile_file.read()
      profile = profile_pb2.Profile()
      profile.ParseFromString(profile_contents)
      self.assertEqual(expected_proto, str(profile))
@test_util.run_v1_only('b/120545219')
def testProfileWithWhileLoop(self):
options = config_pb2.RunOptions()
options.trace_level = config_pb2.RunOptions.FULL_TRACE
run_metadata = config_pb2.RunMetadata()
num_iters = 5
with self.cached_session() as sess:
i = constant_op.constant(0)
c = lambda i: math_ops.less(i, num_iters)
b = lambda i: math_ops.add(i, 1)
r = control_flow_ops.while_loop(c, b, [i])
sess.run(r, options=options, run_metadata=run_metadata)
profiles = pprof_profiler.get_profiles(sess.graph, run_metadata)
    self.assertEqual(1, len(profiles))
profile = next(iter(profiles.values()))
add_samples = [] # Samples for the while/Add node
for sample in profile.sample:
if profile.string_table[sample.label[0].str] == 'while/Add':
add_samples.append(sample)
# Values for same nodes are aggregated.
    self.assertEqual(1, len(add_samples))
    # Value of "count" should be equal to number of iterations.
    self.assertEqual(num_iters, add_samples[0].value[0])
if __name__ == '__main__':
test.main()
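For reference, a gzip-compressed output of `pprof_profiler.profile` can be parsed back the same way `testValidProfile` does. A small sketch; the file path is an assumption:

```python
import gzip

from proto import profile_pb2  # pprof's profile proto, as imported above

with gzip.open('/tmp/profiles/deviceA.pb.gz') as f:  # path is assumed
  profile = profile_pb2.Profile()
  profile.ParseFromString(f.read())
# sample_type entries index into the profile's string table.
for sample_type in profile.sample_type:
  print(profile.string_table[sample_type.type],
        profile.string_table[sample_type.unit])
```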
|
tensorflow-master
|
tensorflow/python/profiler/pprof_profiler_test.py
|
tensorflow-master
|
tensorflow/python/profiler/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import option_builder
# pylint: disable=g-bad-import-order
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
builder = option_builder.ProfileOptionBuilder
class ProfilerTest(test.TestCase):
@test_util.run_deprecated_v1
def testProfileBasic(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.select(['params', 'float_ops', 'micros', 'bytes',
'device', 'op_types', 'occurrence']).build())
# Test the output without run_meta.
sess = session.Session()
r = lib.BuildFullModel()
sess.run(variables.global_variables_initializer())
# Test the output with run_meta.
run_meta = config_pb2.RunMetadata()
_ = sess.run(r,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
profiler = model_analyzer.Profiler(sess.graph)
profiler.add_step(1, run_meta)
profiler.profile_graph(opts)
with gfile.Open(outfile, 'r') as f:
profiler_str = f.read()
model_analyzer.profile(
sess.graph, cmd='graph', run_meta=run_meta, options=opts)
with gfile.Open(outfile, 'r') as f:
pma_str = f.read()
self.assertEqual(pma_str, profiler_str)
profiler.profile_name_scope(opts)
with gfile.Open(outfile, 'r') as f:
profiler_str = f.read()
model_analyzer.profile(
sess.graph, cmd='scope', run_meta=run_meta, options=opts)
with gfile.Open(outfile, 'r') as f:
pma_str = f.read()
self.assertEqual(pma_str, profiler_str)
profiler.profile_python(opts)
with gfile.Open(outfile, 'r') as f:
profiler_str = f.read()
model_analyzer.profile(
sess.graph, cmd='code', run_meta=run_meta, options=opts)
with gfile.Open(outfile, 'r') as f:
pma_str = f.read()
self.assertEqual(pma_str, profiler_str)
profiler.profile_operations(opts)
with gfile.Open(outfile, 'r') as f:
profiler_str = f.read()
model_analyzer.profile(
sess.graph, cmd='op', run_meta=run_meta, options=opts)
with gfile.Open(outfile, 'r') as f:
pma_str = f.read()
self.assertEqual(pma_str, profiler_str)
model_analyzer.profile(
sess.graph, cmd='scope', run_meta=run_meta, options=opts)
with gfile.Open(outfile, 'r') as f:
pma_str = f.read()
self.assertNotEqual(pma_str, profiler_str)
def testMultiStepProfile(self):
ops.reset_default_graph()
opts = builder.time_and_memory(min_bytes=0)
with session.Session() as sess:
r1, r2, r3 = lib.BuildSplitableModel()
sess.run(variables.global_variables_initializer())
profiler = model_analyzer.Profiler(sess.graph)
pb0 = profiler.profile_name_scope(opts)
run_meta = config_pb2.RunMetadata()
_ = sess.run(r1,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
profiler.add_step(1, run_meta)
pb1 = profiler.profile_name_scope(opts)
self.assertNotEqual(lib.SearchTFProfNode(pb1, 'DW'), None)
self.assertEqual(lib.SearchTFProfNode(pb1, 'DW2'), None)
self.assertEqual(lib.SearchTFProfNode(pb1, 'add'), None)
run_meta2 = config_pb2.RunMetadata()
_ = sess.run(r2,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta2)
profiler.add_step(2, run_meta2)
pb2 = profiler.profile_name_scope(opts)
self.assertNotEqual(lib.SearchTFProfNode(pb2, 'DW'), None)
self.assertNotEqual(lib.SearchTFProfNode(pb2, 'DW2'), None)
self.assertEqual(lib.SearchTFProfNode(pb2, 'add'), None)
run_meta3 = config_pb2.RunMetadata()
_ = sess.run(r3,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta3)
profiler.add_step(3, run_meta3)
pb3 = profiler.profile_name_scope(opts)
self.assertNotEqual(lib.SearchTFProfNode(pb3, 'DW'), None)
self.assertNotEqual(lib.SearchTFProfNode(pb3, 'DW2'), None)
self.assertNotEqual(lib.SearchTFProfNode(pb3, 'add'), None)
self.assertEqual(lib.SearchTFProfNode(pb0, 'Conv2D'), None)
self.assertGreater(lib.SearchTFProfNode(pb1, 'Conv2D').exec_micros, 0)
self.assertEqual(lib.SearchTFProfNode(pb1, 'Conv2D_1'), None)
self.assertGreater(lib.SearchTFProfNode(pb2, 'Conv2D_1').exec_micros, 0)
self.assertEqual(lib.SearchTFProfNode(pb2, 'add'), None)
self.assertGreater(lib.SearchTFProfNode(pb3, 'add').exec_micros, 0)
advice_pb = profiler.advise(model_analyzer.ALL_ADVICE)
self.assertTrue('AcceleratorUtilizationChecker' in advice_pb.checkers)
self.assertTrue('ExpensiveOperationChecker' in advice_pb.checkers)
self.assertTrue('OperationChecker' in advice_pb.checkers)
checker = advice_pb.checkers['AcceleratorUtilizationChecker']
if test.is_gpu_available():
self.assertGreater(len(checker.reports), 0)
else:
self.assertEqual(len(checker.reports), 0)
checker = advice_pb.checkers['ExpensiveOperationChecker']
self.assertGreater(len(checker.reports), 0)
@test_util.run_deprecated_v1
def testMultipleProfilePerStep(self):
ops.reset_default_graph()
opts = (builder(builder.trainable_variables_parameter())
.with_empty_output()
.with_accounted_types(['.*'])
.select(['micros', 'bytes', 'peak_bytes',
'residual_bytes', 'output_bytes']).build())
r = lib.BuildSmallModel()
sess = session.Session()
profiler = model_analyzer.Profiler(sess.graph)
init_var_run_meta = config_pb2.RunMetadata()
sess.run(variables.global_variables_initializer(),
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=init_var_run_meta)
train_run_meta = config_pb2.RunMetadata()
sess.run(r,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=train_run_meta)
profiler.add_step(0, train_run_meta)
ret1 = profiler.profile_name_scope(opts)
n1 = lib.SearchTFProfNode(
ret1, 'DW/Initializer/random_normal/RandomStandardNormal')
    # Without the var-initialization run_meta, the profiler doesn't have
    # the information about variable initialization.
self.assertEqual(n1.exec_micros, 0)
self.assertEqual(n1.requested_bytes, 0)
self.assertEqual(n1.peak_bytes, 0)
self.assertEqual(n1.residual_bytes, 0)
profiler.add_step(0, init_var_run_meta)
ret2 = profiler.profile_name_scope(opts)
n2 = lib.SearchTFProfNode(
ret2, 'DW/Initializer/random_normal/RandomStandardNormal')
# After adding the var initialization run_meta.
self.assertGreater(n2.exec_micros, 0)
self.assertGreater(n2.requested_bytes, 0)
self.assertGreater(n2.peak_bytes, 0)
self.assertGreater(n2.residual_bytes, 0)
if __name__ == '__main__':
test.main()
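Condensed, the `Profiler` flow exercised by these tests is: run a step with full tracing, register the resulting `RunMetadata`, then query a view or ask for advice. A sketch reusing the imports above; `sess` and `train_op` are assumed:

```python
profiler = model_analyzer.Profiler(sess.graph)
run_meta = config_pb2.RunMetadata()
sess.run(train_op,  # train_op is assumed
         options=config_pb2.RunOptions(
             trace_level=config_pb2.RunOptions.FULL_TRACE),
         run_metadata=run_meta)
profiler.add_step(1, run_meta)  # register stats for step 1
pb = profiler.profile_name_scope(builder.time_and_memory())
advice_pb = profiler.advise(model_analyzer.ALL_ADVICE)
```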
|
tensorflow-master
|
tensorflow/python/profiler/profiler_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for building profiler options."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.profiler import tfprof_logger
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['profiler.ProfileOptionBuilder'])
class ProfileOptionBuilder(object):
# pylint: disable=line-too-long
"""Option Builder for Profiling API.
For tutorial on the options, see
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md
```python
# Users can use pre-built options:
opts = (
tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())
# Or, build your own options:
opts = (tf.compat.v1.profiler.ProfileOptionBuilder()
.with_max_depth(10)
.with_min_micros(1000)
.select(['accelerator_micros'])
.with_stdout_output()
      .build())
# Or customize the pre-built options:
opts = (tf.compat.v1.profiler.ProfileOptionBuilder(
tf.profiler.ProfileOptionBuilder.time_and_memory())
.with_displaying_options(show_name_regexes=['.*rnn.*'])
.build())
# Finally, profiling with the options:
_ = tf.compat.v1.profiler.profile(tf.compat.v1.get_default_graph(),
run_meta=run_meta,
cmd='scope',
options=opts)
```
"""
# pylint: enable=line-too-long
def __init__(self, options=None):
"""Constructor.
Args:
options: Optional initial option dict to start with.
"""
if options is not None:
self._options = copy.deepcopy(options)
else:
self._options = {'max_depth': 100,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'min_occurrence': 0,
'order_by': 'name',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': False,
'select': ['micros'],
'step': -1,
'output': 'stdout'}
@staticmethod
def trainable_variables_parameter():
"""Options used to profile trainable variable parameters.
Normally used together with 'scope' view.
Returns:
A dict of profiling options.
"""
return {'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'min_occurrence': 0,
'order_by': 'name',
'account_type_regexes': [tfprof_logger.TRAINABLE_VARIABLES],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['params'],
'step': -1,
'output': 'stdout'}
@staticmethod
def float_operation():
# pylint: disable=line-too-long
"""Options used to profile float operations.
Please see https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/profile_model_architecture.md
on the caveats of calculating float operations.
Returns:
A dict of profiling options.
"""
# pylint: enable=line-too-long
return {'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 1,
'min_occurrence': 0,
'order_by': 'float_ops',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['float_ops'],
'step': -1,
'output': 'stdout'}
@staticmethod
def time_and_memory(min_micros=1, min_bytes=1, min_accelerator_micros=0,
min_cpu_micros=0, min_peak_bytes=0, min_residual_bytes=0,
min_output_bytes=0):
"""Show operation time and memory consumptions.
Args:
      min_micros: Only show profiler nodes with execution time no less than
        this. It sums accelerator and cpu times.
      min_bytes: Only show profiler nodes requested to allocate no fewer
        bytes than this.
      min_accelerator_micros: Only show profiler nodes that spend no less
        than this time on accelerator (e.g. GPU).
      min_cpu_micros: Only show profiler nodes that spend no less than this
        time on cpu.
      min_peak_bytes: Only show profiler nodes using no fewer bytes than this
        at peak (high watermark). For profiler nodes consisting of multiple
        graph nodes, it sums the graph nodes' peak_bytes.
      min_residual_bytes: Only show profiler nodes with no fewer bytes than
        this still allocated after Compute() ends. For profiler nodes
        consisting of multiple graph nodes, it sums the graph nodes'
        residual_bytes.
      min_output_bytes: Only show profiler nodes with no fewer output bytes
        than this. The outputs are not necessarily allocated by the profiler
        nodes themselves.
Returns:
A dict of profiling options.
"""
return {'max_depth': 10000,
'min_bytes': min_bytes,
'min_peak_bytes': min_peak_bytes,
'min_residual_bytes': min_residual_bytes,
'min_output_bytes': min_output_bytes,
'min_micros': min_micros,
'min_accelerator_micros': min_accelerator_micros,
'min_cpu_micros': min_cpu_micros,
'min_params': 0,
'min_float_ops': 0,
'min_occurrence': 0,
'order_by': 'micros',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['micros', 'bytes'],
'step': -1,
'output': 'stdout'}
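  # Illustrative usage (a sketch; `graph` and `run_meta` are assumed to come
  # from the caller): the dicts returned by the static methods above can be
  # passed directly as `options` to `model_analyzer.profile`, e.g.
  #   opts = ProfileOptionBuilder.time_and_memory(min_bytes=1024)
  #   model_analyzer.profile(graph, run_meta=run_meta, cmd='scope',
  #                          options=opts)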
def build(self):
"""Build a profiling option.
Returns:
A dict of profiling options.
"""
return copy.deepcopy(self._options)
def with_max_depth(self, max_depth):
"""Set the maximum depth of display.
The depth depends on profiling view. For 'scope' view, it's the
depth of name scope hierarchy (tree), for 'op' view, it's the number
of operation types (list), etc.
Args:
max_depth: Maximum depth of the data structure to display.
Returns:
self
"""
self._options['max_depth'] = max_depth
return self
def with_min_memory(self,
min_bytes=0,
min_peak_bytes=0,
min_residual_bytes=0,
min_output_bytes=0):
"""Only show profiler nodes consuming no less than 'min_bytes'.
Args:
      min_bytes: Only show profiler nodes requested to allocate no fewer
        bytes than this.
      min_peak_bytes: Only show profiler nodes using no fewer bytes than this
        at peak (high watermark). For profiler nodes consisting of multiple
        graph nodes, it sums the graph nodes' peak_bytes.
      min_residual_bytes: Only show profiler nodes with no fewer bytes than
        this still allocated after Compute() ends. For profiler nodes
        consisting of multiple graph nodes, it sums the graph nodes'
        residual_bytes.
      min_output_bytes: Only show profiler nodes with no fewer output bytes
        than this. The outputs are not necessarily allocated by the profiler
        nodes themselves.
Returns:
self
"""
self._options['min_bytes'] = min_bytes
self._options['min_peak_bytes'] = min_peak_bytes
self._options['min_residual_bytes'] = min_residual_bytes
self._options['min_output_bytes'] = min_output_bytes
return self
def with_min_execution_time(self,
min_micros=0,
min_accelerator_micros=0,
min_cpu_micros=0):
"""Only show profiler nodes consuming no less than 'min_micros'.
Args:
      min_micros: Only show profiler nodes with execution time no less than
        this. It sums accelerator and cpu times.
      min_accelerator_micros: Only show profiler nodes that spend no less
        than this time on accelerator (e.g. GPU).
      min_cpu_micros: Only show profiler nodes that spend no less than this
        time on cpu.
Returns:
self
"""
self._options['min_micros'] = min_micros
self._options['min_accelerator_micros'] = min_accelerator_micros
self._options['min_cpu_micros'] = min_cpu_micros
return self
def with_min_parameters(self, min_params):
"""Only show profiler nodes holding no less than 'min_params' parameters.
    'Parameters' normally refers to the weights of TensorFlow variables.
It reflects the 'capacity' of models.
Args:
      min_params: Only show profiler nodes holding no fewer parameters
        than this.
Returns:
self
"""
self._options['min_params'] = min_params
return self
def with_min_occurrence(self, min_occurrence):
# pylint: disable=line-too-long
"""Only show profiler nodes including no less than 'min_occurrence' graph nodes.
A "node" means a profiler output node, which can be a python line
(code view), an operation type (op view), or a graph node
(graph/scope view). A python line includes all graph nodes created by that
line, while an operation type includes all graph nodes of that type.
Args:
      min_occurrence: Only show profiler nodes including no fewer graph
        nodes than this.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['min_occurrence'] = min_occurrence
return self
def with_min_float_operations(self, min_float_ops):
# pylint: disable=line-too-long
"""Only show profiler nodes consuming no less than 'min_float_ops'.
Please see https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/profile_model_architecture.md
on the caveats of calculating float operations.
Args:
min_float_ops: Only show profiler nodes with float operations
no less than this.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['min_float_ops'] = min_float_ops
return self
def with_accounted_types(self, account_type_regexes):
"""Selectively counting statistics based on node types.
Here, 'types' means the profiler nodes' properties. Profiler by default
consider device name (e.g. /job:xx/.../device:GPU:0) and operation type
(e.g. MatMul) as profiler nodes' properties. User can also associate
customized 'types' to profiler nodes through OpLogProto proto.
For example, user can select profiler nodes placed on gpu:0 with:
`account_type_regexes=['.*gpu:0.*']`
If none of a node's properties match the specified regexes, the node is
not displayed nor accounted.
Args:
account_type_regexes: A list of regexes specifying the types.
Returns:
self.
"""
self._options['account_type_regexes'] = copy.copy(account_type_regexes)
return self
def with_node_names(self,
start_name_regexes=None,
show_name_regexes=None,
hide_name_regexes=None,
trim_name_regexes=None):
"""Regular expressions used to select profiler nodes to display.
    After 'with_accounted_types' is evaluated, 'with_node_names' is
    evaluated as follows:
For a profile data structure, profiler first finds the profiler
nodes matching 'start_name_regexes', and starts displaying profiler
nodes from there. Then, if a node matches 'show_name_regexes' and
doesn't match 'hide_name_regexes', it's displayed. If a node matches
'trim_name_regexes', profiler stops further searching that branch.
Args:
start_name_regexes: list of node name regexes to start displaying.
show_name_regexes: list of node names regexes to display.
hide_name_regexes: list of node_names regexes that should be hidden.
trim_name_regexes: list of node name regexes from where to stop.
Returns:
self
"""
if start_name_regexes is not None:
self._options['start_name_regexes'] = copy.copy(start_name_regexes)
if show_name_regexes is not None:
self._options['show_name_regexes'] = copy.copy(show_name_regexes)
if hide_name_regexes is not None:
self._options['hide_name_regexes'] = copy.copy(hide_name_regexes)
if trim_name_regexes is not None:
self._options['trim_name_regexes'] = copy.copy(trim_name_regexes)
return self
def account_displayed_op_only(self, is_true):
"""Whether only account the statistics of displayed profiler nodes.
Args:
is_true: If true, only account statistics of nodes eventually
displayed by the outputs.
Otherwise, a node's statistics are accounted by its parents
as long as it's types match 'account_type_regexes', even if
it is hidden from the output, say, by hide_name_regexes.
Returns:
self
"""
self._options['account_displayed_op_only'] = is_true
return self
def with_empty_output(self):
"""Do not generate side-effect outputs."""
self._options['output'] = 'none'
return self
def with_stdout_output(self):
"""Print the result to stdout."""
self._options['output'] = 'stdout'
return self
def with_file_output(self, outfile):
"""Print the result to a file."""
self._options['output'] = 'file:outfile=%s' % outfile
return self
def with_timeline_output(self, timeline_file):
"""Generate a timeline json file."""
self._options['output'] = 'timeline:outfile=%s' % timeline_file
return self
def with_pprof_output(self, pprof_file):
"""Generate a pprof profile gzip file.
To use the pprof file:
pprof -png --nodecount=100 --sample_index=1 <pprof_file>
Args:
pprof_file: filename for output, usually suffixed with .pb.gz.
Returns:
self.
"""
self._options['output'] = 'pprof:outfile=%s' % pprof_file
return self
def order_by(self, attribute):
# pylint: disable=line-too-long
"""Order the displayed profiler nodes based on a attribute.
Supported attribute includes micros, bytes, occurrence, params, etc.
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md
Args:
attribute: An attribute the profiler node has.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['order_by'] = attribute
return self
def select(self, attributes):
# pylint: disable=line-too-long
"""Select the attributes to display.
See https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md
for supported attributes.
Args:
      attributes: A list of attributes the profiler nodes have.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['select'] = copy.copy(attributes)
return self
def with_step(self, step):
"""Which profile step to use for profiling.
    The 'step' here refers to the step defined by the `Profiler.add_step()`
    API.
Args:
step: When multiple steps of profiles are available, select which step's
profile to use. If -1, use average of all available steps.
Returns:
self
"""
self._options['step'] = step
return self
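A short example of chaining the builder, mirroring the docstring at the top of the class; the output path is illustrative:

```python
opts = (ProfileOptionBuilder(ProfileOptionBuilder.time_and_memory())
        .with_max_depth(10)
        .with_min_execution_time(min_micros=1000)
        .order_by('micros')
        .select(['micros', 'bytes'])
        .with_file_output('/tmp/profile_dump')  # assumed path
        .build())
```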
|
tensorflow-master
|
tensorflow/python/profiler/option_builder.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import option_builder
# pylint: disable=g-bad-import-order
from tensorflow.python.profiler import profile_context
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
builder = option_builder.ProfileOptionBuilder
class ProfilerContextTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasics(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), "dump")
opts = builder(builder.time_and_memory()
).with_file_output(outfile).build()
x = lib.BuildFullModel()
profile_str = None
profile_step100 = os.path.join(test.get_temp_dir(), "profile_100")
with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
pctx.add_auto_profiling("op", options=opts, profile_steps=[15, 50, 100])
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
total_steps = 101
for i in range(total_steps):
self.evaluate(x)
if i == 14 or i == 49:
self.assertTrue(gfile.Exists(outfile))
gfile.Remove(outfile)
if i == 99:
self.assertTrue(gfile.Exists(profile_step100))
with gfile.Open(outfile, "r") as f:
profile_str = f.read()
gfile.Remove(outfile)
self.assertEqual(set([15, 50, 100]), set(pctx.get_profiles("op").keys()))
with lib.ProfilerFromFile(
os.path.join(test.get_temp_dir(), "profile_100")) as profiler:
profiler.profile_operations(options=opts)
with gfile.Open(outfile, "r") as f:
self.assertEqual(profile_str, f.read())
@test_util.run_deprecated_v1
  def testAutoTracingInDebugMode(self):
ops.reset_default_graph()
x = lib.BuildFullModel()
with profile_context.ProfileContext(test.get_temp_dir(), debug=True):
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(10):
self.evaluate(x)
for f in gfile.ListDirectory(test.get_temp_dir()):
# Warm up, no tracing.
self.assertFalse("run_meta" in f)
self.evaluate(x)
self.assertTrue(
gfile.Exists(os.path.join(test.get_temp_dir(), "run_meta_11")))
gfile.Remove(os.path.join(test.get_temp_dir(), "run_meta_11"))
        # Fetches were seen already, so no new tracing.
self.evaluate(x)
for f in gfile.ListDirectory(test.get_temp_dir()):
self.assertFalse("run_meta" in f)
@test_util.run_deprecated_v1
def testDisabled(self):
ops.reset_default_graph()
x = lib.BuildFullModel()
with profile_context.ProfileContext(test.get_temp_dir(),
enabled=False) as pctx:
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(10):
self.evaluate(x)
self.assertTrue(pctx.profiler is None)
self.assertTrue(
getattr(session.BaseSession, "profile_context", None) is None)
with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(10):
self.evaluate(x)
self.assertFalse(pctx.profiler is None)
self.assertFalse(
getattr(session.BaseSession, "profile_context", None) is None)
if __name__ == "__main__":
test.main()
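The auto-tracing behavior checked by `testAutoTracingInDebugMode` boils down to this: after the warm-up steps, the first run of a new fetch signature is traced (and, with `debug=True`, a `run_meta_<step>` file is dumped), while repeated runs of the same fetches take the fast path. A sketch under those assumptions; `x` and the directory are illustrative:

```python
with profile_context.ProfileContext('/tmp/profiles', debug=True):
  with session.Session() as sess:
    sess.run(variables.global_variables_initializer())
    for _ in range(10):  # warm-up steps: nothing is traced
      sess.run(x)        # x is assumed to be defined elsewhere
    sess.run(x)  # first post-warm-up run of this fetch: traced and dumped
    sess.run(x)  # same fetch signature again: fast path, no new trace
```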
|
tensorflow-master
|
tensorflow/python/profiler/profile_context_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import io
import os
import random
import re
import numpy as np
from tensorflow.core.profiler import profile_pb2
from tensorflow.core.profiler import tfprof_log_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import profile_context
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
from tensorflow.python.util import compat
builder = option_builder.ProfileOptionBuilder
class PrintModelAnalysisTest(test.TestCase):
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def testDumpToFile(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = builder(builder.trainable_variables_parameter()
).with_file_output(outfile).build()
with session.Session(config=self._no_rewrite_session_config()) as sess:
_ = lib.BuildSmallModel()
model_analyzer.profile(sess.graph, options=opts)
with gfile.Open(outfile, 'r') as f:
self.assertEqual(u'node name | # parameters\n'
'_TFProfRoot (--/451 params)\n'
' DW (3x3x3x6, 162/162 params)\n'
' DW2 (2x2x6x12, 288/288 params)\n'
' ScalarW (1, 1/1 params)\n',
lib.CheckAndRemoveDoc(f.read()))
@test_util.run_v1_only('b/120545219')
def testSelectEverythingDetail(self):
ops.reset_default_graph()
dev = '/device:GPU:0' if test.is_gpu_available() else '/device:CPU:0'
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.select(['micros', 'bytes', 'params', 'float_ops', 'occurrence',
'device', 'op_types', 'input_shapes']).build())
with profile_context.ProfileContext(test.get_temp_dir(),
trace_steps=[],
dump_steps=[]) as pctx:
with session.Session(
config=self._no_rewrite_session_config()) as sess, ops.device(dev):
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
pctx.trace_next_step()
pctx.dump_next_step()
_ = self.evaluate(x)
pctx.profiler.profile_name_scope(options=opts)
with gfile.Open(outfile, 'r') as f:
# pylint: disable=line-too-long
dump_str = lib.CheckAndRemoveDoc(f.read())
outputs = dump_str.split('\n')
self.assertEqual(outputs[0],
'node name | # parameters | # float_ops | requested bytes | total execution time | accelerator execution time | cpu execution time | assigned devices | op types | op count (run|defined) | input shapes')
for o in outputs[1:]:
if o.find('Conv2D ') > 0:
metrics = o[o.find('(') +1: o.find(')')].split(',')
# Make sure time is profiled.
gap = 1 if test.is_gpu_available() else 2
for i in range(3, 6, gap):
mat = re.search('(.*)(?:us|ms|sec)/(.*)(?:us|ms|sec)', metrics[i])
self.assertGreater(float(mat.group(1)), 0.0)
self.assertGreater(float(mat.group(2)), 0.0)
# Make sure device is profiled.
if test.is_gpu_available():
self.assertTrue(metrics[6].find('gpu') > 0)
self.assertFalse(metrics[6].find('cpu') > 0)
else:
self.assertFalse(metrics[6].find('gpu') > 0)
self.assertTrue(metrics[6].find('cpu') > 0)
# Make sure float_ops is profiled.
mat = re.search('(.*)k/(.*)k flops', metrics[1].strip())
self.assertGreater(float(mat.group(1)), 0.0)
self.assertGreater(float(mat.group(2)), 0.0)
# Make sure op_count is profiled.
self.assertEqual(metrics[8].strip(), '1/1|1/1')
# Make sure input_shapes is profiled.
self.assertEqual(metrics[9].strip(), '0:2x6x6x3|1:3x3x3x6')
if o.find('DW (3x3x3x6') > 0:
metrics = o[o.find('(') +1: o.find(')')].split(',')
mat = re.search('(.*)/(.*) params', metrics[1].strip())
self.assertGreater(float(mat.group(1)), 0.0)
self.assertGreater(float(mat.group(2)), 0.0)
# pylint: enable=line-too-long
# Test that profiler restored from profile file gives the same result.
gfile.Remove(outfile)
profile_file = os.path.join(test.get_temp_dir(), 'profile_1')
with lib.ProfilerFromFile(profile_file) as profiler:
profiler.profile_name_scope(options=opts)
with gfile.Open(outfile, 'r') as f:
self.assertEqual(dump_str, lib.CheckAndRemoveDoc(f.read()))
def testSelectEverything(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.select(['params', 'float_ops', 'occurrence', 'device', 'op_types',
'input_shapes']).build())
with session.Session(config=self._no_rewrite_session_config()
) as sess, ops.device('/device:CPU:0'):
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
model_analyzer.profile(
sess.graph, run_meta, options=opts)
def testSimpleCodeView(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
# TODO(xpan): Test 'micros'. Since the execution time changes each run,
# it's a bit difficult to test it now.
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.with_node_names(show_name_regexes=['.*model_analyzer_testlib.*'])
.account_displayed_op_only(False)
.select(['bytes', 'params', 'float_ops', 'num_hidden_ops', 'device',
'input_shapes']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
model_analyzer.profile(
sess.graph, run_meta, cmd='code', options=opts)
with gfile.Open(outfile, 'r') as f:
# pylint: disable=line-too-long
self.assertEqual(
'node name | requested bytes | # parameters | # float_ops | assigned devices | in',
lib.CheckAndRemoveDoc(f.read())[0:80])
# pylint: enable=line-too-long
@test_util.run_v1_only('b/120545219')
def testComplexCodeView(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.with_node_names(show_name_regexes=
['.*model_analyzer_testlib.py.*'])
.account_displayed_op_only(False)
.select(['params', 'float_ops']).build())
with profile_context.ProfileContext(test.get_temp_dir(),
trace_steps=[],
dump_steps=[]) as pctx:
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
pctx.trace_next_step()
_ = self.evaluate(x)
tfprof_node = pctx.profiler.profile_python(options=opts)
# pylint: disable=line-too-long
with gfile.Open(outfile, 'r') as f:
lines = f.read().split('\n')
self.assertGreater(len(lines), 5)
result = '\n'.join([l[:min(len(l), 80)] for l in lines])
self.assertTrue(
compat.as_text(lib.CheckAndRemoveDoc(result))
.startswith('node name | # parameters | # float_ops'))
self.assertLess(0, tfprof_node.total_exec_micros)
self.assertEqual(2844, tfprof_node.total_parameters)
      # The graph is modified when MKL is enabled; total_float_ops will
      # be different.
if test_util.IsMklEnabled():
self.assertLess(101600, tfprof_node.total_float_ops)
else:
self.assertLess(145660, tfprof_node.total_float_ops)
self.assertEqual(8, len(tfprof_node.children))
self.assertEqual('_TFProfRoot', tfprof_node.name)
self.assertEqual(
'model_analyzer_testlib.py:63:BuildFullModel',
tfprof_node.children[0].name)
self.assertEqual(
'model_analyzer_testlib.py:63:BuildFullModel (gradient)',
tfprof_node.children[1].name)
self.assertEqual(
'model_analyzer_testlib.py:67:BuildFullModel',
tfprof_node.children[2].name)
self.assertEqual(
'model_analyzer_testlib.py:67:BuildFullModel (gradient)',
tfprof_node.children[3].name)
self.assertEqual(
'model_analyzer_testlib.py:69:BuildFullModel',
tfprof_node.children[4].name)
self.assertEqual(
'model_analyzer_testlib.py:70:BuildFullModel',
tfprof_node.children[5].name)
self.assertEqual(
'model_analyzer_testlib.py:70:BuildFullModel (gradient)',
tfprof_node.children[6].name)
self.assertEqual(
'model_analyzer_testlib.py:72:BuildFullModel',
tfprof_node.children[7].name)
# pylint: enable=line-too-long
def testCodeViewLeafGraphNode(self):
ops.reset_default_graph()
opts = (builder(builder.trainable_variables_parameter())
.with_empty_output()
.with_accounted_types(['.*'])
.account_displayed_op_only(False)
.select(['bytes', 'params', 'float_ops', 'device']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
tfprof_node = model_analyzer.profile(
sess.graph, run_meta, cmd='code', options=opts)
leaf = tfprof_node
while leaf.children:
self.assertEqual(0, len(leaf.graph_nodes))
leaf = leaf.children[0]
self.assertEqual(1, len(leaf.graph_nodes))
def testTimeline(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'timeline')
opts = (builder(builder.trainable_variables_parameter())
.with_max_depth(100000)
.with_step(0)
.with_timeline_output(outfile)
.with_accounted_types(['.*']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
_ = model_analyzer.profile(
sess.graph, run_meta, cmd='graph', options=opts)
with gfile.Open(outfile + '_0', 'r') as f:
# Test that a json file is created.
# TODO(xpan): tfprof Timeline isn't quite correct on Windows.
# Investigate why.
if os.name != 'nt':
self.assertLess(1000, len(f.read()))
else:
self.assertLess(1, len(f.read()))
def testOpView(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.with_min_occurrence(10)
.order_by('occurrence')
.select(['params', 'micros', 'bytes',
'peak_bytes', 'residual_bytes',
'output_bytes', 'occurrence', 'input_shapes']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
tfprof_node = model_analyzer.profile(
sess.graph, run_meta, cmd='op', options=opts)
with gfile.Open(outfile, 'r') as f:
# pylint: disable=line-too-long
self.assertEqual(
'nodename|requestedbytes|peakbytes|residualbytes|outputbytes|totalexecutiontime|acceleratorexecutiontime|cpuexecutiontime|#parameters|opoccurrence(run|defined)|inputshapes',
lib.CheckAndRemoveDoc(f.read()).replace('\t',
'').replace(' ', '')[0:170])
# pylint: enable=line-too-long
total_children = 0
last_occurrence = 1e32
input_shapes = 0
last_total_micros = tfprof_node.total_exec_micros
last_micros = tfprof_node.exec_micros
while tfprof_node.children:
for gnode in tfprof_node.graph_nodes:
input_shapes += len(gnode.input_shapes)
self.assertEqual(len(tfprof_node.children), 1)
tfprof_node = tfprof_node.children[0]
self.assertEqual(
last_total_micros, tfprof_node.total_exec_micros + last_micros)
last_total_micros = tfprof_node.total_exec_micros
last_micros = tfprof_node.exec_micros
total_children += 1
self.assertLessEqual(len(tfprof_node.graph_nodes), last_occurrence)
last_occurrence = len(tfprof_node.graph_nodes)
self.assertGreater(input_shapes, 0)
def testAdvisor(self):
ops.reset_default_graph()
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
advice_pb = model_analyzer.advise(sess.graph, run_meta)
self.assertTrue('AcceleratorUtilizationChecker' in advice_pb.checkers)
self.assertTrue('ExpensiveOperationChecker' in advice_pb.checkers)
self.assertTrue('OperationChecker' in advice_pb.checkers)
checker = advice_pb.checkers['AcceleratorUtilizationChecker']
if test.is_gpu_available():
self.assertGreater(len(checker.reports), 0)
else:
self.assertEqual(len(checker.reports), 0)
checker = advice_pb.checkers['ExpensiveOperationChecker']
self.assertGreater(len(checker.reports), 0)
def pprof_test_helper(self, attribute, should_fail=False):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), attribute + '_pprof.pb.gz')
opts = (builder(builder.time_and_memory())
.select([attribute])
.with_max_depth(100000)
.with_node_names(trim_name_regexes=['ops.py.*'])
.with_pprof_output(outfile).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
_ = model_analyzer.profile(
sess.graph, run_meta, cmd='code', options=opts)
if should_fail:
self.assertFalse(gfile.Exists(outfile))
return
profile_pb = profile_pb2.Profile()
with gfile.Open(outfile, 'rb') as f:
with gzip.GzipFile(fileobj=io.BytesIO(f.read())) as gzipf:
profile_pb.ParseFromString(gzipf.read())
self.assertGreater(len(profile_pb.sample), 10)
self.assertGreater(len(profile_pb.location), 10)
self.assertGreater(len(profile_pb.function), 10)
self.assertGreater(len(profile_pb.string_table), 30)
has_rnn = False
has_loop = False
for s in profile_pb.string_table:
if s.find('rnn') > 0:
has_rnn = True
if s.find('while') > 0:
has_loop = True
self.assertFalse(s.startswith('ops.py'))
self.assertTrue(has_rnn)
self.assertTrue(has_loop)
def testPprof(self):
for attr in ['micros', 'bytes', 'accelerator_micros', 'cpu_micros',
'params', 'float_ops']:
self.pprof_test_helper(attr)
for attr in ['op_types', 'device', 'input_shapes']:
self.pprof_test_helper(attr, True)
def testMinOption(self):
ops.reset_default_graph()
def check_min(nodes, mm=0, mam=0, mcm=0, mb=0, mpb=0, mrb=0, mob=0):
for n in nodes:
if mm > 0:
self.assertGreaterEqual(n.exec_micros, mm)
if mam > 0:
self.assertGreaterEqual(n.accelerator_exec_micros, mam)
if mcm > 0:
self.assertGreaterEqual(n.cpu_exec_micros, mcm)
if mb > 0:
self.assertGreaterEqual(n.requested_bytes, mb)
if mpb > 0:
self.assertGreaterEqual(n.peak_bytes, mpb)
if mrb > 0:
self.assertGreaterEqual(n.residual_bytes, mrb)
if mob > 0:
self.assertGreaterEqual(n.output_bytes, mob)
check_min(n.children, mm, mam, mcm, mb, mpb, mrb, mob)
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
min_val = random.randint(0, 10000)
opts = builder(builder.time_and_memory(min_micros=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mm=min_val)
opts = builder(builder.time_and_memory(min_accelerator_micros=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mam=min_val)
opts = builder(builder.time_and_memory(min_cpu_micros=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mcm=min_val)
opts = builder(builder.time_and_memory(min_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mb=min_val)
opts = builder(builder.time_and_memory(min_peak_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mpb=min_val)
opts = builder(builder.time_and_memory(min_residual_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mrb=min_val)
opts = builder(builder.time_and_memory(min_output_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mob=min_val)
def testSelectOption(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
def check_selection(selected, not_selected):
with gfile.Open(outfile, 'r') as f:
s = f.read()
for attr in selected:
self.assertTrue(s.find(attr) > 0, s)
for attr in not_selected:
self.assertFalse(s.find(attr) > 0, s)
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
opts = builder(builder.time_and_memory()
).with_file_output(outfile).select(['micros']).build()
_ = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_selection(['total execution time', 'accelerator execution time'],
['bytes'])
opts = builder(builder.time_and_memory()
).with_file_output(outfile).select(['bytes']).build()
_ = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_selection(['requested bytes'],
['peak bytes', 'residual bytes', 'output bytes'])
opts = builder(builder.time_and_memory()).with_file_output(
outfile).select(
['peak_bytes', 'residual_bytes', 'output_bytes']).build()
_ = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_selection(['peak bytes', 'residual bytes', 'output bytes'],
['requested_bytes'])
def _trainLoop(self, train_op, train_steps, time_dir, time_step,
memory_dir, memory_step, profile_dir, dump_step):
with session.Session(config=self._no_rewrite_session_config()) as sess:
self.evaluate(variables.global_variables_initializer())
      # Start from 1 because the variable initializer took one step.
for i in range(1, train_steps + 1):
_ = self.evaluate(train_op)
if i in time_step:
ret = gfile.ListDirectory(time_dir)
self.assertEqual(len(ret), 1)
self.assertTrue(
gfile.Open(os.path.join(time_dir, ret[0]), 'r').read()
.find('execution time') > 0)
_ = [gfile.Remove(os.path.join(time_dir, x)) for x in ret]
else:
self.assertEqual(len(gfile.ListDirectory(time_dir)), 0)
if i in memory_step:
ret = gfile.ListDirectory(memory_dir)
self.assertEqual(len(ret), 1)
self.assertTrue(
gfile.Open(os.path.join(memory_dir, ret[0]), 'r').read()
.find('requested bytes') > 0)
_ = [gfile.Remove(os.path.join(memory_dir, x)) for x in ret]
else:
self.assertEqual(len(gfile.ListDirectory(memory_dir)), 0)
if i in dump_step:
ret = gfile.ListDirectory(profile_dir)
self.assertAllEqual(ret, ['profile_%d' % i])
_ = [gfile.Remove(os.path.join(profile_dir, x)) for x in ret]
else:
if i < dump_step[0]:
self.assertFalse(gfile.Exists(profile_dir))
else:
self.assertEqual(len(gfile.ListDirectory(profile_dir)), 0)
@test_util.run_v1_only('b/120545219')
def testAutoProfiling(self):
ops.reset_default_graph()
time_dir = os.path.join(test.get_temp_dir(), 'time')
memory_dir = os.path.join(test.get_temp_dir(), 'memory')
profile_dir = os.path.join(test.get_temp_dir(), 'dir/dir2/profile')
# TODO(xpan): Should we create parent directory for them?
gfile.MkDir(time_dir)
gfile.MkDir(memory_dir)
time_opts = (builder(builder.time_and_memory())
.with_file_output(os.path.join(time_dir, 'profile'))
.select(['micros']).build())
memory_opts = (builder(builder.time_and_memory())
.with_file_output(os.path.join(memory_dir, 'profile'))
.select(['bytes']).build())
time_steps = [2, 3]
memory_steps = [1, 3]
dump_steps = [3, 4]
x = lib.BuildSmallModel()
with profile_context.ProfileContext(profile_dir,
trace_steps=[1, 2, 3],
dump_steps=[3, 4]) as pctx:
pctx.add_auto_profiling('scope', time_opts, time_steps)
pctx.add_auto_profiling('scope', memory_opts, memory_steps)
self._trainLoop(x, 10, time_dir, time_steps,
memory_dir, memory_steps, profile_dir, dump_steps)
@test_util.run_v1_only('b/120545219')
def testOOM(self):
if not test.is_gpu_available():
return
ops.reset_default_graph()
with ops.device('/device:GPU:0'):
a = random_ops.random_normal([1, 10000, 20000], name='test_random1')
b = random_ops.random_normal([30000, 10000, 1], name='test_random2')
c = a * b
try:
with session.Session(config=self._no_rewrite_session_config()) as sess:
sess.run(c, options=config_pb2.RunOptions(
report_tensor_allocations_upon_oom=True))
except Exception as e: # pylint: disable=broad-except
exception_str = '%s' % e
      # This trace reports allocations for the two random tensors.
self.assertTrue(
'OOM when allocating tensor with shape[30000,10000,20000]' in
exception_str)
mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',
exception_str)
self.assertGreater(float(mat.group(1)), 0.0)
mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',
exception_str)
self.assertGreater(float(mat.group(1)), 0.0)
@test_util.run_v1_only('b/120545219')
def testDistributedOOM(self):
if not test.is_gpu_available():
return
ops.reset_default_graph()
workers, _ = test_util.create_local_cluster(2, 0)
with ops.device('/job:worker/replica:0/task:0/gpu:0'):
a = random_ops.random_normal([1, 10000, 20000], name='test_random1')
with ops.device('/job:worker/replica:0/task:1/gpu:0'):
b = random_ops.random_normal([30000, 10000, 1], name='test_random2')
c = a * b
try:
with session.Session(workers[1].target) as sess:
sess.run(c, options=config_pb2.RunOptions(
report_tensor_allocations_upon_oom=True))
except Exception as e: # pylint: disable=broad-except
exception_str = '%s' % e
# test_random2 is reported because it's allocated in worker 1.
self.assertTrue('Current usage from device: '
'/job:worker/replica:0/task:1/device:GPU:0, '
'allocator: GPU_0_bfc' in exception_str)
mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',
exception_str)
self.assertGreater(float(mat.group(1)), 0.0)
# test_random1 is not reported because it's allocated in worker 0.
mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',
exception_str)
self.assertTrue(mat is None)
@test_util.run_v1_only('b/120545219')
def testTrackPersistentBytes(self):
ops.reset_default_graph()
a = array_ops.constant(np.ones((100, 100)))
b = array_ops.constant(np.ones((100, 100)))
c = a * b
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.min_graph_nodes = -1
with session.Session(config=config) as sess:
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess.run(c, options=run_options, run_metadata=run_metadata)
options = option_builder.ProfileOptionBuilder.time_and_memory()
options['min_bytes'] = 0
options['select'] = ('bytes', 'peak_bytes', 'output_bytes',
'residual_bytes')
ret = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
run_metadata = config_pb2.RunMetadata()
sess.run(c, options=run_options, run_metadata=run_metadata)
ret2 = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
n = lib.SearchTFProfNode(ret, 'mul')
n2 = lib.SearchTFProfNode(ret2, 'mul')
self.assertGreater(n.peak_bytes, 0)
self.assertGreater(n.output_bytes, 0)
self.assertGreater(n.residual_bytes, 0)
self.assertEqual(n.peak_bytes, n2.peak_bytes)
self.assertEqual(n.output_bytes, n2.output_bytes)
self.assertEqual(n.residual_bytes, n2.residual_bytes)
@test_util.run_v1_only('b/120545219')
def testTraceLoopBytes(self):
if not test.is_gpu_available(): return
ops.reset_default_graph()
steps = 100
with ops.device('/gpu:0'):
x = array_ops.ones((100, 100), dtype=dtypes.float32)
n = array_ops.constant(steps, dtype=dtypes.int32)
x1 = array_ops.ones((100, 100))
x *= x1
def loop_body(i, x):
x *= x
return i + 1, x
_, y = control_flow_ops.while_loop(
lambda i, x: i < n, loop_body,
[array_ops.constant(0), x])
grad = gradients.gradients(y, [x1])
with session.Session(config=self._no_rewrite_session_config()) as sess:
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess.run(grad, options=run_options, run_metadata=run_metadata)
options = option_builder.ProfileOptionBuilder.time_and_memory()
options['min_bytes'] = 0
options['min_micros'] = 0
options['select'] = ('bytes', 'peak_bytes', 'output_bytes',
'residual_bytes')
options['output'] = 'none'
ret_pb = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
self.assertGreater(ret_pb.total_requested_bytes, 1000000)
def testEager(self):
ops.reset_default_graph()
with context.eager_mode():
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = builder(
builder.time_and_memory()).with_file_output(outfile).build()
context.enable_run_metadata()
lib.BuildSmallModel()
profiler = model_analyzer.Profiler()
profiler.add_step(0, context.export_run_metadata())
context.disable_run_metadata()
profiler.profile_operations(opts)
with gfile.Open(outfile, 'r') as f:
out_str = f.read()
self.assertTrue('Conv2D' in out_str)
self.assertTrue('VarHandleOp' in out_str)
with gfile.Open('/tmp/eager_profile', 'wb') as f:
profile_pb = tfprof_log_pb2.ProfileProto()
profile_pb.ParseFromString(profiler.serialize_to_string())
profile_pb_str = '%s' % profile_pb
self.assertTrue('Conv2D' in profile_pb_str)
self.assertTrue('VarHandleOp' in profile_pb_str)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/profiler/model_analyzer_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TFProfLoggerTest(test.TestCase):
  def _BuildSmallPlaceholderModel(self):
a = array_ops.placeholder(dtypes.int32, [2, 2])
b = array_ops.placeholder(dtypes.int32, [2, 2])
y = math_ops.matmul(a, b)
return a, b, y
def _BuildSmallModel(self):
a = constant_op.constant([[1, 2], [3, 4]])
b = constant_op.constant([[1, 2], [3, 4]])
return math_ops.matmul(a, b)
# pylint: disable=pointless-string-statement
"""# TODO(xpan): This out of core so it doesn't depend on contrib.
def testFillMissingShape(self):
    a, b, y = self._BuildSmallPlaceholderModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y,
options=run_options,
run_metadata=run_metadata,
feed_dict={a: [[1, 2], [2, 3]],
b: [[1, 2], [2, 3]]})
graph2 = ops.Graph()
# Use copy_op_to_graph to remove shape information.
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
    self.assertEqual('<unknown>', str(y2.get_shape()))
    tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
    self.assertEqual('(2, 2)', str(y2.get_shape()))
def testFailedFillMissingShape(self):
y = self._BuildSmallModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y, options=run_options, run_metadata=run_metadata)
graph2 = ops.Graph()
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
    self.assertEqual('<unknown>', str(y2.get_shape()))
    # run_metadata has a special name for MatMul, hence it fails to fill
    # the shape.
    tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
    self.assertEqual('<unknown>', str(y2.get_shape()))
"""
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/profiler/tfprof_logger_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""profiler python module provides APIs to profile TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.core.profiler.tfprof_log_pb2 import OpLogProto
from tensorflow.core.profiler.tfprof_output_pb2 import AdviceProto
from tensorflow.core.profiler.tfprof_output_pb2 import GraphNodeProto
from tensorflow.core.profiler.tfprof_output_pb2 import MultiGraphNodeProto
from tensorflow.python.profiler.model_analyzer import advise
from tensorflow.python.profiler.model_analyzer import profile
from tensorflow.python.profiler.model_analyzer import Profiler
from tensorflow.python.profiler.option_builder import ProfileOptionBuilder
from tensorflow.python.profiler.tfprof_logger import write_op_log
from tensorflow.python.util.tf_export import tf_export
_allowed_symbols = [
'Profiler',
'profile',
'ProfileOptionBuilder',
'advise',
'write_op_log',
]
_allowed_symbols.extend([
'GraphNodeProto',
'MultiGraphNodeProto',
'AdviceProto',
'OpLogProto',
])
# Export protos
tf_export(v1=['profiler.GraphNodeProto'])(GraphNodeProto)
tf_export(v1=['profiler.MultiGraphNodeProto'])(MultiGraphNodeProto)
tf_export(v1=['profiler.AdviceProto'])(AdviceProto)
tf_export(v1=['profiler.OpLogProto'])(OpLogProto)
|
tensorflow-master
|
tensorflow/python/profiler/profiler.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Profiler for TensorFlow models that outputs data in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto for pprof
profile format.
The following needs to be set for the profiler to work:
  * trace_level needs to be set to FULL_TRACE
  * run_metadata object should be passed into the session.run call
Sample usage:
  options = tf.compat.v1.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  run_metadata = tf.compat.v1.RunMetadata()
  with tf.compat.v1.Session() as sess:
    ...
    sess.run(computation, run_metadata=run_metadata, options=options)
  pprof_profiler.profile(sess.graph, run_metadata, output_dir)
The code above would output a pprof profile to a separate
output_dir/.*.pb.gz file for each device. These files can be passed
to pprof for formatting. For example:
  pprof -png --nodecount=100 --sample_index=1 output_dir/profile_output.pb.gz
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
from collections import namedtuple
import gzip
import os
import string
import sys
import time
from proto import profile_pb2
if sys.version_info < (3,):
maketrans = string.maketrans
else:
maketrans = str.maketrans
ProfileDatum = namedtuple('ProfileDatum', [
'node_exec_stats', 'op_type', 'traceback'])
class StringTable(object):
"""Keeps track of strings to add to string_table in pprof proto."""
def __init__(self):
# Pprof requires first entry in string_table to be ''.
self._string_table = ['']
self._string_to_index = {'': 0}
def index_of(self, value_str):
"""Get index of value_str in the string table.
If value_str is not in the string table, we will add it at the end
and then return the new index.
Args:
value_str: (string) Value to lookup/add in/to the string table.
Returns:
Index of value_str in the string table.
"""
if value_str is None:
value_str = ''
if value_str in self._string_to_index:
return self._string_to_index[value_str]
index = len(self._string_table)
self._string_table.append(value_str)
self._string_to_index[value_str] = index
return index
def next_index(self):
"""Gets index that would be assigned to the next added string.
Returns:
Index of the next string if it was added.
"""
return len(self._string_table)
def string_table(self):
"""Returns a list of strings to store in pprof's string_table."""
return self._string_table
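# Illustrative sketch (not part of the original module): StringTable
# deduplicates strings and keeps indices stable, with '' fixed at index 0
# as pprof requires.
def _string_table_example():
  """Shows index reuse for repeated lookups of the same string."""
  table = StringTable()
  first = table.index_of('node_name')   # Added at index 1 ('' occupies 0).
  second = table.index_of('node_name')  # Looked up, not re-added.
  assert first == second == 1
  assert table.string_table() == ['', 'node_name']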
class Functions(object):
"""Keeps track of `Function` protos for pprof profile."""
def __init__(self, string_table):
"""Constructor.
Args:
string_table: A `StringTable` object.
"""
self._string_table = string_table
# Maps tuples in the form (file_path, function_name, start_line_number)
# to `Function` protos.
self._function_key_to_function = {}
def index_of(self, file_path, function_name, function_start_line):
"""Returns index of the function, adding the function if needed.
Args:
file_path: (string) Path to file where the function is defined.
function_name: (string) Function name.
function_start_line: (integer) Start line number of function definition.
Returns:
Function index.
"""
function_key = (file_path, function_name, function_start_line)
if function_key in self._function_key_to_function:
return self._function_key_to_function[function_key].id
else:
# Function indexes should start from 1
function_index = len(self._function_key_to_function) + 1
function = profile_pb2.Function()
function.id = function_index
function.name = self._string_table.index_of(function_name)
function.filename = self._string_table.index_of(file_path)
function.start_line = function_start_line
self._function_key_to_function[function_key] = function
return function_index
def function_protos(self):
"""Returns list of `profile_pb2.Function` protos."""
return self._function_key_to_function.values()
class Locations(object):
"""Keeps track of `Location` protos for pprof profile.
`Locations` store information about function call locations.
"""
def __init__(self, functions):
"""Constructor.
Args:
functions: A `Functions` object.
"""
self._functions = functions
# Maps tuples in the form (file_path, called_function_name, line_number)
# to `Location` protos.
self._location_key_to_location = {}
def index_of(
self, file_path, line_number, called_function_name, called_file_path,
called_function_start_line):
"""Returns index of the location, adding the location if needed.
Args:
file_path: (string) Path to file that makes the call.
line_number: (integer) Call line number.
called_function_name: (string) Function name of the function called at
`file_path` and `line_number`.
called_file_path: (string) Path to file where the called function is
defined.
called_function_start_line: (integer) Start line number of called
function definition in `called_file_path` file.
Returns:
Index of location.
"""
location_key = (file_path, called_function_name, line_number)
if location_key in self._location_key_to_location:
location = self._location_key_to_location[location_key]
return location.id
else:
# Location indexes should start from 1
location_index = len(self._location_key_to_location) + 1
location = profile_pb2.Location()
location.id = location_index
self._location_key_to_location[location_key] = location
line = location.line.add()
line.function_id = self._functions.index_of(
called_file_path, called_function_name, called_function_start_line)
line.line = line_number
return location_index
def location_protos(self):
"""Returns list of `profile_pb2.Location` protos."""
return self._location_key_to_location.values()
class Samples(object):
"""Keeps track of `Sample` protos for pprof profile.
Samples store the following statistics in order:
count, all_time, op_time
"""
def __init__(self, string_table):
"""Constructor.
Args:
string_table: A `StringTable` object.
"""
self._string_table = string_table
# TODO(annarev): figure out if location is unique for each node name.
# If not, also key this dictionary based on location ids.
self._node_name_to_sample = {}
def add(self, datum, location_ids):
"""Adds a sample data point.
Args:
datum: `ProfileDatum` to add a sample for.
      location_ids: List of numeric location ids for this sample.
"""
node_name = datum.node_exec_stats.node_name
if node_name in self._node_name_to_sample:
sample = self._node_name_to_sample[node_name]
sample.location_id.extend(location_ids)
else:
sample = profile_pb2.Sample()
# Sample stores 3 values: count, all_time, op_time
sample.value.extend([0, 0, 0])
label = sample.label.add()
label.key = self._string_table.index_of('node_name')
label.str = self._string_table.index_of(node_name)
label = sample.label.add()
label.key = self._string_table.index_of('op_type')
label.str = self._string_table.index_of(datum.op_type)
self._node_name_to_sample[node_name] = sample
sample.value[0] += 1
sample.value[1] += datum.node_exec_stats.all_end_rel_micros
sample.value[2] += (
datum.node_exec_stats.op_end_rel_micros -
datum.node_exec_stats.op_start_rel_micros)
def get_sample_protos(self):
"""Returns list of `Sample` protos for pprof profile."""
return self._node_name_to_sample.values()
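# Illustrative sketch (not part of the original module): how Sample values
# accumulate across repeated executions of the same node. _FakeExecStats is
# a minimal stand-in for a NodeExecStats proto.
def _samples_example():
  """Adds the same node twice and checks count/all_time/op_time."""
  class _FakeExecStats(object):
    node_name = 'MatMul'
    all_end_rel_micros = 10
    op_start_rel_micros = 2
    op_end_rel_micros = 8
  samples = Samples(StringTable())
  datum = ProfileDatum(_FakeExecStats(), 'MatMul', traceback=[])
  samples.add(datum, location_ids=[1])
  samples.add(datum, location_ids=[2])
  sample = list(samples.get_sample_protos())[0]
  # value == [count, all_time, op_time] == [2, 10 + 10, (8 - 2) * 2].
  assert list(sample.value) == [2, 20, 12]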
class PprofProfiler(object):
"""Creates profiles in pprof format."""
def __init__(self, graph, run_metadata):
"""Constructor.
Args:
graph: A `Graph` instance.
      run_metadata: A `RunMetadata` proto.
"""
self._graph = graph
self._run_metadata = run_metadata
self._string_table = StringTable()
self._functions = Functions(self._string_table)
self._locations = Locations(self._functions)
def profile(self):
"""Generates pprof profiles.
Returns:
Dictionary mapping from device name to proto in `profile_pb2.Profile`
format.
"""
profiles = {}
data_generator_func = self._get_profile_data_generator()
for device_index, device_stats in enumerate(
self._run_metadata.step_stats.dev_stats):
# Create profile
pprof_proto = self._get_pprof_proto(data_generator_func(device_stats))
if not pprof_proto.sample:
print(
'Not enough data to create profile for device %s. Did you pass '
            'RunMetadata to the session.run call?' % device_stats.device)
continue
# Add device name comment
device_count = len(self._run_metadata.step_stats.dev_stats)
device_description = (
'Device %d of %d: %s' %
(device_index + 1, device_count, device_stats.device))
device_description_str_index = self._string_table.next_index()
pprof_proto.string_table.append(device_description)
pprof_proto.comment.append(device_description_str_index)
profiles[device_stats.device] = pprof_proto
return profiles
def _get_pprof_proto(self, profile_datum_generator):
"""Returns profile data in pprof proto format.
Args:
profile_datum_generator: Generator outputting `ProfileDatum` objects.
Returns:
A proto in pprof format.
"""
pprof_profile = profile_pb2.Profile()
samples = Samples(self._string_table)
for datum in profile_datum_generator:
if not datum.traceback:
continue
stack_frame = datum.traceback[-1]
after_apply_op = False
location_ids = []
# We add locations from stack trace in bottom-up order.
for stack_frame_index in reversed(range(len(datum.traceback) - 1)):
prev_stack_frame = stack_frame
stack_frame = datum.traceback[stack_frame_index]
        # The call at the current frame invokes the function at the
        # previous frame.
prev_file_path = prev_stack_frame[0]
prev_function = prev_stack_frame[2]
prev_function_start_line = prev_stack_frame[4]
curr_file_path = stack_frame[0]
curr_line_number = stack_frame[1]
# Skip all calls up to apply_op since they are the same for all ops.
if not after_apply_op:
if prev_function == 'apply_op':
after_apply_op = True
continue
location_index = self._locations.index_of(
curr_file_path, curr_line_number,
prev_function, prev_file_path, prev_function_start_line)
location_ids.append(location_index)
samples.add(datum, location_ids)
sample_type_description = 'count'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('count')
sample_type_description = 'all_time'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('nanoseconds')
sample_type_description = 'op_time'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('nanoseconds')
pprof_profile.string_table.extend(self._string_table.string_table())
pprof_profile.sample.extend(samples.get_sample_protos())
pprof_profile.function.extend(self._functions.function_protos())
pprof_profile.location.extend(self._locations.location_protos())
return pprof_profile
def _get_profile_data_generator(self):
"""Get function that generates `ProfileDatum` objects.
Returns:
A function that generates `ProfileDatum` objects.
"""
node_to_traceback = defaultdict(list)
node_to_op_type = defaultdict(str)
for op in self._graph.get_operations():
node_to_traceback[op.name] = op.traceback_with_start_lines
node_to_op_type[op.name] = op.type
def profile_data_generator(device_step_stats):
for node_stats in device_step_stats.node_stats:
if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':
continue
yield ProfileDatum(
node_stats,
node_to_op_type[node_stats.node_name],
node_to_traceback[node_stats.node_name])
return profile_data_generator
def get_profiles(graph, run_metadata):
"""Generate profiles in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto
for pprof proto format.
Args:
graph: A `Graph` object.
run_metadata: A `RunMetadata` proto.
Returns:
A dictionary mapping from device name to pprof proto for that device.
"""
return PprofProfiler(graph, run_metadata).profile()
def profile(graph, run_metadata, output_dir=None):
"""Generate profiles in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto
for pprof proto format.
Args:
graph: A `Graph` object.
run_metadata: A `RunMetadata` proto.
output_dir: (string) Directory to output pprof profile to.
Profile files for each device will be stored in compressed
serialized proto format. If output_dir is None, profile protos
will be printed to stdout instead.
Returns:
List of output files created by this profile call.
(Note: this list will be empty if output_dir is None)
"""
profiles = get_profiles(graph, run_metadata)
output_file_template = None
if output_dir:
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
time_suffix = time.strftime('%Y%m%d%H%M%S')
output_file_template = os.path.join(
output_dir, '%s_' + time_suffix + '.pb.gz')
profile_files = []
for device, pprof_proto in profiles.items():
if output_file_template is None:
print('No output directory specified, printing to stdout instead.')
print(pprof_proto)
else:
device_name = str(device).strip('/').translate(
maketrans('/:', '__'))
profile_file = output_file_template % device_name
profile_files.append(profile_file)
with gzip.open(profile_file, 'w') as output_file:
print('Writing profile to %s...' % profile_file)
output_file.write(pprof_proto.SerializeToString())
return profile_files
|
tensorflow-master
|
tensorflow/python/profiler/pprof_profiler.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model Analyzer.
Analyze model, including shape, params, time, memory, structure, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from google.protobuf import message
from tensorflow.core.profiler import tfprof_options_pb2
from tensorflow.core.profiler import tfprof_output_pb2
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import tfprof_logger
from tensorflow.python.util.tf_export import tf_export
_DEFAULT_PROFILE_OPTIONS = 0
_DEFAULT_ADVISE_OPTIONS = 0
# The following options are for 'advise' cmd.
# Show all advice.
ALL_ADVICE = {
'ExpensiveOperationChecker': {},
'AcceleratorUtilizationChecker': {},
'JobChecker': {}, # Only available internally.
'OperationChecker': {},
}
def _graph_string(graph):
"""Helper to serialize a graph to string."""
if graph:
return graph.as_graph_def(add_shapes=True).SerializeToString()
else:
return b''
def _build_options(options):
"""Build tfprof.OptionsProto.
Args:
options: A dictionary of options.
Returns:
tfprof.OptionsProto.
"""
opts = tfprof_options_pb2.OptionsProto()
opts.max_depth = options.get('max_depth', 10)
opts.min_bytes = options.get('min_bytes', 0)
opts.min_peak_bytes = options.get('min_peak_bytes', 0)
opts.min_residual_bytes = options.get('min_residual_bytes', 0)
opts.min_output_bytes = options.get('min_output_bytes', 0)
opts.min_micros = options.get('min_micros', 0)
opts.min_accelerator_micros = options.get('min_accelerator_micros', 0)
opts.min_cpu_micros = options.get('min_cpu_micros', 0)
opts.min_params = options.get('min_params', 0)
opts.min_float_ops = options.get('min_float_ops', 0)
opts.min_occurrence = options.get('min_occurrence', 0)
opts.step = options.get('step', -1)
opts.order_by = options.get('order_by', 'name')
for p in options.get('account_type_regexes', []):
opts.account_type_regexes.append(p)
for p in options.get('start_name_regexes', []):
opts.start_name_regexes.append(p)
for p in options.get('trim_name_regexes', []):
opts.trim_name_regexes.append(p)
for p in options.get('show_name_regexes', []):
opts.show_name_regexes.append(p)
for p in options.get('hide_name_regexes', []):
opts.hide_name_regexes.append(p)
opts.account_displayed_op_only = options.get('account_displayed_op_only',
False)
for p in options.get('select', []):
opts.select.append(p)
opts.output = options.get('output', 'stdout')
opts.dump_to_file = options.get('dump_to_file', '')
return opts
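# Illustrative sketch (not part of the original module): a minimal options
# dict and the OptionsProto fields it populates; keys that are not given
# fall back to the defaults above.
def _build_options_example():
  """Builds an OptionsProto from a small options dict."""
  opts = _build_options({
      'max_depth': 4,
      'order_by': 'micros',
      'select': ['micros', 'bytes'],
      'output': 'none',
  })
  assert opts.max_depth == 4
  assert opts.order_by == 'micros'
  assert list(opts.select) == ['micros', 'bytes']
  return opts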
def _build_advisor_options(options):
"""Build tfprof.AdvisorOptionsProto.
Args:
options: A dictionary of options. See ALL_ADVICE example.
Returns:
tfprof.AdvisorOptionsProto.
"""
opts = tfprof_options_pb2.AdvisorOptionsProto()
if options is None:
return opts
for checker, checker_opts in six.iteritems(options):
checker_ops_pb = tfprof_options_pb2.AdvisorOptionsProto.CheckerOption()
for k, v in six.iteritems(checker_opts):
checker_ops_pb[k] = v
opts.checkers[checker].MergeFrom(checker_ops_pb)
return opts
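# Illustrative sketch (not part of the original module): passing ALL_ADVICE
# enables every checker with default (empty) checker options.
def _build_advisor_options_example():
  """Builds an AdvisorOptionsProto that enables all checkers."""
  opts = _build_advisor_options(ALL_ADVICE)
  assert 'ExpensiveOperationChecker' in opts.checkers
  return opts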
@tf_export(v1=['profiler.Profiler'])
class Profiler(object):
"""TensorFlow multi-step profiler.
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
  Typical use case:
  ```python
# Currently we are only allowed to create 1 profiler per process.
profiler = Profiler(sess.graph)
for i in xrange(total_steps):
if i % 10000 == 0:
run_meta = tf.compat.v1.RunMetadata()
_ = sess.run(...,
options=tf.compat.v1.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE),
run_metadata=run_meta)
profiler.add_step(i, run_meta)
# Profile the parameters of your model.
profiler.profile_name_scope(options=(option_builder.ProfileOptionBuilder
.trainable_variables_parameter()))
# Or profile the timing of your model operations.
opts = option_builder.ProfileOptionBuilder.time_and_memory()
profiler.profile_operations(options=opts)
# Or you can generate a timeline:
opts = (option_builder.ProfileOptionBuilder(
option_builder.ProfileOptionBuilder.time_and_memory())
.with_step(i)
.with_timeline_output(filename).build())
profiler.profile_graph(options=opts)
else:
_ = sess.run(...)
# Auto detect problems and generate advice.
profiler.advise()
```
"""
def __init__(self, graph=None, op_log=None):
"""Constructor.
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
extra op types.
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
self._coverage = 0.0
self._graph = graph
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
self._graph, op_log=op_log)
# pylint: enable=protected-access
print_mdl.NewProfiler(
_graph_string(self._graph), op_log.SerializeToString())
def __del__(self):
print_mdl.DeleteProfiler()
def add_step(self, step, run_meta):
"""Add statistics of a step.
Args:
      step: int, An id used to group one or more different `run_meta`
        together. When profiling with the profile_xxx APIs, users can use
        the `step` id in the `options` to profile these `run_meta` together.
run_meta: RunMetadata proto that contains statistics of a session run.
"""
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
self._graph, run_meta=run_meta)
# pylint: enable=protected-access
# TODO(xpan): P1: Better to find the current graph.
self._coverage = print_mdl.AddStep(step, _graph_string(self._graph),
run_meta.SerializeToString(),
op_log.SerializeToString())
def profile_python(self, options):
"""Profile the statistics of the Python codes.
By default, it shows the call stack from root. To avoid
redundant output, you may use options to filter as below
options['show_name_regexes'] = ['.*my_code.py.*']
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a MultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('code'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_operations(self, options):
"""Profile the statistics of the Operation types (e.g. MatMul, Conv2D).
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a MultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('op'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_name_scope(self, options):
"""Profile the statistics of graph nodes, organized by name scope.
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a GraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.GraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('scope'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_graph(self, options):
"""Profile the statistics of graph nodes, organized by dataflow graph.
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a GraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.GraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('graph'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def advise(self, options):
"""Automatically detect problems and generate reports.
Args:
options: A dict of options. See ALL_ADVICE example above.
Returns:
      An AdviceProto that contains the reports from all checkers.
"""
advise_pb = tfprof_output_pb2.AdviceProto()
opts = _build_advisor_options(options)
advise_pb.ParseFromString(
print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))
return advise_pb
def serialize_to_string(self):
"""Serialize the ProfileProto to a binary string.
    Users can write it to a file for offline analysis with the tfprof
    command line or the graphical interface.
Returns:
ProfileProto binary string.
"""
return print_mdl.SerializeToString()
def _write_profile(self, filename):
"""Writes the profile to a file."""
print_mdl.WriteProfile(filename)
@tf_export(v1=['profiler.profile'])
def profile(graph=None,
run_meta=None,
op_log=None,
cmd='scope',
options=_DEFAULT_PROFILE_OPTIONS):
"""Profile model.
Tutorials and examples can be found in:
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary
      to support run time information profiling, such as time and memory.
    op_log: tensorflow.tfprof.OpLogProto proto. Users can assign "types" to
      graph nodes with op_log. "types" allow users to flexibly group and
      account profiles using options['account_type_regexes'].
cmd: string. Either 'op', 'scope', 'graph' or 'code'.
'op' view organizes profile using operation type. (e.g. MatMul)
'scope' view organizes profile using graph node name scope.
'graph' view organizes profile using graph node inputs/outputs.
'code' view organizes profile using Python call stack.
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
If cmd is 'scope' or 'graph', returns GraphNodeProto proto.
If cmd is 'op' or 'code', returns MultiGraphNodeProto proto.
Side effect: stdout/file/timeline.json depending on options['output']
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
if options == _DEFAULT_PROFILE_OPTIONS:
options = (option_builder.ProfileOptionBuilder
.trainable_variables_parameter())
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
graph, op_log, run_meta, add_trace=cmd == 'code')
# pylint: enable=protected-access
opts = _build_options(options)
run_meta_str = run_meta.SerializeToString() if run_meta else b''
graph_str = _graph_string(graph)
if cmd == 'code' or cmd == 'op':
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
op_log.SerializeToString(),
cmd.encode('utf-8'),
opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
elif cmd == 'graph' or cmd == 'scope':
tfprof_node = tfprof_output_pb2.GraphNodeProto()
ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
op_log.SerializeToString(),
cmd.encode('utf-8'),
opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
else:
raise errors.InvalidArgumentError(
None, None, 'unknown cmd: %s\n' % cmd)
return tfprof_node
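# Illustrative sketch (not part of the original module): profiling trainable
# parameters of the default graph with the 'scope' view. For time/memory
# statistics, also pass a `run_meta` collected with FULL_TRACE.
def _profile_params_example():
  """Profiles parameter counts, organized by name scope."""
  opts = (option_builder.ProfileOptionBuilder
          .trainable_variables_parameter())
  return profile(ops.get_default_graph(), cmd='scope', options=opts)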
@tf_export(v1=['profiler.advise'])
def advise(graph=None, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
"""Auto profile and advise.
  Builds profiles and automatically checks for anomalies in various
  aspects. For more details:
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary
      to support run time information profiling, such as time and memory.
options: see ALL_ADVICE example above. Default checks everything.
Returns:
    An AdviceProto proto.
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
if options == _DEFAULT_ADVISE_OPTIONS:
options = ALL_ADVICE.copy()
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
graph, None, run_meta, add_trace=True)
# pylint: enable=protected-access
run_meta_str = run_meta.SerializeToString() if run_meta else b''
opts = _build_advisor_options(options)
ret = tfprof_output_pb2.AdviceProto()
ret.ParseFromString(
print_mdl.PrintModelAnalysis(
_graph_string(graph), run_meta_str, op_log.SerializeToString(),
'advise'.encode('utf-8'), opts.SerializeToString()))
return ret
|
tensorflow-master
|
tensorflow/python/profiler/model_analyzer.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""print_model_analysis test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
TEST_OPTIONS = {
'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'order_by': 'name',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['params'],
'output': 'stdout',
}
# pylint: enable=bad-whitespace
# pylint: enable=bad-continuation
class PrintModelAnalysisTest(test.TestCase):
def _BuildSmallModel(self):
image = array_ops.zeros([2, 6, 6, 3])
kernel = variable_scope.get_variable(
'DW', [6, 6, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
return x
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/profiler/internal/print_model_analysis_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""test the RunMetadata proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.profiler import option_builder
# pylint: disable=g-bad-import-order
# XXX: this depends on pywrap_tensorflow and must come later
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
SIZE = 1300
builder = option_builder.ProfileOptionBuilder
def _extract_node(run_meta, node_name):
ret = defaultdict(list)
for dev_stat in run_meta.step_stats.dev_stats:
dev = dev_stat.device.lower()
if dev.find('cpu:') > 0:
dev = dev[dev.find('cpu:'):]
elif dev.find('gpu:') > 0:
dev = dev[dev.find('gpu:'):]
elif '/host:cpu' not in dev:
assert False, 'Unrecognized device name: %s' % dev
for node_stat in dev_stat.node_stats:
nname = node_stat.node_name
if nname.find(':') > 0:
nname = nname[:nname.find(':')]
if nname == node_name:
ret[dev].append(node_stat)
return ret
def _run_model():
x = random_ops.random_normal(shape=[1, SIZE])
w = random_ops.random_normal(shape=[SIZE, 2 * SIZE])
y = math_ops.matmul(x, w)
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
with session.Session(config=config) as sess:
run_metadata = config_pb2.RunMetadata()
opts = builder.time_and_memory()
opts['min_micros'] = 0
opts['min_bytes'] = 0
opts['order_by'] = 'name'
opts['output'] = 'none'
_ = sess.run(y,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_metadata)
tfprof_node = model_analyzer.profile(
sess.graph,
run_meta=run_metadata,
options=opts)
return tfprof_node, run_metadata
def _run_loop_model():
config = config_pb2.ConfigProto()
# Grappler might fuse MatMul with BiasAdd in remapper optimizer.
config.graph_options.rewrite_options.remapping = (
rewriter_config_pb2.RewriterConfig.OFF)
with session.Session(config=config) as sess:
x = lib.BuildFullModel()
sess.run(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
opts = builder.time_and_memory()
opts['order_by'] = 'name'
opts['output'] = 'none'
tfprof_node = model_analyzer.profile(
sess.graph, run_meta, options=opts)
return tfprof_node, run_meta
class RunMetadataTest(test.TestCase):
@test_util.run_deprecated_v1
def testGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
gpu_dev = test.gpu_device_name()
ops.reset_default_graph()
with ops.device(gpu_dev):
tfprof_node, run_meta = _run_model()
self.assertEqual(tfprof_node.children[0].name, 'MatMul')
self.assertGreater(tfprof_node.children[0].exec_micros, 10)
ret = _extract_node(run_meta, 'MatMul')
self.assertEqual(len(ret['gpu:0']), 1)
self.assertEqual(len(ret['gpu:0/stream:all']), 1, '%s' % run_meta)
@test_util.run_deprecated_v1
def testAllocationHistory(self):
if not test.is_gpu_available(cuda_only=True):
return
gpu_dev = test.gpu_device_name()
ops.reset_default_graph()
with ops.device(gpu_dev):
_, run_meta = _run_model()
mm = _extract_node(run_meta, 'MatMul')['gpu:0'][0]
mm_allocs = mm.memory[0].allocation_records
# has allocation and deallocation.
self.assertEqual(len(mm_allocs), 2)
# first allocated.
self.assertGreater(mm_allocs[1].alloc_micros, mm_allocs[0].alloc_micros)
self.assertGreater(mm_allocs[0].alloc_bytes, 0)
# Then deallocated.
self.assertLess(mm_allocs[1].alloc_bytes, 0)
# All memory deallocated.
self.assertEqual(mm_allocs[0].alloc_bytes + mm_allocs[1].alloc_bytes, 0)
rand = _extract_node(
run_meta, 'random_normal/RandomStandardNormal')['gpu:0'][0]
random_allocs = rand.memory[0].allocation_records
    # random_normal must be allocated first since matmul depends on it.
self.assertLess(random_allocs[0].alloc_micros, mm.all_start_micros)
    # The memory is deallocated after matmul starts.
self.assertGreater(random_allocs[1].alloc_micros, mm.all_start_micros)
@test_util.run_deprecated_v1
def testCPU(self):
ops.reset_default_graph()
with ops.device('/cpu:0'):
tfprof_node, run_meta = _run_model()
self.assertEqual(tfprof_node.children[0].name, 'MatMul')
self.assertGreater(tfprof_node.children[0].exec_micros, 0)
ret = _extract_node(run_meta, 'MatMul')
self.assertEqual(len(ret['cpu:0']), 1)
ret = _extract_node(run_meta, 'MatMul:MatMul')
self.assertEqual(len(ret), 0)
@test_util.run_v1_only('b/120545219')
def testLoopCPU(self):
ops.reset_default_graph()
with ops.device('/cpu:0'):
tfprof_node, run_meta = _run_loop_model()
# The while-loop caused a node to appear 4 times in scheduling.
ret = _extract_node(run_meta,
'rnn/while/basic_rnn_cell/MatMul')
self.assertEqual(len(ret['cpu:0']), 4)
total_cpu_execs = 0
for node in ret['cpu:0']:
total_cpu_execs += node.op_end_rel_micros
mm_node = lib.SearchTFProfNode(
tfprof_node,
'rnn/while/basic_rnn_cell/MatMul')
self.assertEqual(mm_node.run_count, 4)
self.assertEqual(mm_node.cpu_exec_micros, total_cpu_execs)
self.assertEqual(mm_node.exec_micros, total_cpu_execs)
def testGradientGraph(self):
# Note: Please don't just adjust the test to make it pass.
# The code view logic depends on it.
ops.reset_default_graph()
_, _ = _run_loop_model()
graph = ops.get_default_graph()
forward_op = set()
backward_op = set()
back_to_forward = {}
for op in graph.get_operations():
if op.name.find('gradients/') > 0 and op.name.find('_grad/') > 0:
backward_op.add(op.name)
idx1 = op.name.find('gradients/') + 10
idx2 = op.name.find('_grad/')
back_to_forward[op.name] = op.name[idx1:idx2]
else:
forward_op.add(op.name)
for _, f in six.iteritems(back_to_forward):
self.assertTrue(f in forward_op)
def testLoopGPU(self):
if not test.is_gpu_available():
return
ops.reset_default_graph()
with ops.device('/device:GPU:0'):
_, run_meta = _run_loop_model()
# The while-loop caused a node to appear 4 times in scheduling.
ret = _extract_node(run_meta,
'rnn/while/basic_rnn_cell/MatMul')
self.assertEqual(len(ret['gpu:0']), 4, '%s' % run_meta)
total_cpu_execs = 0
for node in ret['gpu:0']:
total_cpu_execs += node.op_end_rel_micros
self.assertGreaterEqual(len(ret['gpu:0/stream:all']), 4, '%s' % run_meta)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/profiler/internal/run_metadata_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Register flops statistics for various TensorFlow operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
# List of all ops which have implemented flops statistics.
IMPLEMENTED_OPS = set([
# Unary ops
"Reciprocal", "Square", "Rsqrt", "Log", "Neg", "AssignSub", "AssignAdd",
"L2Loss", "Softmax",
# Binary ops
"Add", "Sub", "Mul", "RealDiv", "Maximum", "Minimum", "Pow", "RsqrtGrad",
"GreaterEqual", "Greater", "LessEqual", "Less", "Equal", "NotEqual",
"SquaredDifference",
# Reduction ops
"Mean", "Sum", "ArgMax", "ArgMin", "BiasAddGrad",
# Convolution and pooling
"AvgPool", "MaxPool", "AvgPoolGrad", "MaxPoolGrad", "Conv2DBackpropInput",
"Conv2DBackpropFilter",
# Other ops
"AddN",
# Ops implemented in core tensorflow:
"MatMul", "Conv2D", "DepthwiseConv2dNative", "BiasAdd", "Dilation2D",
])
def _zero_flops(graph, node):
"""Returns zero flops."""
del graph, node # graph and node are unused
return ops.OpStats("flops", 0)
def _list_product(lst):
"""Computes product of element of the list."""
result = 1
for item in lst:
result *= item
return result
################################################################################
# Unary operations
################################################################################
def _unary_op_flops(graph, node, ops_per_element=1):
"""Common code which compute flops for unary operations."""
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
return ops.OpStats("flops", in_shape.num_elements() * ops_per_element)
@ops.RegisterStatistics("Reciprocal", "flops")
def _reciprocal_flops(graph, node):
"""Compute flops for Reciprocal operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Square", "flops")
def _square_flops(graph, node):
"""Compute flops for Square operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Rsqrt", "flops")
def _rsqrt_flops(graph, node):
"""Compute flops for Rsqrt operation."""
# Rsqrt(x) = 1 / sqrt(x)
return _unary_op_flops(graph, node, ops_per_element=2)
@ops.RegisterStatistics("Log", "flops")
def _log_flops(graph, node):
"""Compute flops for Log operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Neg", "flops")
def _neg_flops(graph, node):
"""Compute flops for Neg operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("AssignSub", "flops")
def _assign_sub_flops(graph, node):
"""Compute flops for AssignSub operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("AssignAdd", "flops")
def _assign_add_flops(graph, node):
"""Compute flops for AssignAdd operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("L2Loss", "flops")
def _l2_loss_flops(graph, node):
"""Compute flops for L2Loss operation."""
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
  # TensorFlow uses an inefficient implementation with (3*N-1) flops;
  # the optimal implementation uses 2*N flops.
return ops.OpStats("flops", in_shape.num_elements() * 3 - 1)
@ops.RegisterStatistics("Softmax", "flops")
def _softmax_flops(graph, node):
"""Compute flops for Softmax operation."""
  # Softmax implementation:
#
# Approximate flops breakdown:
# 2*n -- compute shifted logits
# n -- exp of shifted logits
# 2*n -- compute softmax from exp of shifted logits
return _unary_op_flops(graph, node, ops_per_element=5)
################################################################################
# Binary operations
################################################################################
def _binary_per_element_op_flops(graph, node, ops_per_element=1):
"""Common code which compute flops for binary operations."""
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
return ops.OpStats("flops", out_shape.num_elements() * ops_per_element)
@ops.RegisterStatistics("Add", "flops")
def _add_flops(graph, node):
"""Compute flops for Add operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Sub", "flops")
def _sub_flops(graph, node):
"""Compute flops for Sub operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Mul", "flops")
def _mul_flops(graph, node):
"""Compute flops for Mul operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("RealDiv", "flops")
def _real_div_flops(graph, node):
"""Compute flops for RealDiv operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Maximum", "flops")
def _maximum_flops(graph, node):
"""Compute flops for Maximum operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Minimum", "flops")
def _minimum_flops(graph, node):
"""Compute flops for Minimum operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Pow", "flops")
def _pow_flops(graph, node):
"""Compute flops for Pow operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("RsqrtGrad", "flops")
def _rsqrt_grad_flops(graph, node):
"""Compute flops for RsqrtGrad operation."""
return _binary_per_element_op_flops(graph, node, ops_per_element=4)
@ops.RegisterStatistics("GreaterEqual", "flops")
def _greater_equal_flops(graph, node):
"""Compute flops for GreaterEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Greater", "flops")
def _greater_flops(graph, node):
"""Compute flops for Greater operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("LessEqual", "flops")
def _less_equal_flops(graph, node):
"""Compute flops for LessEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Less", "flops")
def _less_flops(graph, node):
"""Compute flops for Less operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Equal", "flops")
def _equal_flops(graph, node):
"""Compute flops for Equal operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("NotEqual", "flops")
def _not_equal_flops(graph, node):
"""Compute flops for NotEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("SquaredDifference", "flops")
def _squared_difference_flops(graph, node):
"""Compute flops for SquaredDifference operation."""
return _binary_per_element_op_flops(graph, node, ops_per_element=2)
################################################################################
# Reduction ops
################################################################################
def _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0):
"""Common code which compute flops for reduction operations."""
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
num_flops = (in_shape.num_elements() * reduce_flops
+ out_shape.num_elements() * (finalize_flops - reduce_flops))
return ops.OpStats("flops", num_flops)
@ops.RegisterStatistics("Mean", "flops")
def _mean_flops(graph, node):
"""Compute flops for Mean operation."""
# reduction - sum, finalization - divide
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=1)
@ops.RegisterStatistics("Sum", "flops")
def _sum_flops(graph, node):
"""Compute flops for Sum operation."""
# reduction - sum, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("ArgMax", "flops")
def _arg_max_flops(graph, node):
"""Compute flops for ArgMax operation."""
# reduction - comparison, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("ArgMin", "flops")
def _arg_min_flops(graph, node):
"""Compute flops for ArgMin operation."""
# reduction - comparison, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("BiasAddGrad", "flops")
def _bias_add_grad_flops(graph, node):
"""Compute flops for BiasAddGrad operation."""
  # BiasAddGrad is essentially a reduce-sum plus a reshape,
  # so flops are computed the same way as for "Sum".
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
################################################################################
# Convolution and pooling
# Note: all flops statistics are implemented only for NHWC data format
################################################################################
def _verify_conv_data_format(node):
"""Verifies data format for pooling and convolutional operations."""
# TODO(xpan): P1: Support NCHW
if node.attr["data_format"].s != b"NHWC":
raise ValueError("Only NHWC format is supported in flops computations")
def _pool_flops(graph, node):
"""Common code which compute flops for pooling operations."""
# compute flops for average and max pooling
_verify_conv_data_format(node)
#
# Pooling declaration:
# Inputs:
# - value
# Outputs:
# - output
# Attributes:
# - ksize
# - strides
# - padding
# - data_format
#
  # Pooling implementation:
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
kernel_shape = list(node.attr["ksize"].list.i)
kernel_area = _list_product(kernel_shape)
return ops.OpStats("flops", kernel_area * out_shape.num_elements())
@ops.RegisterStatistics("AvgPool", "flops")
def _avg_pool_flops(graph, node):
"""Compute flops for AvgPool operation."""
return _pool_flops(graph, node)
@ops.RegisterStatistics("MaxPool", "flops")
def _max_pool_flops(graph, node):
"""Compute flops for MaxPool operation."""
return _pool_flops(graph, node)
@ops.RegisterStatistics("AvgPoolGrad", "flops")
def _avg_pool_grad_flops(graph, node):
"""Compute flops for AvgPoolGrad operation."""
_verify_conv_data_format(node)
# Pooling gradient implementation:
out_backprop_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
out_backprop_shape.assert_is_fully_defined()
kernel_shape = list(node.attr["ksize"].list.i)
kernel_area = _list_product(kernel_shape)
  # TensorFlow multiplies each element of the pooling window by a
  # coefficient and then sums them all up, thus we have 2 flops per element.
  # A more optimal implementation would perform the division after summing.
return ops.OpStats("flops",
kernel_area * out_backprop_shape.num_elements() * 2)
@ops.RegisterStatistics("MaxPoolGrad", "flops")
def _max_pool_grad_flops(graph, node):
"""Compute flops for MaxPoolGrad operation."""
_verify_conv_data_format(node)
#
# MaxPoolGrad declaration:
# Inputs:
# - orig_input -- original input tensor (of max_pool)
# - orig_output -- original output tensor (of max_pool)
# - grad -- gradient with respect to output of max_pool
# Outputs:
# - output -- gradient with respect to input of max_pool
# Attributes:
# - ksize
# - strides
# - padding
# - data_format
  # It computes MaxPool first, then adds one flop per element of the original
  # output.
#
kernel_shape = list(node.attr["ksize"].list.i)
kernel_area = _list_product(kernel_shape)
orig_out_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
orig_out_shape.assert_is_fully_defined()
max_pool_ops = kernel_area * orig_out_shape.num_elements()
return ops.OpStats("flops", max_pool_ops + orig_out_shape.num_elements())
@ops.RegisterStatistics("Conv2DBackpropInput", "flops")
def _conv_2d_backprop_input_flops(graph, node):
"""Compute flops for Conv2DBackpropInput operation."""
  # Formula:
  #  batch_size * image_x_dim * image_y_dim * kernel_x_dim * kernel_y_dim
  #  * input_depth * output_depth * 2 / (image_x_stride * image_y_stride)
  #
  # Where:
  #  image_x_dim, image_y_dim and input_depth --- size of input to source (no
  #    backprop) convolution, in other words they are sizes of backprop output.
  #  output_depth --- number of filters in the original convolution, thus
  #    depth of backprop input.
  #  kernel_x_dim and kernel_y_dim --- sizes of filter in spatial dimension
  #  image_x_stride and image_y_stride --- strides of the convolution
#
_verify_conv_data_format(node)
# out_shape = [batch_size, image_y_dim, image_x_dim, input_depth]
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
# kernel_shape = [kernel_y_dim, kernel_x_dim, input_depth, output_depth]
kernel_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
kernel_shape.assert_is_fully_defined()
# strides
strides_shape = list(node.attr["strides"].list.i)
strides_product = strides_shape[1] * strides_shape[2]
return ops.OpStats("flops",
(2 * out_shape.num_elements()
* kernel_shape.num_elements()
/ (out_shape.dims[-1].value * strides_product)))
@ops.RegisterStatistics("Conv2DBackpropFilter", "flops")
def _conv_2d_backprop_filter_flops(graph, node):
"""Compute flops for Conv2DBackpropFilter operation."""
  # Formula same as for Conv2DBackpropInput:
  #  batch_size * image_x_dim * image_y_dim * kernel_x_dim * kernel_y_dim
  #  * input_depth * output_depth * 2 / (image_x_stride * image_y_stride)
#
_verify_conv_data_format(node)
# image_shape = [batch_size, image_y_dim, image_x_dim, input_depth]
image_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
image_shape.assert_is_fully_defined()
# kernel_shape = [kernel_y_dim, kernel_x_dim, input_depth, output_depth]
kernel_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
kernel_shape.assert_is_fully_defined()
# strides
strides_shape = list(node.attr["strides"].list.i)
strides_product = strides_shape[1] * strides_shape[2]
return ops.OpStats("flops",
(2 * image_shape.num_elements()
* kernel_shape.num_elements()
/ (image_shape.dims[-1].value * strides_product)))
################################################################################
# Other ops
################################################################################
@ops.RegisterStatistics("AddN", "flops")
def _add_n_flops(graph, node):
"""Compute flops for AddN operation."""
if not node.input:
return _zero_flops(graph, node)
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
return ops.OpStats("flops", in_shape.num_elements() * (len(node.input) - 1))
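# Minimal usage sketch (kept commented out; assumes a graph whose shapes are
# fully defined). `ops.get_stats_for_node_def` dispatches to the "flops"
# statistics registered above:
#
#   graph = ops.get_default_graph()
#   for node in graph.as_graph_def().node:
#     stats = ops.get_stats_for_node_def(graph, node, "flops")
#     if stats.value is not None:
#       print(node.name, stats.value)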
|
tensorflow-master
|
tensorflow/python/profiler/internal/flops_registry.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A test lib that defines some models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import variable_scope
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
def BuildSmallModel():
"""Build a small forward conv model."""
image = array_ops.zeros([2, 6, 6, 3])
_ = variable_scope.get_variable(
'ScalarW', [],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
kernel = variable_scope.get_variable(
'DW', [3, 3, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
kernel = variable_scope.get_variable(
'DW2', [2, 2, 6, 12],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(x, kernel, [1, 2, 2, 1], padding='SAME')
return x
def BuildFullModel():
"""Build the full model with conv,rnn,opt."""
seq = []
for i in range(4):
with variable_scope.variable_scope('inp_%d' % i):
seq.append(array_ops.reshape(BuildSmallModel(), [2, 1, -1]))
cell = rnn_cell.BasicRNNCell(16)
out = rnn.dynamic_rnn(
cell, array_ops.concat(seq, axis=1), dtype=dtypes.float32)[0]
target = array_ops.ones_like(out)
loss = nn_ops.l2_loss(math_ops.reduce_mean(target - out))
sgd_op = gradient_descent.GradientDescentOptimizer(1e-2)
return sgd_op.minimize(loss)
def BuildSplitableModel():
"""Build a small model that can be run partially in each step."""
image = array_ops.zeros([2, 6, 6, 3])
kernel1 = variable_scope.get_variable(
'DW', [3, 3, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
r1 = nn_ops.conv2d(image, kernel1, [1, 2, 2, 1], padding='SAME')
kernel2 = variable_scope.get_variable(
'DW2', [2, 3, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
r2 = nn_ops.conv2d(image, kernel2, [1, 2, 2, 1], padding='SAME')
r3 = r1 + r2
return r1, r2, r3
def SearchTFProfNode(node, name):
"""Search a node in the tree."""
if node.name == name:
return node
for c in node.children:
r = SearchTFProfNode(c, name)
if r: return r
return None
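# Example (hypothetical profile): find the stats node for the first conv
# kernel of BuildSmallModel in a TFProf tree rooted at `root_node`:
#
#   dw_node = SearchTFProfNode(root_node, 'DW')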
@contextlib.contextmanager
def ProfilerFromFile(profile_file):
"""Initialize a profiler from profile file."""
print_mdl.ProfilerFromFile(compat.as_bytes(profile_file))
profiler = model_analyzer.Profiler.__new__(model_analyzer.Profiler)
yield profiler
print_mdl.DeleteProfiler()
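# Example usage (hypothetical path; `options` would come from something like
# ProfileOptionBuilder):
#
#   with ProfilerFromFile('/tmp/profile') as profiler:
#     profiler.profile_operations(options)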
def CheckAndRemoveDoc(profile):
  """Asserts that `profile` has a doc section and strips it off."""
  assert 'Doc:' in profile
  start_pos = profile.find('Profile:')
  return profile[start_pos + 9:]
|
tensorflow-master
|
tensorflow/python/profiler/internal/model_analyzer_testlib.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.topology import Topology
from tensorflow.python.util.tf_export import tf_export
SINGLE_CORE_ASSIGNMENT = [[[0, 0, 0]]]
def _compute_task_and_cores_to_replicas(core_assignment, topology):
"""Computes a nested dict which maps task and logical core to replicas."""
task_and_cores_to_replicas = {}
for replica in xrange(core_assignment.shape[0]):
for logical_core in xrange(core_assignment.shape[1]):
coordinates = core_assignment[replica, logical_core, :]
task_id = topology.task_ordinal_at_coordinates(coordinates)
if task_id not in task_and_cores_to_replicas:
task_and_cores_to_replicas[task_id] = {}
if logical_core not in task_and_cores_to_replicas[task_id]:
task_and_cores_to_replicas[task_id][logical_core] = set()
task_and_cores_to_replicas[task_id][logical_core].add(replica)
task_to_sorted_replica_id = {}
for task, core_to_replicas in task_and_cores_to_replicas.items():
core_to_sorted_replicas = {}
for core, replicas in core_to_replicas.items():
core_to_sorted_replicas[core] = sorted(replicas)
task_to_sorted_replica_id[task] = core_to_sorted_replicas
return task_to_sorted_replica_id
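# Illustrative sketch (hypothetical topology): if replicas 0 and 1 each use a
# single logical core and both coordinates belong to task 0, the result is
# {0: {0: [0, 1]}} -- task 0, logical core 0, sorted replica ids [0, 1].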
@tf_export("tpu.experimental.DeviceAssignment")
class DeviceAssignment(object):
"""Mapping from logical cores in a computation to the physical TPU topology.
  Prefer to use the `DeviceAssignment.build()` helper to construct a
  `DeviceAssignment`; it is easier, if less flexible, than constructing a
  `DeviceAssignment` directly.
"""
def __init__(self, topology, core_assignment):
"""Constructs a `DeviceAssignment` object.
Args:
topology: A `Topology` object that describes the physical TPU topology.
core_assignment: A logical to physical core mapping, represented as a
rank 3 numpy array. See the description of the `core_assignment`
property for more details.
Raises:
    ValueError: If `topology` is not a `Topology` object.
ValueError: If `core_assignment` is not a rank 3 numpy array.
"""
if not isinstance(topology, Topology):
raise ValueError("topology must be a Topology object, got {}".format(
type(topology)))
core_assignment = np.asarray(core_assignment, dtype=np.int32)
self._topology = topology
if core_assignment.ndim != 3:
raise ValueError("core_assignment must be a rank 3 numpy array, "
"got shape {}".format(core_assignment.shape))
self._num_replicas = core_assignment.shape[0]
self._num_cores_per_replica = core_assignment.shape[1]
if core_assignment.shape[-1] != topology.mesh_rank:
raise ValueError(
"minor dimension of core_assignment must have size equal to topology "
"rank ({}), got shape {}".format(topology.mesh_rank,
core_assignment.shape))
self._core_assignment = core_assignment
self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(
self._core_assignment, topology)
@property
def topology(self):
"""A `Topology` that describes the TPU topology."""
return self._topology
@property
def num_cores_per_replica(self):
"""The number of cores per replica."""
return self._num_cores_per_replica
@property
def num_replicas(self):
"""The number of replicas of the computation."""
return self._num_replicas
@property
def core_assignment(self):
"""The logical to physical core mapping.
Returns:
An integer numpy array of rank 3, with shape
`[num_replicas, num_cores_per_replica, topology_rank]`. Maps
(replica, logical core) pairs to physical topology coordinates.
"""
return self._core_assignment
def coordinates(self, replica, logical_core):
"""Returns the physical topology coordinates of a logical core."""
return tuple(self.core_assignment[replica, logical_core, :])
def lookup_replicas(self, task_id, logical_core):
"""Lookup replica ids by task number and logical core.
Args:
task_id: TensorFlow task number.
logical_core: An integer, identifying a logical core.
Returns:
A sorted list of the replicas that are attached to that task and
logical_core.
Raises:
ValueError: If no replica exists in the task which contains the logical
core.
"""
try:
return self._task_and_cores_to_replicas[task_id][logical_core]
except KeyError:
      raise ValueError(
          "Cannot find any replica in task {} that contains logical core "
          "{}.".format(task_id, logical_core))
def tpu_ordinal(self, replica=0, logical_core=0):
"""Returns the ordinal of the TPU device assigned to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.tpu_device_ordinal_at_coordinates(coordinates)
def host_device(self, replica=0, logical_core=0, job=None):
"""Returns the CPU device attached to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.cpu_device_name_at_coordinates(coordinates, job=job)
def tpu_device(self, replica=0, logical_core=0, job=None):
"""Returns the name of the TPU device assigned to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.tpu_device_name_at_coordinates(coordinates, job=job)
@staticmethod
def build(topology,
computation_shape=None,
computation_stride=None,
num_replicas=1):
return device_assignment(topology, computation_shape, computation_stride,
num_replicas)
def _ring_2d(height, width):
"""Ring-order of a height x width mesh.
For example, in a 4x4 mesh, this returns the following order.
0 -- 1 -- 2 -- 3
| | | |
15-- 6 -- 5 -- 4
| | | |
14-- 7 -- 8 -- 9
| | | |
13-- 12-- 11-- 10
Args:
    height: An integer representing the height.
    width: An integer representing the width.
Returns:
A list of [y, x] pairs with ring order.
"""
if height == 1:
return [(0, i) for i in range(width)]
if width == 1:
return [(i, 0) for i in range(height)]
if height % 2 != 0:
logging.warning("Odd dimension")
return [(i % height, i // height) for i in range(width * height)]
ret = [(0, 0)]
for i in range(height // 2):
for j in range(1, width):
ret.append((2 * i, j))
for j in range(width - 1, 0, -1):
ret.append((2 * i + 1, j))
for i in range(height - 1, 0, -1):
ret.append((i, 0))
return ret
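# Worked example: _ring_2d(2, 2) walks the 2x2 mesh in ring order and
# returns [(0, 0), (0, 1), (1, 1), (1, 0)].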
def device_assignment(topology,
computation_shape=None,
computation_stride=None,
num_replicas=1):
"""Computes a device_assignment of a computation across a TPU topology.
Attempts to choose a compact grid of cores for locality.
Returns a `DeviceAssignment` that describes the cores in the topology assigned
to each core of each replica.
`computation_shape` and `computation_stride` values should be powers of 2 for
optimal packing.
Args:
topology: A `Topology` object that describes the TPU cluster topology.
To obtain a TPU topology, evaluate the `Tensor` returned by
`initialize_system` using `Session.run`. Either a serialized
`TopologyProto` or a `Topology` object may be passed. Note: you must
evaluate the `Tensor` first; you cannot pass an unevaluated `Tensor` here.
computation_shape: A rank 1 int32 numpy array with size equal to the
topology rank, describing the shape of the computation's block of cores.
If None, the `computation_shape` is `[1] * topology_rank`.
computation_stride: A rank 1 int32 numpy array of size `topology_rank`,
describing the inter-core spacing of the `computation_shape` cores in the
TPU topology. If None, the `computation_stride` is `[1] * topology_rank`.
num_replicas: The number of computation replicas to run. The replicas will
be packed into the free spaces of the topology.
Returns:
A DeviceAssignment object, which describes the mapping between the logical
cores in each computation replica and the physical cores in the TPU
topology.
Raises:
ValueError: If `topology` is not a valid `Topology` object.
ValueError: If `computation_shape` or `computation_stride` are not 1D int32
numpy arrays with shape [3] where all values are positive.
ValueError: If computation's replicas cannot fit into the TPU topology.
"""
# Deserialize the Topology proto, if it is a string.
if isinstance(topology, bytes):
topology = Topology(serialized=topology)
if not isinstance(topology, Topology):
raise ValueError("`topology` is not a Topology object; got {}".format(
type(topology)))
topology_rank = len(topology.mesh_shape)
mesh_shape = topology.mesh_shape
if computation_shape is None:
computation_shape = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_shape = np.asarray(computation_shape, dtype=np.int32)
if computation_stride is None:
computation_stride = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_stride = np.asarray(computation_stride, dtype=np.int32)
if computation_shape.shape != (topology_rank,):
raise ValueError("computation_shape must have shape [{}]; got {}".format(
topology_rank, computation_shape.shape))
if computation_stride.shape != (topology_rank,):
raise ValueError("computation_stride must have shape [{}]; got {}".format(
topology_rank, computation_stride.shape))
if any(computation_shape < 1):
raise ValueError(
"computation_shape must be positive; got computation_shape={}".format(
computation_shape))
if any(computation_stride < 1):
raise ValueError(
"computation_stride must be positive; got computation_stride={}".format(
computation_stride))
# Computes the physical size of one computation instance.
computation_footprint = computation_shape * computation_stride
if any(computation_footprint > mesh_shape):
raise ValueError(
"computation footprint {} does not fit in TPU topology shape {}".format(
computation_footprint, mesh_shape))
# Computes how many copies of the computation footprint fit in the mesh.
block_counts = mesh_shape // computation_footprint
replica_counts = block_counts * computation_stride
max_replicas = np.prod(replica_counts)
if num_replicas > max_replicas:
raise ValueError(
"requested {} replicas but only {} replicas with shape {} and "
"computation_stride {} fit in a TPU mesh of shape {}".format(
num_replicas, max_replicas, computation_shape, computation_stride,
mesh_shape))
def ceil_of_ratio(n, m):
return (n + m - 1) // m
replica_shape = [0] * topology_rank
if num_replicas > 0:
remaining_replicas = num_replicas
remaining_dims = topology_rank
# Choose dimensions as close to an equal cube as possible, in order of
# increasing dimension size. By visiting dimensions in increasing size, we
# assign the most constrained dimension first, so we won't make infeasible
# choices.
#
# As a secondary sort order, visit the dimensions in reverse order. This
# means we try to use both cores on the same chip in preference to two cores
# on different chips.
for x, ni in sorted(((x, -i) for (i, x) in enumerate(replica_counts))):
i = -ni
target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims)))
replica_shape[i] = min(target_size, x)
remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i])
remaining_dims -= 1
assert remaining_replicas == 1 and remaining_dims == 0
# Assigns an offset to each replica such that no two replicas overlap.
replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)
# TODO(ylc): Revisit here when topology_rank > 3.
enable_2d_tiling = (
topology_rank == 3 and
computation_shape[-1] == 2 # Only handle 2D case.
and np.prod(computation_stride) == 1 # Ensure no stride.
and num_replicas == max_replicas) # Full replication.
logging.info("enable_2d_tiling: {}".format(enable_2d_tiling))
if enable_2d_tiling:
assignment = []
inner_ring = _ring_2d(computation_shape[0], computation_shape[1])
outer_ring = _ring_2d(replica_shape[0], replica_shape[1])
for replica in xrange(num_replicas):
outer_x, outer_y = outer_ring[replica]
per_replica_assignment = []
for index in xrange(np.prod(computation_shape)):
inner_x, inner_y = inner_ring[index // 2]
px = outer_x * computation_shape[0] + inner_x
py = outer_y * computation_shape[1] + inner_y
pz = index % 2
per_replica_assignment.append([px, py, pz])
assignment.append(per_replica_assignment)
else:
for replica in xrange(num_replicas):
# Chooses a replica number in each axis.
t = replica
pos = []
for dim in replica_shape[::-1]:
pos.append(t % dim)
t //= dim
replica_pos = np.array(pos[::-1], dtype=np.int32)
# Determines where that replica starts in each axis.
outer = replica_pos // computation_stride
inner = replica_pos % computation_stride
replica_offsets[replica, :] = outer * computation_footprint + inner
# Computes a logical core -> physical core mapping for each replica.
indices = [
np.arange(0, computation_shape[i] * computation_stride[i],
computation_stride[i]) for i in xrange(topology_rank)
]
indices = np.concatenate(
[i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")],
axis=-1)
indices = indices.reshape((-1, topology_rank))
assignment = indices + replica_offsets[:, np.newaxis, :]
return DeviceAssignment(topology, core_assignment=assignment)
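# Minimal usage sketch (kept commented out; assumes `topology` was obtained by
# evaluating `tpu.initialize_system()` and has the usual rank-3 mesh):
#
#   da = device_assignment(topology,
#                          computation_shape=[1, 1, 1],
#                          num_replicas=2)
#   print(da.tpu_device(replica=0), da.host_device(replica=0))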
|
tensorflow-master
|
tensorflow/python/tpu/device_assignment.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU specific APIs to be used in conjunction with TPU Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute.cluster_resolver import TPUClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import topology
from tensorflow.python.tpu import tpu
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
_INITIALIZED_TPU_SYSTEMS = {}
_LOCAL_MASTERS = ("", "local")
@tf_export("tpu.experimental.initialize_tpu_system")
def initialize_tpu_system(cluster_resolver=None):
"""Initialize the TPU devices.
Args:
cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
Returns:
The tf.tpu.Topology object for the topology of the TPU cluster.
Raises:
RuntimeError: If no TPU devices found for eager execution.
"""
if cluster_resolver is None:
cluster_resolver = TPUClusterResolver("")
assert isinstance(cluster_resolver, TPUClusterResolver)
tpu_name = compat.as_text(cluster_resolver._tpu) # pylint: disable=protected-access
if tpu_name in _INITIALIZED_TPU_SYSTEMS:
logging.warning("TPU system %s has already been initialized. "
"Reinitializing the TPU can cause previously created "
"variables on TPU to be lost.")
logging.info("Initializing the TPU system: %s", tpu_name)
if context.executing_eagerly():
    # This function is written the way it is for the following non-intuitive
    # reasons.
# tpu.initialize_system creates a dummy op whose sole purpose is to trigger
# DistributedTPURewritePass. This pass actually adds real ops that
# initialize the TPU system. Thus, we can't simply run tpu.initialize_system
# eagerly. We need to wrap it in defun and trigger the rewrite passes on it.
job = None
if tpu_name not in _LOCAL_MASTERS:
      # Explicitly place the tpu.initialize_system on the first worker to
      # avoid the "output node matches multiple devices" error.
job = "{}/replica:0/task:0".format(cluster_resolver.get_job_name())
@function.defun
def _tpu_init_fn():
return tpu.initialize_system(job=job)
# The TPU_SYSTEM device must match the device used in tpu.initialize_system
# exactly, otherwise you can get errors if there are multiple TPU_SYSTEM
# devices available.
with ops.device(tpu._tpu_system_device_name(job)): # pylint: disable=protected-access
output = _tpu_init_fn()
# Clear out the eager context caches since the memory is invalid now.
logging.info("Clearing out eager caches")
context.context()._clear_caches() # pylint: disable=protected-access
serialized_topology = output.numpy()
else:
master = cluster_resolver.master()
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
with ops.Graph().as_default():
with session_lib.Session(config=session_config, target=master) as sess:
serialized_topology = sess.run(tpu.initialize_system())
logging.info("Finished initializing TPU system.")
tpu_topology = topology.Topology(serialized=serialized_topology)
_INITIALIZED_TPU_SYSTEMS[tpu_name] = tpu_topology
return tpu_topology
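# Minimal usage sketch (kept commented out; the TPU address below is
# hypothetical):
#
#   resolver = TPUClusterResolver(tpu="grpc://10.0.0.2:8470")
#   tpu_topology = initialize_tpu_system(resolver)
#   print(tpu_topology.mesh_shape)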
|
tensorflow-master
|
tensorflow/python/tpu/tpu_strategy_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU embedding APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import math
import re
import six
from tensorflow.core.protobuf.tpu import optimization_parameters_pb2
from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2 as elc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.python.tpu.ops import tpu_ops
TRAINING = elc.TPUEmbeddingConfiguration.TRAINING
INFERENCE = elc.TPUEmbeddingConfiguration.INFERENCE
class TableConfig(
collections.namedtuple(
'TableConfig',
['vocabulary_size', 'dimension', 'initializer', 'combiner'])):
"""Embedding table configuration."""
def __new__(cls,
vocabulary_size,
dimension,
initializer=None,
combiner='mean'):
"""Embedding table configuration.
Args:
      vocabulary_size: Number of vocabulary entries (rows) in the table.
dimension: The embedding dimension.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and standard
deviation `1/sqrt(dimension)`.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn', 'sum' and None are
supported, with 'mean' the default. 'sqrtn' often achieves good
accuracy, in particular with bag-of-words columns. For more information,
see `tf.nn.embedding_lookup_sparse`. None is only valid for dense rather
than sparse tensors.
Returns:
`TableConfig`.
Raises:
      ValueError: if `vocabulary_size` is not a positive integer.
      ValueError: if `dimension` is not a positive integer.
ValueError: if `initializer` is specified and is not callable.
ValueError: if `combiner` is not supported.
"""
if not isinstance(vocabulary_size, int) or vocabulary_size < 1:
raise ValueError('Invalid vocabulary_size {}.'.format(vocabulary_size))
if not isinstance(dimension, int) or dimension < 1:
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
if combiner not in ('mean', 'sum', 'sqrtn', None):
raise ValueError('Invalid combiner {}'.format(combiner))
return super(TableConfig, cls).__new__(cls, vocabulary_size, dimension,
initializer, combiner)
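# Example (illustrative values): a 1000-row, 64-dimensional table using the
# default truncated-normal initializer and the default 'mean' combiner:
#
#   video_table = TableConfig(vocabulary_size=1000, dimension=64)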
class FeatureConfig(
collections.namedtuple(
'FeatureConfig',
['table_id', 'max_sequence_length', 'weight_key'])):
"""Feature configuration."""
def __new__(cls,
table_id,
max_sequence_length=0,
weight_key=None):
"""Feature configuration.
Args:
      table_id: Which table the feature uses for embedding lookups.
max_sequence_length: If positive, the feature is a sequence feature with
the corresponding maximum sequence length. If the sequence is longer
than this, it will be truncated. If 0, the feature is not a sequence
feature.
weight_key: If using weights for the combiner, this key specifies which
input feature contains the weights.
Returns:
`FeatureConfig`.
Raises:
      ValueError: if `max_sequence_length` is not a non-negative integer.
"""
if not isinstance(max_sequence_length, int) or max_sequence_length < 0:
raise ValueError('Invalid max_sequence_length {}.'.format(
max_sequence_length))
return super(FeatureConfig, cls).__new__(cls, table_id, max_sequence_length,
weight_key)
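# Example (illustrative values): a sequence feature looked up in a table named
# 'video', truncated to at most 10 ids per example:
#
#   watched = FeatureConfig('video', max_sequence_length=10)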
class EnqueueData(
collections.namedtuple(
'EnqueueData',
['embedding_indices', 'sample_indices', 'aggregation_weights'])):
"""Data to be enqueued through generate_enqueue_ops()."""
def __new__(cls,
embedding_indices,
sample_indices=None,
aggregation_weights=None):
"""Data to be enqueued through generate_enqueue_ops().
Args:
      embedding_indices: A rank 1 Tensor, indices into the embedding tables. It
        corresponds to sp_ids.values in embedding_lookup_sparse(). Both int32
        and int64 are allowed and will be converted to int32 internally.
      sample_indices: A rank 2 Tensor specifying the training example to which
        the corresponding embedding_indices and aggregation_weights values
        belong. It corresponds to sp_ids.indices in embedding_lookup_sparse().
        If it is None, we assume each embedding_indices belongs to a different
        sample. Both int32 and int64 are allowed and will be converted to int32
        internally.
      aggregation_weights: A rank 1 Tensor containing aggregation weights.
        It corresponds to sp_weights.values in embedding_lookup_sparse(). If it
        is None, we assume all weights are 1. Both float32 and float64 are
        allowed and will be converted to float32 internally.
Returns:
An EnqueueData tuple.
"""
return super(EnqueueData, cls).__new__(cls, embedding_indices,
sample_indices, aggregation_weights)
@staticmethod
def from_sparse_tensor(sp_tensor, weights=None):
return EnqueueData(
sp_tensor.values,
sp_tensor.indices,
aggregation_weights=weights.values if weights is not None else None)
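# Example (illustrative): wrapping a SparseTensor of lookup ids, with optional
# per-id weights coming from a parallel SparseTensor:
#
#   data = EnqueueData.from_sparse_tensor(sp_ids, weights=sp_weights)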
def get_enqueue_datas_list_from_sparse_tensors_list(sp_tensors_list):
"""Convenient function for generate_enqueue_ops().
Args:
sp_tensors_list: a list of dictionary mapping from string of feature names
to SparseTensor. Each dictionary is for one TPU core. Dictionaries for the
same host should be contiguous on the list.
Returns:
enqueue_datas_list: a list of dictionary mapping from string
of feature names to EnqueueData. Each dictionary is for one
TPU core. Dictionaries for the same host should be contiguous
on the list.
"""
enqueue_datas_list = []
for sp_tensors in sp_tensors_list:
enqueue_datas = collections.OrderedDict(
(k, EnqueueData.from_sparse_tensor(v))
for k, v in six.iteritems(sp_tensors))
enqueue_datas_list.append(enqueue_datas)
return enqueue_datas_list
AdamSlotVariableNames = collections.namedtuple(
'AdamSlotVariableNames', ['m', 'v'])
AdagradSlotVariableName = collections.namedtuple(
'AdagradSlotVariableName', ['accumulator'])
AdamSlotVariables = collections.namedtuple(
'AdamSlotVariables', ['m', 'v'])
AdagradSlotVariable = collections.namedtuple(
'AdagradSlotVariable', ['accumulator'])
VariablesAndOps = collections.namedtuple(
'VariablesAndOps',
['embedding_variables_by_table', 'slot_variables_by_table',
'load_ops', 'retrieve_ops']
)
class _OptimizationParameters(object):
"""Parameters common to all optimizations."""
def __init__(self, learning_rate, use_gradient_accumulation,
clip_weight_min, clip_weight_max):
self.learning_rate = learning_rate
self.use_gradient_accumulation = use_gradient_accumulation
self.clip_weight_min = clip_weight_min
self.clip_weight_max = clip_weight_max
class AdagradParameters(_OptimizationParameters):
"""Optimization parameters for Adagrad."""
def __init__(self,
learning_rate,
initial_accumulator=0.1,
use_gradient_accumulation=True,
clip_weight_min=None,
clip_weight_max=None):
"""Optimization parameters for Adagrad.
Args:
learning_rate: used for updating embedding table.
initial_accumulator: initial accumulator for Adagrad.
      use_gradient_accumulation: setting this to `False` makes embedding
        gradients calculation less accurate but faster. Please see
        `optimization_parameters.proto` for details.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
"""
super(AdagradParameters,
self).__init__(learning_rate, use_gradient_accumulation,
clip_weight_min, clip_weight_max)
if initial_accumulator <= 0:
raise ValueError('Adagrad initial_accumulator must be positive')
self.initial_accumulator = initial_accumulator
class AdamParameters(_OptimizationParameters):
"""Optimization parameters for Adam."""
def __init__(self,
learning_rate,
beta1=0.9,
beta2=0.999,
epsilon=1e-08,
lazy_adam=True,
sum_inside_sqrt=True,
use_gradient_accumulation=True,
clip_weight_min=None,
clip_weight_max=None):
"""Optimization parameters for Adam.
Args:
learning_rate: a floating point value. The learning rate.
beta1: A float value.
The exponential decay rate for the 1st moment estimates.
beta2: A float value.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability.
lazy_adam: Use lazy Adam instead of Adam. Lazy Adam trains faster.
Please see `optimization_parameters.proto` for details.
sum_inside_sqrt: This improves training speed. Please see
`optimization_parameters.proto` for details.
      use_gradient_accumulation: setting this to `False` makes embedding
        gradients calculation less accurate but faster. Please see
        `optimization_parameters.proto` for details.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
"""
super(AdamParameters,
self).__init__(learning_rate, use_gradient_accumulation,
clip_weight_min, clip_weight_max)
if beta1 < 0. or beta1 >= 1.:
raise ValueError('beta1 must be between 0. and 1; got {}.'.format(beta1))
if beta2 < 0. or beta2 >= 1.:
raise ValueError('beta2 must be between 0. and 1; got {}.'.format(beta2))
if epsilon <= 0.:
raise ValueError('epsilon must be positive; got {}.'.format(epsilon))
if not use_gradient_accumulation and not lazy_adam:
raise ValueError(
'When disabling Lazy Adam, gradient accumulation must be used.')
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.lazy_adam = lazy_adam
self.sum_inside_sqrt = sum_inside_sqrt
class StochasticGradientDescentParameters(_OptimizationParameters):
"""Optimization parameters for stochastic gradient descent."""
def __init__(self, learning_rate, clip_weight_min=None,
clip_weight_max=None):
"""Optimization parameters for stochastic gradient descent.
Args:
learning_rate: a floating point value. The learning rate.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
"""
super(StochasticGradientDescentParameters,
self).__init__(learning_rate, False, clip_weight_min, clip_weight_max)
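# Example (illustrative values): Adagrad parameters for a training-mode
# TPUEmbedding; in inference mode `optimization_parameters` stays None:
#
#   opt = AdagradParameters(learning_rate=0.1, initial_accumulator=0.1)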
DeviceConfig = collections.namedtuple('DeviceConfig',
['num_hosts', 'num_cores', 'job_name'])
class TPUEmbedding(object):
"""API for using TPU for embedding.
Example:
```
  table_config_video = tpu_embedding.TableConfig(
      vocabulary_size=4, dimension=2,
      initializer=initializer, combiner='mean')
  table_config_user = tpu_embedding.TableConfig(
      vocabulary_size=4, dimension=2,
      initializer=initializer, combiner='mean')
table_to_config_dict = {'video': table_config_video,
'user': table_config_user}
feature_to_config_dict = {'watched': tpu_embedding.FeatureConfig('video'),
'favorited': tpu_embedding.FeatureConfig('video'),
'friends': tpu_embedding.FeatureConfig('user')}
batch_size = 4
num_hosts = 1
optimization_parameters = tpu_embedding.AdagradParameters(1., 1.)
mode = tpu_embedding.TRAINING
embedding = tpu_embedding.TPUEmbedding(
table_to_config_dict, feature_to_config_dict,
batch_size, num_hosts, mode, optimization_parameters)
batch_size_per_core = embedding.batch_size_per_core
sparse_features_list = []
for host in hosts:
with ops.device(host):
for _ in range(embedding.num_cores_per_host):
sparse_features = {}
sparse_features['watched'] = sparse_tensor.SparseTensor(...)
sparse_features['favorited'] = sparse_tensor.SparseTensor(...)
sparse_features['friends'] = sparse_tensor.SparseTensor(...)
sparse_features_list.append(sparse_features)
enqueue_ops = embedding.generate_enqueue_ops(sparse_features_list)
embedding_variables_and_ops = embedding.create_variables_and_ops()
def computation():
activations = embedding.get_activations()
loss = compute_loss(activations)
base_optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=1)
cross_shard_optimizer = tpu_optimizer.CrossShardOptimizer(
base_optimizer)
train_op = cross_shard_optimizer.minimize(loss)
  gradients = (
      tpu_embedding_gradient.get_gradients_through_compute_gradients(
          cross_shard_optimizer, loss, activations))
send_gradients_op = embedding.generate_send_gradients_op(gradients)
with ops.control_dependencies([train_op, send_gradients_op]):
loss = array_ops.identity(loss)
loss = tpu.shard(computation,
num_shards=embedding.num_cores)
with self.test_session() as sess:
sess.run(tpu.initialize_system(embedding_config=
embedding.config_proto))
sess.run(variables.global_variables_initializer())
sess.run(embedding_variables_and_ops.load_ops())
sess.run(enqueue_ops)
loss_val = sess.run(loss)
```
"""
  # TODO(shizhiw): Consider adding a field to FeatureConfig that indicates that
# the feature should not be used to update embedding table (cr/204852758,
# cr/204940540). Also, this can support different combiners for different
# features within the same table.
# TODO(shizhiw, b/118512626): Remove `batch_size` from `__init__` and move it
# to `FeatureConfig`?
# TODO(shizhiw): will it be cleaner to make `table_to_config_dict` and
# `feature_to_config_dict` lists of `TableSpec` and `FeatureSpec`
# respectively?
# TODO(shizhiw): Consider adding `input_fn` as an option to remove boilerplate
# for-loops around construction of inputs.
# `optimization_parameter` applies to all tables. If the need arises,
# we can add `optimization_parameters` to `TableConfig` to override this
# global setting.
def __init__(self,
table_to_config_dict,
feature_to_config_dict,
batch_size,
mode,
master=None,
optimization_parameters=None,
cluster_def=None,
pipeline_execution_with_tensor_core=False,
partition_strategy='div',
device_config=None):
"""API for using TPU for embedding lookups.
Args:
table_to_config_dict: A dictionary mapping from string of table name to
`TableConfig`. Table refers to an embedding table, e.g. `params`
argument to `tf.nn.embedding_lookup_sparse()`.
feature_to_config_dict: A dictionary mapping from string of feature name
to `FeatureConfig`. Feature refers to ids to lookup in embedding table,
e.g. `sp_ids` argument to `tf.nn.embedding_lookup_sparse()`.
batch_size: An `int` representing the global batch size.
mode: `TRAINING` or `INFERENCE`.
master: A `string` representing the TensorFlow master to use.
optimization_parameters: `AdagradParameters`, `AdamParameters`,
        `StochasticGradientDescentParameters`. Must be set in training and must
be `None` in inference.
cluster_def: A ClusterDef object describing the TPU cluster.
pipeline_execution_with_tensor_core: setting this to `True` makes training
faster, but trained model will be different if step N and step N+1
involve the same set of embedding IDs. Please see
`tpu_embedding_configuration.proto` for details.
partition_strategy: A string, either 'mod' or 'div', specifying how to map
the lookup id to the embedding tensor. For more information see
`tf.nn.embedding_lookup_sparse`.
device_config: A DeviceConfig instance, used when `master` and
`cluster_def` are both `None`.
Raises:
ValueError: if any input is invalid.
"""
if partition_strategy not in ('div', 'mod'):
raise ValueError(
'Invalid partition_strategy {}'.format(partition_strategy))
self._partition_strategy = partition_strategy
_validate_table_to_config_dict(table_to_config_dict)
# Avoid nondeterminism from `Dict` iteration order by using `OrderedDict`.
self._table_to_config_dict = _create_ordered_dict(table_to_config_dict)
_validate_feature_to_config_dict(table_to_config_dict,
feature_to_config_dict)
self._feature_to_config_dict = _create_ordered_dict(feature_to_config_dict)
self._table_to_features_dict, self._table_to_num_features_dict = (
_create_table_to_features_and_num_features_dicts(
self._feature_to_config_dict))
self._combiners = _create_combiners(self._table_to_config_dict,
self._table_to_features_dict)
self._batch_size = batch_size
if master is None and cluster_def is None:
if device_config is None:
        raise ValueError('When master and cluster_def are both None, '
                         'device_config must be set but is not.')
if device_config.num_cores % device_config.num_hosts:
raise ValueError('num_hosts ({}) should divide num_cores ({}) '
'but does not.'.format(device_config.num_cores,
device_config.num_hosts))
self._num_hosts = device_config.num_hosts
self._num_cores = device_config.num_cores
self._num_cores_per_host = self._num_cores // self._num_hosts
self._hosts = [
'{}/replica:0/task:{}/device:CPU:0'.format(device_config.job_name, i)
for i in range(self._num_hosts)
]
else:
tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata( # pylint: disable=protected-access
master,
cluster_def=cluster_def))
if tpu_system_metadata.num_cores == 0:
raise ValueError('TPUEmbedding needs TPUs, but master {} does not have '
'TPUs.'.format(master))
self._num_hosts = tpu_system_metadata.num_hosts
master_job_name = tpu_system_metadata_lib.master_job(master, cluster_def)
self._hosts = []
for device in tpu_system_metadata.devices:
if 'device:CPU:' in device.name and (
master_job_name is None or master_job_name in device.name):
self._hosts.append(device.name)
self._num_cores_per_host = tpu_system_metadata.num_of_cores_per_host
self._num_cores = tpu_system_metadata.num_cores
_validate_batch_size(self._batch_size, self._num_cores)
self._batch_size_per_core = self._batch_size // self._num_cores
# TODO(shizhiw): remove `mode`?
if mode == TRAINING:
_validate_optimization_parameters(optimization_parameters)
self._optimization_parameters = optimization_parameters
elif mode == INFERENCE:
if optimization_parameters is not None:
raise ValueError('`optimization_parameters` should be `None` '
'for inference mode.')
self._optimization_parameters = (
StochasticGradientDescentParameters(1.))
else:
raise ValueError('`mode` only supports {} and {}; got {}.'
.format(TRAINING, INFERENCE, mode))
self._mode = mode
# TODO(shizhiw): move `optimization_parameters` into `_optimizer_handler`
# and create special handler for inference that inherits from
# StochasticGradientDescentHandler with more user-friendly error message
# on get_slot().
self._optimizer_handler = _get_optimization_handler(
self._optimization_parameters)
self._pipeline_execution_with_tensor_core = (
pipeline_execution_with_tensor_core)
self._config_proto = self._create_config_proto()
@property
def hosts(self):
"""A list of device names for CPU hosts.
Returns:
A list of device names for CPU hosts.
"""
return copy.copy(self._hosts)
# TODO(shizhiw): change to num_tensor_cores_per_host to be more explicit and
# to be consistent with `tpu_embedding_configuration.proto`.
@property
def num_cores_per_host(self):
"""Number of TPU cores on a CPU host.
Returns:
Number of TPU cores on a CPU host.
"""
return self._num_cores_per_host
@property
def num_cores(self):
"""Total number of TPU cores on all hosts.
Returns:
Total number of TPU cores on all hosts.
"""
return self._num_cores
@property
def batch_size_per_core(self):
"""Batch size for each TPU core.
    The sparse tensors in the `sparse_features_list` passed to
    `generate_enqueue_ops` must have a batch dimension equal to this.
Returns:
Batch size for each TPU core.
"""
return self._batch_size_per_core
@property
def config_proto(self):
"""Create embedding config proto for `tpu.initialize_system()`.
Returns:
      a `TPUEmbeddingConfiguration` proto describing the desired
configuration of the hardware embedding lookup tables, which
is passed to `tpu.initialize_system()`.
"""
return self._config_proto
@property
def table_to_config_dict(self):
return copy.copy(self._table_to_config_dict)
@property
def feature_to_config_dict(self):
return copy.copy(self._feature_to_config_dict)
@property
def table_to_features_dict(self):
return copy.copy(self._table_to_features_dict)
@property
def optimization_parameters(self):
return self._optimization_parameters
def _create_config_proto(self):
"""Create `TPUEmbeddingConfiguration`."""
config_proto = elc.TPUEmbeddingConfiguration()
for table in self._table_to_config_dict:
table_descriptor = config_proto.table_descriptor.add()
table_descriptor.name = table
table_config = self._table_to_config_dict[table]
# For small tables, we pad to the number of hosts so that at least one
# id will be assigned to each host.
table_descriptor.vocabulary_size = max(table_config.vocabulary_size,
len(self.hosts))
table_descriptor.dimension = table_config.dimension
table_descriptor.num_features = self._table_to_num_features_dict[table]
table_descriptor.optimization_parameters.learning_rate.constant = (
self._optimization_parameters.learning_rate)
table_descriptor.optimization_parameters.gradient_accumulation_status = (
optimization_parameters_pb2.GradientAccumulationStatus.ENABLED
if self._optimization_parameters.use_gradient_accumulation else
optimization_parameters_pb2.GradientAccumulationStatus.DISABLED)
if self._optimization_parameters.clip_weight_min is not None:
table_descriptor.optimization_parameters.clipping_limits.lower.value = (
self._optimization_parameters.clip_weight_min)
if self._optimization_parameters.clip_weight_max is not None:
table_descriptor.optimization_parameters.clipping_limits.upper.value = (
self._optimization_parameters.clip_weight_max)
self._optimizer_handler.set_optimization_parameters(table_descriptor)
config_proto.mode = self._mode
config_proto.batch_size_per_tensor_core = self._batch_size_per_core
config_proto.num_hosts = self._num_hosts
config_proto.num_tensor_cores = self._num_cores
config_proto.sharding_strategy = (
elc.TPUEmbeddingConfiguration.DIV_DEFAULT
if self._partition_strategy == 'div' else
elc.TPUEmbeddingConfiguration.MOD)
config_proto.pipeline_execution_with_tensor_core = (
self._pipeline_execution_with_tensor_core)
return config_proto
def create_variables_and_ops(self, embedding_variable_name_by_table=None,
slot_variable_names_by_table=None):
"""Create embedding and slot variables, with ops to load and retrieve them.
Args:
embedding_variable_name_by_table: A dictionary mapping from string of
table name to string of embedding variable name. If `None`,
        the table name will be used as the embedding variable name.
slot_variable_names_by_table: A dictionary mapping from string of table
        name to `AdamSlotVariableNames`, `AdagradSlotVariableName` etc. If
`None`, defaults from `get_default_slot_variable_names()` will be used.
Returns:
`tpu_embedding.VariablesAndOps` with:
A dictionary mapping from string of table name to embedding variables,
        A dictionary mapping from string of table name to `AdagradSlotVariable`,
         `AdamSlotVariables` etc. holding the slot variables,
        A function which returns a list of ops to load embedding and slot
         variables from CPU to TPU.
        A function which returns a list of ops to retrieve embedding and slot
         variables from TPU to CPU.
"""
embedding_variables_by_table = {}
slot_variables_by_table = {}
load_op_fns = []
retrieve_op_fns = []
for table in self._table_to_config_dict:
if embedding_variable_name_by_table:
embedding_variable_name = embedding_variable_name_by_table[table]
else:
embedding_variable_name = table
if slot_variable_names_by_table:
slot_variable_names = slot_variable_names_by_table[table]
else:
slot_variable_names = (
self._optimizer_handler.get_default_slot_variable_names(table))
device_fn = _create_device_fn(self._hosts)
with ops.device(device_fn):
table_variables = _create_partitioned_variables(
name=embedding_variable_name,
num_hosts=self._num_hosts,
vocabulary_size=self._table_to_config_dict[table].vocabulary_size,
embedding_dimension=self._table_to_config_dict[table].dimension,
initializer=self._table_to_config_dict[table].initializer,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
embedding_variables_by_table[table] = table_variables
slot_variables_for_table, load_ops_fn, retrieve_ops_fn = (
self._optimizer_handler.create_variables_and_ops(
table, slot_variable_names, self._num_hosts,
self._table_to_config_dict[table], table_variables)
)
slot_variables_by_table[table] = slot_variables_for_table
load_op_fns.append(load_ops_fn)
retrieve_op_fns.append(retrieve_ops_fn)
def load_ops():
"""Calls and returns the load ops for each embedding table.
Returns:
A list of ops to load embedding and slot variables from CPU to TPU.
"""
load_ops_list = []
for load_op_fn in load_op_fns:
load_ops_list.extend(load_op_fn())
return load_ops_list
def retrieve_ops():
"""Calls and returns the retrieve ops for each embedding table.
Returns:
A list of ops to retrieve embedding and slot variables from TPU to CPU.
"""
retrieve_ops_list = []
for retrieve_op_fn in retrieve_op_fns:
retrieve_ops_list.extend(retrieve_op_fn())
return retrieve_ops_list
return VariablesAndOps(embedding_variables_by_table,
slot_variables_by_table,
load_ops, retrieve_ops)
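  # Typical call order (sketch; `embedding` and `sess` are assumed): create
  # the variables once, run the load ops before training, and run the
  # retrieve ops before reading embeddings back on the CPU:
  #
  #   variables_and_ops = embedding.create_variables_and_ops()
  #   sess.run(variables_and_ops.load_ops())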
def generate_enqueue_ops(self, enqueue_datas_list):
"""Generate enqueue ops.
Args:
      enqueue_datas_list: a list of dictionaries mapping from string
        of feature names to EnqueueData. Each dictionary is for one
        TPU core. Dictionaries for the same host should be contiguous
        in the list.
Returns:
Ops to enqueue to TPU for embedding.
"""
self._validate_generate_enqueue_ops_enqueue_datas_list(enqueue_datas_list)
return [
self._generate_enqueue_op(
enqueue_datas, device_ordinal=i % self._num_cores_per_host)
for i, enqueue_datas in enumerate(enqueue_datas_list)
]
def _validate_generate_enqueue_ops_enqueue_datas_list(self,
enqueue_datas_list):
"""Validate `enqueue_datas_list`."""
feature_set = set(self._feature_to_config_dict.keys())
contiguous_device = None
for i, enqueue_datas in enumerate(enqueue_datas_list):
used_feature_set = set(enqueue_datas.keys())
# Check features are valid.
missing_feature_set = feature_set - used_feature_set
if missing_feature_set:
raise ValueError('`enqueue_datas_list[{}]` misses a feature that is '
'in `feature_to_config_dict`: {}.'.format(
i, missing_feature_set))
extra_feature_set = used_feature_set - feature_set
if extra_feature_set:
raise ValueError('`enqueue_datas_list[{}]` has a feature that is not '
'in `feature_to_config_dict`: {}.'.format(
i, extra_feature_set))
device = None
device_feature = None
for feature, enqueue_data in six.iteritems(enqueue_datas):
combiner = self._table_to_config_dict[
self._feature_to_config_dict[feature].table_id].combiner
if not isinstance(enqueue_data, EnqueueData):
raise ValueError('`enqueue_datas_list[{}]` has a feature that is '
'not mapped to `EnqueueData`. `feature`: {}'.format(
i, feature))
if enqueue_data.sample_indices is None and combiner:
          raise ValueError('`enqueue_datas_list[{}]` has a feature whose '
                           '`sample_indices` is None although its table uses '
                           'a combiner. `feature`: {}, `combiner`: {}.'.format(
                               i, feature, combiner))
if (enqueue_data.sample_indices is not None and
enqueue_data.sample_indices.op.device !=
enqueue_data.embedding_indices.op.device):
raise ValueError(
'Device of sample_indices does not agree with '
              'that of embedding_indices for feature {}.'.format(feature))
if (enqueue_data.aggregation_weights is not None and
enqueue_data.aggregation_weights.op.device !=
enqueue_data.embedding_indices.op.device):
raise ValueError(
'Device of aggregation_weights does not agree with '
              'that of embedding_indices for feature {}.'.format(feature))
# Check all features are on the same device.
if device is None:
device = enqueue_data.embedding_indices.op.device
device_feature = feature
else:
if device != enqueue_data.embedding_indices.op.device:
raise ValueError('Devices are different between features in '
'`enqueue_datas_list[{}]`; '
'devices: {}, {}; features: {}, {}.'.format(
i, device,
enqueue_data.embedding_indices.op.device,
feature, device_feature))
if i % self._num_cores_per_host:
if device != contiguous_device:
raise ValueError('We expect the `enqueue_datas` which are on the '
'same host to be contiguous in '
'`enqueue_datas_list`, '
'`enqueue_datas_list[{}]` is on device {}, '
'but is expected to be on device {}.'.format(
i, device, contiguous_device))
else:
contiguous_device = device
def _generate_enqueue_op(self, enqueue_datas, device_ordinal):
enqueue_data0 = list(enqueue_datas.values())[0]
with ops.colocate_with(enqueue_data0.embedding_indices):
(sample_indices_list, embedding_indices_list, aggregation_weights_list,
table_ids, max_sequence_lengths) = (
self._format_for_tpu_embedding_sparse_tensor_batch(enqueue_datas))
return tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch(
sample_indices_list,
embedding_indices_list,
aggregation_weights_list,
table_ids,
device_ordinal=device_ordinal,
combiners=self._combiners,
max_sequence_lengths=max_sequence_lengths)
def _format_for_tpu_embedding_sparse_tensor_batch(self, enqueue_datas):
"""Format sparse features for `enqueue_tpu_embedding_sparse_tensor_batch()`.
Args:
enqueue_datas: a `Dict` of tensors for embedding. Can be sparse or
dense.
Returns:
Arguments for `enqueue_tpu_embedding_sparse_tensor_batch()`.
"""
(sample_indices_list, embedding_indices_list, aggregation_weights_list,
table_ids, max_sequence_lengths) = [], [], [], [], []
for table_id, table in enumerate(self._table_to_features_dict):
features = self._table_to_features_dict[table]
for feature in features:
enqueue_data = enqueue_datas[feature]
sample_indices = (
enqueue_data.sample_indices
if enqueue_data.sample_indices is not None else array_ops.zeros(
(0,), dtype=dtypes.int32))
sample_indices_list.append(sample_indices)
aggregation_weights = (
enqueue_data.aggregation_weights if
enqueue_data.aggregation_weights is not None else array_ops.zeros(
(0,), dtype=dtypes.float32))
aggregation_weights_list.append(aggregation_weights)
embedding_indices_list.append(enqueue_data.embedding_indices)
table_ids.append(table_id)
max_sequence_lengths.append(
self._feature_to_config_dict[feature].max_sequence_length)
return (sample_indices_list, embedding_indices_list,
aggregation_weights_list, table_ids, max_sequence_lengths)
def get_activations(self):
"""Get activations for features.
This should be called within `computation` that is passed to
`tpu.replicate` and friends.
Returns:
A dictionary mapping from `String` of feature name to `Tensor`
of activation.
"""
recv_activations = tpu_ops.recv_tpu_embedding_activations(
num_outputs=len(self._table_to_config_dict),
config=self._config_proto.SerializeToString())
activations = collections.OrderedDict()
for table_id, table in enumerate(self._table_to_features_dict):
features = self._table_to_features_dict[table]
num_features = self._table_to_num_features_dict[table]
feature_index = 0
table_activations = array_ops.reshape(
recv_activations[table_id],
[self.batch_size_per_core, num_features, -1])
for feature in features:
seq_length = self._feature_to_config_dict[feature].max_sequence_length
if not seq_length:
activations[feature] = table_activations[:, feature_index, :]
feature_index = feature_index + 1
else:
activations[feature] = (
table_activations[:, feature_index:(feature_index+seq_length), :])
feature_index = feature_index + seq_length
return activations
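# Illustrative sketch (not from the original file): how the per-table
# activation block returned above is sliced per feature. Uses NumPy as a
# stand-in for the TPU ops; all `_example_*` names are hypothetical.
def _example_slice_activations():
  """Slices a [batch, num_features, dim] block like get_activations()."""
  import numpy as np
  batch_size_per_core, dim = 4, 3
  # One table holding a non-sequence feature and a sequence feature of
  # max_sequence_length=2, so num_features = 1 + 2 = 3.
  table_activations = np.arange(
      batch_size_per_core * 3 * dim, dtype=np.float32).reshape(
          [batch_size_per_core, 3, dim])
  non_seq = table_activations[:, 0, :]   # shape [4, 3]
  seq = table_activations[:, 1:3, :]     # shape [4, 2, 3]
  return non_seq.shape, seq.shape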
def generate_send_gradients_op(self, feature_to_gradient_dict):
"""Send gradient to TPU embedding.
Args:
feature_to_gradient_dict: dict mapping feature names to gradient wrt
activations.
Returns:
SendTPUEmbeddingGradients Op.
Raises:
RuntimeError: If `mode` is not `TRAINING`.
"""
if self._mode != TRAINING:
      raise RuntimeError('Gradients can only be sent to TPU embedding '
                         'in training mode; got mode {}.'.format(self._mode))
gradients = []
for table in self._table_to_features_dict:
features = self._table_to_features_dict[table]
table_gradients = []
for feature in features:
gradient = feature_to_gradient_dict[feature]
# Expand dims for non-sequence feature to match sequence features.
if gradient.shape.ndims == 2:
gradient = array_ops.expand_dims(gradient, 1)
table_gradients.append(gradient)
interleaved_table_grads = array_ops.reshape(
array_ops.concat(table_gradients, axis=1),
[-1, array_ops.shape(table_gradients[0])[-1]])
gradients.append(interleaved_table_grads)
return tpu_ops.send_tpu_embedding_gradients(
inputs=gradients, config=self.config_proto.SerializeToString())
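# Illustrative sketch (not from the original file): the gradient
# interleaving in generate_send_gradients_op(), with NumPy. A [batch, dim]
# non-sequence gradient is expanded to [batch, 1, dim], concatenated with a
# [batch, seq, dim] sequence gradient along axis 1, then flattened to
# [-1, dim]. All names here are hypothetical.
def _example_interleave_gradients():
  import numpy as np
  batch, seq, dim = 2, 3, 4
  non_seq_grad = np.ones((batch, dim), np.float32)
  seq_grad = np.zeros((batch, seq, dim), np.float32)
  expanded = np.expand_dims(non_seq_grad, 1)                  # [2, 1, 4]
  interleaved = np.concatenate([expanded, seq_grad], axis=1)  # [2, 4, 4]
  return interleaved.reshape(-1, dim)                         # [8, 4]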
def _validate_table_to_config_dict(table_to_config_dict):
"""Validate `table_to_config_dict`."""
for k, v in six.iteritems(table_to_config_dict):
if not isinstance(v, TableConfig):
raise ValueError('Value of `table_to_config_dict` must be of type '
'`TableConfig`, got {} for {}.'.format(type(v), k))
def _validate_feature_to_config_dict(table_to_config_dict,
feature_to_config_dict):
"""Validate `feature_to_config_dict`."""
used_table_set = set([feature.table_id
for feature in feature_to_config_dict.values()])
table_set = set(table_to_config_dict.keys())
unused_table_set = table_set - used_table_set
if unused_table_set:
    raise ValueError('`table_to_config_dict` specifies a table that is not '
'used in `feature_to_config_dict`: {}.'
.format(unused_table_set))
extra_table_set = used_table_set - table_set
if extra_table_set:
raise ValueError('`feature_to_config_dict` refers to a table that is not '
'specified in `table_to_config_dict`: {}.'
.format(extra_table_set))
def _validate_batch_size(batch_size, num_cores):
if batch_size % num_cores:
    raise ValueError('`batch_size` is not a multiple of the number of '
                     'cores. `batch_size`={}, `num_cores`={}.'.format(
                         batch_size, num_cores))
def _validate_optimization_parameters(optimization_parameters):
if not isinstance(optimization_parameters, _OptimizationParameters):
raise ValueError('`optimization_parameters` must inherit from '
                     '`_OptimizationParameters`. '
'`type(optimization_parameters)`={}'.format(
type(optimization_parameters)))
class _OptimizerHandler(object):
"""Interface class for handling optimizer specific logic."""
def __init__(self, optimization_parameters):
self._optimization_parameters = optimization_parameters
def set_optimization_parameters(self, table_descriptor):
raise NotImplementedError()
def get_default_slot_variable_names(self, table):
raise NotImplementedError()
def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
table_config, table_variables):
raise NotImplementedError()
class _AdagradHandler(_OptimizerHandler):
"""Handles Adagrad specific logic."""
def __init__(self, optimization_parameters):
super(_AdagradHandler, self).__init__(optimization_parameters)
self._table_to_accumulator_variables_dict = {}
def set_optimization_parameters(self, table_descriptor):
table_descriptor.optimization_parameters.adagrad.SetInParent()
def get_default_slot_variable_names(self, table):
return AdagradSlotVariableName('{}/{}'.format(table, 'Adagrad'))
def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
table_config, table_variables):
accumulator_initializer = init_ops.constant_initializer(
self._optimization_parameters.initial_accumulator)
accumulator_variables = _create_partitioned_variables(
name=slot_variable_names.accumulator,
num_hosts=num_hosts,
vocabulary_size=table_config.vocabulary_size,
embedding_dimension=table_config.dimension,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
initializer=accumulator_initializer)
slot_variables = AdagradSlotVariable(accumulator_variables)
def load_ops_fn():
"""Returns the retrieve ops for AdaGrad embedding tables.
Returns:
A list of ops to load embedding and slot variables from CPU to TPU.
"""
load_op_list = []
for host_id, table_variable, accumulator_variable in (zip(
range(num_hosts), table_variables, accumulator_variables)):
with ops.colocate_with(table_variable):
load_parameters_op = (
tpu_ops.load_tpu_embedding_adagrad_parameters(
parameters=table_variable,
accumulators=accumulator_variable,
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
load_op_list.append(load_parameters_op)
return load_op_list
def retrieve_ops_fn():
"""Returns the retrieve ops for AdaGrad embedding tables.
Returns:
A list of ops to retrieve embedding and slot variables from TPU to CPU.
"""
retrieve_op_list = []
for host_id, table_variable, accumulator_variable in (zip(
range(num_hosts), table_variables, accumulator_variables)):
with ops.colocate_with(table_variable):
retrieved_table, retrieved_accumulator = (
tpu_ops.retrieve_tpu_embedding_adagrad_parameters(
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieve_parameters_op = control_flow_ops.group(
state_ops.assign(table_variable, retrieved_table),
state_ops.assign(accumulator_variable, retrieved_accumulator))
retrieve_op_list.append(retrieve_parameters_op)
return retrieve_op_list
return slot_variables, load_ops_fn, retrieve_ops_fn
class _AdamHandler(_OptimizerHandler):
"""Handles Adam specific logic."""
def __init__(self, optimization_parameters):
super(_AdamHandler, self).__init__(optimization_parameters)
self._table_to_m_variables_dict = {}
self._table_to_v_variables_dict = {}
def set_optimization_parameters(self, table_descriptor):
table_descriptor.optimization_parameters.adam.beta1 = (
self._optimization_parameters.beta1)
table_descriptor.optimization_parameters.adam.beta2 = (
self._optimization_parameters.beta2)
table_descriptor.optimization_parameters.adam.epsilon = (
self._optimization_parameters.epsilon)
table_descriptor.optimization_parameters.adam.use_non_lazy_adam = (
not self._optimization_parameters.lazy_adam)
table_descriptor.optimization_parameters.adam.use_sum_inside_sqrt = (
self._optimization_parameters.sum_inside_sqrt)
def get_default_slot_variable_names(self, table):
return AdamSlotVariableNames('{}/{}/m'.format(table, 'Adam'),
'{}/{}/v'.format(table, 'Adam'))
def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
table_config, table_variables):
m_initializer = init_ops.zeros_initializer()
m_variables = _create_partitioned_variables(
name=slot_variable_names.m,
num_hosts=num_hosts,
vocabulary_size=table_config.vocabulary_size,
embedding_dimension=table_config.dimension,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
initializer=m_initializer)
v_initializer = init_ops.zeros_initializer()
v_variables = _create_partitioned_variables(
name=slot_variable_names.v,
num_hosts=num_hosts,
vocabulary_size=table_config.vocabulary_size,
embedding_dimension=table_config.dimension,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
initializer=v_initializer)
slot_variables = AdamSlotVariables(m_variables, v_variables)
def load_ops_fn():
"""Returns the retrieve ops for AdaGrad embedding tables.
Returns:
A list of ops to load embedding and slot variables from CPU to TPU.
"""
load_op_list = []
for host_id, table_variable, m_variable, v_variable in (zip(
range(num_hosts), table_variables,
m_variables, v_variables)):
with ops.colocate_with(table_variable):
load_parameters_op = (
tpu_ops.load_tpu_embedding_adam_parameters(
parameters=table_variable,
momenta=m_variable,
velocities=v_variable,
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
load_op_list.append(load_parameters_op)
return load_op_list
def retrieve_ops_fn():
"""Returns the retrieve ops for Adam embedding tables.
Returns:
A list of ops to retrieve embedding and slot variables from TPU to CPU.
"""
retrieve_op_list = []
for host_id, table_variable, m_variable, v_variable in (zip(
range(num_hosts), table_variables,
m_variables, v_variables)):
with ops.colocate_with(table_variable):
retrieved_table, retrieved_m, retrieved_v = (
tpu_ops.retrieve_tpu_embedding_adam_parameters(
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieve_parameters_op = control_flow_ops.group(
state_ops.assign(table_variable, retrieved_table),
state_ops.assign(m_variable, retrieved_m),
state_ops.assign(v_variable, retrieved_v))
retrieve_op_list.append(retrieve_parameters_op)
return retrieve_op_list
return slot_variables, load_ops_fn, retrieve_ops_fn
class _StochasticGradientDescentHandler(_OptimizerHandler):
"""Handles stochastic gradient descent specific logic."""
def set_optimization_parameters(self, table_descriptor):
(table_descriptor.optimization_parameters.stochastic_gradient_descent
.SetInParent())
def get_default_slot_variable_names(self, table):
return None
def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
table_config, table_variables):
del table_config
def load_ops_fn():
"""Returns the retrieve ops for AdaGrad embedding tables.
Returns:
A list of ops to load embedding and slot variables from CPU to TPU.
"""
load_op_list = []
for host_id, table_variable in (zip(
range(num_hosts), table_variables)):
with ops.colocate_with(table_variable):
load_parameters_op = (
tpu_ops
.load_tpu_embedding_stochastic_gradient_descent_parameters(
parameters=table_variable,
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
load_op_list.append(load_parameters_op)
return load_op_list
def retrieve_ops_fn():
"""Returns the retrieve ops for SGD embedding tables.
Returns:
A list of ops to retrieve embedding and slot variables from TPU to CPU.
"""
retrieve_op_list = []
for host_id, table_variable in (zip(
range(num_hosts), table_variables)):
with ops.colocate_with(table_variable):
retrieved_table = (
tpu_ops
.retrieve_tpu_embedding_stochastic_gradient_descent_parameters(
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieve_parameters_op = control_flow_ops.group(
state_ops.assign(table_variable, retrieved_table))
retrieve_op_list.append(retrieve_parameters_op)
return retrieve_op_list
return None, load_ops_fn, retrieve_ops_fn
def _get_optimization_handler(optimization_parameters):
if isinstance(optimization_parameters, AdagradParameters):
return _AdagradHandler(optimization_parameters)
elif isinstance(optimization_parameters, AdamParameters):
return _AdamHandler(optimization_parameters)
elif isinstance(optimization_parameters, StochasticGradientDescentParameters):
return _StochasticGradientDescentHandler(optimization_parameters)
else:
    raise NotImplementedError(
        'Optimization parameters of type {} are not supported.'.format(
            type(optimization_parameters)))
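# Minimal sketch (hypothetical `_example_*` names) of the isinstance
# dispatch used by _get_optimization_handler(), including why an unknown
# type must raise rather than return the exception object: returning it
# would silently hand callers a non-handler value.
class _ExampleAdagradParams(object):
  pass
class _ExampleSGDParams(object):
  pass
def _example_get_handler(params):
  if isinstance(params, _ExampleAdagradParams):
    return 'adagrad-handler'
  elif isinstance(params, _ExampleSGDParams):
    return 'sgd-handler'
  raise NotImplementedError(
      'Unsupported parameters type: {}.'.format(type(params)))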
def _create_ordered_dict(d):
"""Create an OrderedDict from Dict."""
return collections.OrderedDict((k, d[k]) for k in sorted(d))
def _create_combiners(table_to_config_dict, table_to_features_dict):
"""Create a per feature list of combiners, ordered by table."""
combiners = []
for table in table_to_config_dict:
combiner = table_to_config_dict[table].combiner or 'sum'
combiners.extend([combiner] * len(table_to_features_dict[table]))
return combiners
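# Illustrative sketch (hypothetical data): _create_combiners() emits one
# combiner per feature, ordered by table, defaulting to 'sum' when a table
# specifies no combiner.
def _example_create_combiners():
  import collections
  table_to_combiner = collections.OrderedDict(
      [('tbl_a', None), ('tbl_b', 'mean')])  # None -> default 'sum'
  table_to_features = {'tbl_a': ['f1', 'f2'], 'tbl_b': ['f3']}
  combiners = []
  for table, combiner in table_to_combiner.items():
    combiners.extend([combiner or 'sum'] * len(table_to_features[table]))
  return combiners  # ['sum', 'sum', 'mean']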
def _create_table_to_features_and_num_features_dicts(feature_to_config_dict):
"""Create mapping from table to a list of its features."""
table_to_features_dict_tmp = {}
table_to_num_features_dict_tmp = {}
for feature, feature_config in six.iteritems(feature_to_config_dict):
if feature_config.table_id in table_to_features_dict_tmp:
table_to_features_dict_tmp[feature_config.table_id].append(feature)
else:
table_to_features_dict_tmp[feature_config.table_id] = [feature]
table_to_num_features_dict_tmp[feature_config.table_id] = 0
if feature_config.max_sequence_length == 0:
table_to_num_features_dict_tmp[feature_config.table_id] = (
table_to_num_features_dict_tmp[feature_config.table_id] + 1)
else:
table_to_num_features_dict_tmp[feature_config.table_id] = (
table_to_num_features_dict_tmp[feature_config.table_id] +
feature_config.max_sequence_length)
table_to_features_dict = collections.OrderedDict()
table_to_num_features_dict = collections.OrderedDict()
for table in sorted(table_to_features_dict_tmp):
table_to_features_dict[table] = sorted(table_to_features_dict_tmp[table])
table_to_num_features_dict[table] = table_to_num_features_dict_tmp[table]
return table_to_features_dict, table_to_num_features_dict
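# Illustrative sketch (hypothetical stand-in for the feature config): how
# the feature-slot counting above works. A non-sequence feature occupies
# one slot; a sequence feature occupies max_sequence_length slots.
def _example_feature_slots():
  import collections
  FakeConfig = collections.namedtuple('FakeConfig',
                                      ['table_id', 'max_sequence_length'])
  feature_to_config = {
      'f_plain': FakeConfig('tbl', 0),  # counts as 1 slot
      'f_seq': FakeConfig('tbl', 3),    # counts as 3 slots
  }
  num_slots = sum(cfg.max_sequence_length or 1
                  for cfg in feature_to_config.values())
  return sorted(feature_to_config), num_slots  # (['f_plain', 'f_seq'], 4)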
def _create_device_fn(hosts):
"""Create device_fn() to use with _create_partitioned_variables()."""
def device_fn(op):
"""Returns the `device` for `op`."""
part_match = re.match(r'.*/part_(\d+)(/|$)', op.name)
dummy_match = re.match(r'.*dummy_(\d+).*', op.name)
if not part_match and not dummy_match:
raise RuntimeError(
'Internal Error: Expected {} to contain /part_* or dummy_*'.format(
op.name))
if part_match:
idx = int(part_match.group(1))
else:
idx = int(dummy_match.group(1))
device = hosts[idx]
    logging.debug('assigning %s to %s.', op, device)
return device
return device_fn
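# Illustrative sketch: the regexes in device_fn() map partitioned-variable
# and dummy-variable op names to a host index. The op and host names below
# are hypothetical.
def _example_device_fn_match():
  import re
  hosts = ['/job:worker/task:0', '/job:worker/task:1']
  results = []
  for name in ['emb/part_1/Assign', 'dummy_0_emb/Initializer']:
    part = re.match(r'.*/part_(\d+)(/|$)', name)
    dummy = re.match(r'.*dummy_(\d+).*', name)
    idx = int(part.group(1)) if part else int(dummy.group(1))
    results.append((name, hosts[idx]))
  return results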
def _create_partitioned_variables(name,
num_hosts,
vocabulary_size,
embedding_dimension,
initializer,
collections=None): # pylint: disable=redefined-outer-name
"""Creates ParitionedVariables based on `num_hosts` for `table`."""
num_slices = min(vocabulary_size, num_hosts)
var_list = list(
variable_scope.get_variable(
name,
shape=(vocabulary_size, embedding_dimension),
partitioner=partitioned_variables.fixed_size_partitioner(num_slices),
dtype=dtypes.float32,
initializer=initializer,
collections=collections,
trainable=False))
if vocabulary_size >= num_hosts:
return var_list
  # For the padded part, define dummy variables to be loaded into the TPU
  # system.
for idx in range(num_hosts - vocabulary_size):
var_list.append(
variable_scope.get_variable(
'dummy_{}_{}'.format(vocabulary_size + idx, name),
shape=(1, embedding_dimension),
dtype=dtypes.float32,
initializer=initializer,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
trainable=False))
return var_list
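# Illustrative sketch of the sharding math above: with fewer vocabulary
# rows than hosts, the real variable gets one slice per row and the
# remaining hosts receive 1-row dummy variables so every host has
# something to load.
def _example_shard_counts(vocabulary_size, num_hosts):
  num_slices = min(vocabulary_size, num_hosts)
  num_dummies = max(0, num_hosts - vocabulary_size)
  return num_slices, num_dummies  # e.g. (3, 1) for vocab=3, hosts=4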
|
tensorflow-master
|
tensorflow/python/tpu/tpu_embedding.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Utilities to handle tensor tracer parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import re
from tensorflow.python.platform import tf_logging as logging
TRACE_MODE_NAN_INF = 'nan-inf'
TRACE_MODE_PART_TENSOR = 'part-tensor'
TRACE_MODE_FULL_TENSOR = 'full-tensor'
TRACE_MODE_FULL_IF_NAN = 'trace-back-if-nan'
TRACE_MODE_NORM = 'norm'
TRACE_MODE_MAX_ABS = 'max-abs'
_FLAG_NAME_TRACE_STACK_SIZE = 'trace_stack_size'
_SUBMODE_BRIEF = 'brief'
_SUBMODE_DETAILED = 'detailed'
_FLAGS_ENV_VAR = 'TENSOR_TRACER_FLAGS'
_FLAG_SINGLE_QUOTE_PAT = re.compile(r"\s*--([^=]+)='([^']*)'")
_FLAG_DOUBLE_QUOTE_PAT = re.compile(r'\s*--([^=]+)="([^"]*)"')
_FLAG_NO_QUOTE_PAT = re.compile(r'\s*--([^=]+)=(\S*)')
_FLAG_NO_EQUAL_PAT = re.compile(r'\s*--([^=]+)\s*')
_FLAG_NAME_ENABLE = 'enable'
_FLAG_NAME_TRACE_MODE = 'trace_mode'
_FLAG_NAME_USE_COMPACT_TRACE = 'compact_trace'
_FLAG_NAME_TRACE_SCALAR_OPS = 'trace_scalar'
_FLAG_NAME_TRACE_BEFORE_OPS = 'trace_before_included_ops'
_FLAG_NAME_TRACE_AFTER_OPS = 'trace_after_included_ops'
_FLAG_NAME_SUBMODE = 'submode'
_FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS = 'include_less_interesting_ops'
_FLAG_NAME_EXCLUDED_OPNAMES = 'excluded_opnames'
_FLAG_NAME_EXCLUDED_OPTYPES = 'excluded_optypes'
_FLAG_NAME_INCLUDED_OPNAMES = 'included_opnames'
_FLAG_NAME_INCLUDED_OPTYPES = 'included_optypes'
_FLAG_NAME_INCLUDED_CORES = 'included_cores'
_FLAG_NAME_TRACE_DIR = 'trace_dir'
_FLAG_NAME_REPORT_FILE = 'report_file'
_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR = 'use_test_undeclared_outputs_dir'
_FLAG_NAME_OP_RANGE = 'op_range'
# Folder to dump the pre (before tensor tracer updates) and post graphs (after
# tensor tracer updates).
_FLAG_DUMP_BEFORE_AFTER_GRAPHS = 'dump_graphs'
_OP_RANGE_PAT = re.compile(r'(\d+):(\d+)')
_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR = 'TEST_UNDECLARED_OUTPUTS_DIR'
class TTParameters(object):
"""A class that handles the parameters of Tensor Tracer."""
def __init__(self, env=None):
if env:
self._env = env
else:
self._env = os.environ
self._validate_flag_names()
self.trace_mode = self._get_trace_mode()
self.submode = self._get_submode()
self.trace_dir = self._get_trace_dir()
self.report_file_path = self._get_report_filepath()
self.op_range = self._get_op_range()
self.excluded_opname_re_list = self._flag_value_to_re_list(
_FLAG_NAME_EXCLUDED_OPNAMES)
self.excluded_optype_re_list = self._flag_value_to_re_list(
_FLAG_NAME_EXCLUDED_OPTYPES)
self.included_opname_re_list = self._flag_value_to_re_list(
_FLAG_NAME_INCLUDED_OPNAMES)
self.included_optype_re_list = self._flag_value_to_re_list(
_FLAG_NAME_INCLUDED_OPTYPES)
self.is_conditional_trace = self._is_conditional_trace_mode()
self.trace_scalar_ops = self.is_flag_on(_FLAG_NAME_TRACE_SCALAR_OPS)
self.use_compact_trace = self.is_flag_on(_FLAG_NAME_USE_COMPACT_TRACE)
    # _trace_ops_before_included and _trace_ops_after_included denote the
    # depth of tracing relative to the ops given in --included_opnames or
    # --included_optypes.
# For example, in the below graph
# op1 --> op2 --> op3 --> op4 --> op5
# If --included_opnames=op3 then only op3 will be traced.
# If also --trace_before_included_ops=2 (_trace_ops_before_included), then
# op1 and op2 will be traced as they are at most 2 hops apart from an
# included op. Similarly, if --trace_after_included_ops=2, then op4 and op5
# will also be traced.
self.trace_ops_before_included = self._get_flag_int_value(
_FLAG_NAME_TRACE_BEFORE_OPS, 0)
self.trace_ops_after_included = self._get_flag_int_value(
_FLAG_NAME_TRACE_AFTER_OPS, 0)
self.trace_stack_size = self._get_flag_int_value(
_FLAG_NAME_TRACE_STACK_SIZE, 1)
_, self.graph_dump_path = self.get_flag_value(
_FLAG_DUMP_BEFORE_AFTER_GRAPHS)
self.included_cores = self._flag_value_as_int_list(
_FLAG_NAME_INCLUDED_CORES)
self.include_less_interesting_ops, _ = self.get_flag_value(
_FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS)
def _is_conditional_trace_mode(self):
return self.trace_mode == TRACE_MODE_FULL_IF_NAN
def _get_report_filepath(self):
"""Sets the path of the output report file."""
found, report_file_path = self.get_flag_value(
_FLAG_NAME_REPORT_FILE)
    if (found and report_file_path and
        self.use_test_undeclared_outputs_dir()):
      if os.path.isabs(report_file_path):
        raise ValueError('If use_test_undeclared_outputs_dir is set, '
                         'report_file_path cannot be an absolute path (%s)'
                         % report_file_path)
outputs_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
report_file_path = os.path.join(outputs_dir, report_file_path)
return report_file_path
def _get_op_range(self):
"""Sets the index range of the Ops that we will consider tracing."""
found, op_range = self.get_flag_value(_FLAG_NAME_OP_RANGE)
if not found or not op_range:
op_range = (-1, -1) # this means including all ops.
return op_range
match = _OP_RANGE_PAT.match(op_range)
if not match:
op_range = (-1, -1) # this means including all ops.
return op_range
op_range = (int(match.group(1)), int(match.group(2)))
return op_range
def _get_trace_dir(self):
found, trace_dir = self.get_flag_value(_FLAG_NAME_TRACE_DIR)
    if (found and trace_dir and
        self.use_test_undeclared_outputs_dir()):
      raise ValueError('Cannot use --%s and --%s at the same time'
                       % (_FLAG_NAME_TRACE_DIR,
                          _FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR))
if self.use_test_undeclared_outputs_dir():
trace_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
return trace_dir
def _get_trace_mode(self):
"""Checks if the given trace mode is valid."""
found, trace_mode = self.get_flag_value(_FLAG_NAME_TRACE_MODE)
if not found or not trace_mode:
trace_mode = TRACE_MODE_NORM
valid_trace_modes = [
TRACE_MODE_NAN_INF, TRACE_MODE_PART_TENSOR, TRACE_MODE_FULL_TENSOR,
TRACE_MODE_NORM, TRACE_MODE_MAX_ABS, TRACE_MODE_FULL_IF_NAN
]
if trace_mode not in valid_trace_modes:
      raise ValueError('Invalid trace mode "%s" given to the Tensor Tracer. '
                       'Valid trace modes are: %s' % (trace_mode,
                                                      valid_trace_modes))
return trace_mode
def is_brief_mode(self):
return self.submode == _SUBMODE_BRIEF
def _get_submode(self):
"""Checks if the given submode is valid."""
found, submode = self.get_flag_value(_FLAG_NAME_SUBMODE)
if not found or not submode:
submode = _SUBMODE_DETAILED
valid_submodes = [_SUBMODE_DETAILED, _SUBMODE_BRIEF]
if submode not in valid_submodes:
      raise ValueError('Invalid submode "%s" given to the Tensor Tracer. '
                       'Valid submodes are: %s' % (submode, valid_submodes))
return submode
@staticmethod
def match_next_flag(flags, pos):
"""Returns the match for the next TensorTracer flag.
Args:
flags: a string that contains the flags.
pos: where in flags to start the search.
Returns:
A pair where the first element is the regular-expression
match found and the second element indicates if the match
has a value.
"""
match = _FLAG_DOUBLE_QUOTE_PAT.match(flags, pos)
if match:
return match, True
match = _FLAG_SINGLE_QUOTE_PAT.match(flags, pos)
if match:
return match, True
match = _FLAG_NO_QUOTE_PAT.match(flags, pos)
if match:
return match, True
match = _FLAG_NO_EQUAL_PAT.match(flags, pos)
if match:
# The flag is found but is not given a value.
return match, False
# The flag is not found.
return None, False
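# Illustrative sketch: parsing a TENSOR_TRACER_FLAGS-style string with the
# same kind of double-quoted / unquoted patterns used by match_next_flag().
# The flag string below is a hypothetical example.
def _example_parse_flags():
  import re
  flags = '--enable=1 --trace_mode="norm" --included_opnames=matmul'
  pat = re.compile(r'\s*--([^=]+)="([^"]*)"|\s*--([^=]+)=(\S*)')
  parsed = {}
  pos = 0
  while True:
    match = pat.match(flags, pos)
    if not match:
      break
    name = match.group(1) or match.group(3)
    parsed[name] = match.group(2) or match.group(4)
    pos = match.end()
  return parsed  # {'enable': '1', 'trace_mode': 'norm', ...}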
def _validate_flag_names(self):
"""Validates if the TensorTrace flags passed are valid."""
valid_flag_names = [
_FLAG_NAME_ENABLE, _FLAG_NAME_TRACE_MODE, _FLAG_NAME_USE_COMPACT_TRACE,
_FLAG_NAME_TRACE_SCALAR_OPS, _FLAG_NAME_TRACE_BEFORE_OPS,
_FLAG_NAME_TRACE_AFTER_OPS, _FLAG_NAME_TRACE_STACK_SIZE,
_FLAG_NAME_SUBMODE, _FLAG_NAME_EXCLUDED_OPNAMES,
_FLAG_NAME_EXCLUDED_OPTYPES, _FLAG_NAME_INCLUDED_OPNAMES,
_FLAG_NAME_INCLUDED_OPTYPES, _FLAG_NAME_TRACE_DIR,
_FLAG_NAME_INCLUDED_CORES, _FLAG_NAME_REPORT_FILE,
_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR,
_FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS, _FLAG_NAME_OP_RANGE,
_FLAG_DUMP_BEFORE_AFTER_GRAPHS
]
tensor_tracer_flags = self._env.get(_FLAGS_ENV_VAR)
if not tensor_tracer_flags:
return
pos = 0
while True:
match, _ = TTParameters.match_next_flag(tensor_tracer_flags, pos)
if not match:
break
flag_name = match.group(1)
if flag_name not in valid_flag_names:
raise ValueError(
'The flag name "%s" passed via the environment variable "%s" '
'is invalid. Valid flag names are:'
'\n%s'%(flag_name, _FLAGS_ENV_VAR, valid_flag_names))
pos = match.end()
def _flag_value_as_int_list(self, wanted_flag_name):
"""Returns the integer list of a TensorTracer flag.
Args:
wanted_flag_name: the name of the flag we are looking for.
Returns:
the value of the flag.
"""
int_list = []
found, flag_value = self.get_flag_value(wanted_flag_name)
if found:
try:
integer_values = flag_value.split(',')
int_list = [int(int_val) for int_val in integer_values]
except ValueError:
        logging.warning('Cannot convert %s to int for flag %s', flag_value,
                        wanted_flag_name)
return int_list
def _get_flag_int_value(self, wanted_flag_name, default_value):
"""Returns the int value of a TensorTracer flag.
Args:
wanted_flag_name: the name of the flag we are looking for.
default_value: the default value for the flag, if not provided.
Returns:
the value of the flag.
"""
flag_int_value = default_value
found, flag_value = self.get_flag_value(wanted_flag_name)
if found:
try:
flag_int_value = int(flag_value)
except ValueError:
        logging.warning('Cannot convert %s to int for flag %s', flag_value,
                        wanted_flag_name)
return flag_int_value
def get_flag_value(self, wanted_flag_name):
"""Returns the value of a TensorTracer flags.
Args:
wanted_flag_name: the name of the flag we are looking for.
Returns:
A pair where the first element indicates if the flag is
found and the second element is the value of the flag.
Raises:
RuntimeError: If supposedly deadcode is reached.
"""
tensor_tracer_flags = self._env.get(_FLAGS_ENV_VAR)
if not tensor_tracer_flags:
return False, None
pos = 0
while True:
match, has_value = TTParameters.match_next_flag(
tensor_tracer_flags, pos)
if not match:
return False, None
flag_name = match.group(1)
if has_value:
flag_value = match.group(2)
else:
flag_value = None
if flag_name == wanted_flag_name:
return True, flag_value
pos = match.end()
raise RuntimeError('Should not reach here.')
def _flag_value_to_re_list(self, flag_name):
"""Converts list of strings to compiled RE."""
re_list = []
found, flag_value = self.get_flag_value(flag_name)
if not found or not flag_value:
return re_list
list_of_values = flag_value.split()
for v in list_of_values:
r = re.compile(v)
re_list.append(r)
return re_list
def is_flag_on(self, flag_name):
"""Returns True if the given flag is on."""
found, flag_value = self.get_flag_value(flag_name)
if not found:
return False
if flag_value is None:
return True
# Depends on the flag value.
flag_value = flag_value.lower()
enabled = flag_value in ['1', 't', 'true', 'y', 'yes']
return enabled
def is_enabled(self):
"""Returns True if TensorTracer is enabled."""
if self.is_flag_on(_FLAG_NAME_ENABLE):
      logging.info('Tensor Tracer is enabled with flags %s.',
                   self._env.get(_FLAGS_ENV_VAR))
return True
else:
return False
def use_test_undeclared_outputs_dir(self):
"""Decides the output directory of the report and trace files.
Args:
None.
Returns:
True if the output files should be written to the
test-undeclared-outputs-directory defined via an
env variable.
"""
return self.is_flag_on(_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)
|
tensorflow-master
|
tensorflow/python/tpu/tensor_tracer_flags.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for TPU InfeedQueue methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu_feed
class InfeedTest(test.TestCase):
def testConstructor(self):
"""Tests that the constructor can be called with different arguments."""
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
self.assertEqual(i.number_of_tuple_elements, 2)
self.assertEqual(i.tuple_types, None)
self.assertEqual(i.tuple_shapes, None)
self.assertEqual(i.number_of_shards, None)
i = tpu_feed.InfeedQueue(
tuple_types=[dtypes.float32, dtypes.int32, dtypes.int32])
self.assertEqual(i.number_of_tuple_elements, 3)
self.assertEqual(i.tuple_types,
[dtypes.float32, dtypes.int32, dtypes.int32])
self.assertEqual(i.tuple_shapes, None)
self.assertEqual(i.number_of_shards, None)
i = tpu_feed.InfeedQueue(tuple_shapes=[[1], [2, 3]])
self.assertEqual(i.number_of_tuple_elements, 2)
self.assertEqual(i.tuple_types, None)
self.assertEqual(i.tuple_shapes, [[1], [2, 3]])
self.assertEqual(i.number_of_shards, None)
i = tpu_feed.InfeedQueue(shard_dimensions=[1, 0, 7])
self.assertEqual(i.number_of_tuple_elements, 3)
self.assertEqual(i.tuple_types, None)
self.assertEqual(i.tuple_shapes, None)
self.assertEqual([p.shard_dimension
for p in i.sharding_policies], [1, 0, 7])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue()
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(
number_of_tuple_elements=2, tuple_types=[dtypes.float32])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2, tuple_shapes=[[1]])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2, shard_dimensions=[1])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(tuple_shapes=[[1], [2, 3]], shard_dimensions=[1])
def testModification(self):
"""Tests modification of the queue post-construction."""
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
i.set_tuple_types([dtypes.float32, dtypes.int32])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
i.set_tuple_types([dtypes.float32, dtypes.float32])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.float32])
with self.assertRaises(ValueError):
i.set_tuple_types([dtypes.float32])
i.set_tuple_shapes([[1], [2, 3]])
self.assertEqual(i.tuple_shapes, [[1], [2, 3]])
i.set_tuple_shapes([[1, 2], [3, 4]])
self.assertEqual(i.tuple_shapes, [[1, 2], [3, 4]])
with self.assertRaises(ValueError):
i.set_tuple_shapes([[1, 2]])
i.set_number_of_shards(2)
self.assertEqual(i.number_of_shards, 2)
i.set_number_of_shards(3)
self.assertEqual(i.number_of_shards, 3)
t1 = constant_op.constant(1, dtypes.int32, shape=[6])
t2 = constant_op.constant(2.0, dtypes.float32, shape=[3, 18])
i.set_configuration_from_input_tensors([t1, t2])
self.assertEqual(i.tuple_shapes, [[6], [3, 18]])
self.assertEqual(i.tuple_types, [dtypes.int32, dtypes.float32])
i.set_configuration_from_sharded_input_tensors([[t2, t1], [t2, t1]])
self.assertEqual(i.number_of_shards, 2)
self.assertEqual(i.tuple_shapes, [[6, 18], [12]])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
i.set_shard_dimensions([1, 0])
i.set_number_of_shards(3)
with self.assertRaises(ValueError):
i.set_number_of_shards(4)
def testFreezing(self):
"""Tests freezing the queue."""
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
t1 = constant_op.constant(1, dtypes.int32, shape=[2])
t2 = constant_op.constant(2.0, dtypes.float32, shape=[2, 4])
i.set_configuration_from_sharded_input_tensors([[t2, t1], [t2, t1]])
self.assertEqual(i.number_of_shards, 2)
self.assertEqual(i.tuple_shapes, [[4, 4], [4]])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
self.assertEqual(i.shard_dimensions, [0, 0])
i.freeze()
i.set_number_of_shards(2)
i.set_tuple_shapes([[4, 4], [4]])
i.set_tuple_types([dtypes.float32, dtypes.int32])
i.set_shard_dimensions([0, 0])
with self.assertRaises(ValueError):
i.set_number_of_shards(1)
with self.assertRaises(ValueError):
i.set_tuple_shapes([[8, 8], [8]])
with self.assertRaises(ValueError):
i.set_tuple_types([dtypes.int32, dtypes.float32])
with self.assertRaises(ValueError):
i.set_shard_dimensions([1, 0])
self.assertEqual(i.number_of_shards, 2)
self.assertEqual(i.tuple_shapes, [[4, 4], [4]])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
self.assertEqual(i.shard_dimensions, [0, 0])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/tpu/tpu_infeed_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""A utility to trace tensor values on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import sys
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tensor_tracer_flags
from tensorflow.python.tpu import tensor_tracer_report
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu.ops import tpu_ops
_DEVICE_TYPE_TPU = 'tpu'
_DEVICE_TYPE_CPU = 'cpu'
_TRACE_MODE_PART_TENSOR_SIZE = 3
_REASON_OUTSIDE_OP_RANGE = 'not-traced-outside-op-range'
_REASON_UNSAFE_OP = 'not-traced-unsafe-op'
_REASON_WHILELOOP_OP = 'not-traced-special-whileloop-op'
_REASON_UNSAFE_SCALAR = 'not-traced-unsafe-scalar'
_REASON_SKIP_SCALAR = 'not-traced-scalar'
_REASON_LESS_INTERESTING_OP = 'not-traced-less-interesting-op'
_REASON_DEVICE_MISMATCH = 'not-traced-device-mismatch'
_REASON_DYNAMIC_SHAPE = 'not-traced-dynamic-shape'
_REASON_SCALAR_GET_TRACED = 'traced-scalar'
_REASON_TENSOR_GET_TRACED = 'traced-tensor'
_REASON_USER_INCLUDED = 'traced-user-included'
_REASON_USER_EXCLUDED = 'not-traced-user-excluded'
_REASON_NOT_EXECUTED = 'not-traced-not-in-exec-path'
_REASON_NON_NUMERIC_TENSOR = 'not-traced-non-numeric-tensor'
_REASON_FEEDS_WHILELOOP_OP = 'not-traced-feeds-special-whileloop-op'
_OUTPUT_STREAM_ESCAPE = 'file://'
_TENSOR_TRACER_COLLECTION = 'tensor_tracer_variables'
_TRACE_FILE_NAME = 'trace.all'
_COMPACT_TRACE_FILE_PREFIX = 'compact_trace.'
_COMPACT_TRACE_ENTRY_INIT_VALUE = -1.0
_TENSOR_TRACER_STORAGE = 'tensor_tracer_storage'
_TENSOR_VALUES_CACHE = 'tensor_values_cache'
_REPLICA_ID_TAG = '#replica-id: '
def tensor_tracepoint(tensor, checkpoint_name):
"""Adds a checkpoint with the given checkpoint name for the given tensor.
The tensor will be added to the list of tensors that will be traced by the
tensor tracer.
Args:
tensor: the tensor object for which the tracing is requested.
    checkpoint_name: a string name for the checkpoint. This name has to be
      unique if used within model comparison. The tensors that have the same
      checkpoint identifier are compared in model comparison.
Returns:
The provided tensor.
"""
tensor.graph.get_collection(_TENSOR_TRACER_COLLECTION)
tensor.graph.add_to_collection(_TENSOR_TRACER_COLLECTION,
(tensor, checkpoint_name))
return tensor
def keras_layer_tracepoint(layer, checkpoint_name):
"""An interface for adding the tensor outputs of a keras layer.
Encapsulates tensor_tracepoint.
Args:
layer: A keras layer.
    checkpoint_name: a string name for the checkpoint. This name has to be
      unique if used within model comparison. The tensors that have the same
      checkpoint identifier are compared in model comparison.
Returns:
The provided layer.
"""
try:
outputs = layer.output
if tensor_util.is_tensor(outputs):
tensor_tracepoint(outputs, '%s' % (checkpoint_name))
else:
      for idx, output_tensor in enumerate(outputs):
        if tensor_util.is_tensor(output_tensor):
          tensor_tracepoint(output_tensor, '%s_%d' % (checkpoint_name, idx))
  except (AttributeError, RuntimeError):
    # The layer's output is not available (e.g., the layer has not been
    # called yet); skip tracing it.
    pass
return layer
def _trace_files_need_precreated(output_dir):
  """Return True if trace files must be pre-created by users."""
  # Pre-creation is required only for output paths of the form '/cns/...'.
  return output_dir.startswith('/cns/')
def _get_tensor_values_cache(graph=None):
"""Returns the variable that implements tensor-value caching."""
graph = graph or ops.get_default_graph()
collection = graph.get_collection(_TENSOR_TRACER_STORAGE)
if len(collection) == 1:
return collection[0]
elif not collection:
    raise RuntimeError('%s has not been created' % _TENSOR_VALUES_CACHE)
  else:
    raise RuntimeError('Multiple %s created' % _TENSOR_VALUES_CACHE)
def _create_tensor_values_cache(graph, num_tensors):
"""Creates a variable as the cache to store intermediate tensor values."""
graph = graph or ops.get_default_graph()
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
_TENSOR_VALUES_CACHE,
shape=[num_tensors],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
_COMPACT_TRACE_ENTRY_INIT_VALUE),
trainable=False,
use_resource=True,
collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])
class TensorTracer(object):
"""A software construct for tracing tensor values in a TF graph on TPU.
This utility is disabled by default. It can be enabled by setting
the TENSOR_TRACER_FLAGS env variable as:
export TENSOR_TRACER_FLAGS="--enable=1"
If it is enabled, it will trace the output tensor values of
selected Ops in the graph. It has two outputs: (1) the traces and (2)
a report. The traces are dumped to a specified local file on the TPU
host. The report is printed to the log.info of the TPU job.
By passing options via the env variable, users can change:
(1) the trace mode (e.g., detecting NaN/Inf, printing partial or
full tensor values)
(2) which Ops to be traced (via op.name or op.type)
(3) output trace file path.
"""
# The set of graphs that are rewritten by tensor tracer.
_traced_graphs = set()
@staticmethod
def is_enabled():
"""Returns True if TensorTracer is enabled."""
return tensor_tracer_flags.TTParameters().is_enabled()
@staticmethod
def check_device_type(device_type):
"""Checks if the given device type is valid."""
if device_type not in [_DEVICE_TYPE_TPU, _DEVICE_TYPE_CPU]:
raise ValueError('Invalid device_type "%s"'%device_type)
@staticmethod
def loop_cond_op(op):
return op.type in ('LoopCond', 'RefLoopCond')
@staticmethod
def while_loop_op(op):
"""Returns true if op is one of the special ops of in a while loop.
Args:
op: A tf.Operation.
Returns:
True if the given op is one of [Switch, Merge, Enter, Exit,
NextIteration, LoopCond], which are all building blocks for TF while
loops.
"""
return (control_flow_util.IsLoopSwitch(op) or
control_flow_util.IsLoopMerge(op) or
control_flow_util.IsLoopEnter(op) or
control_flow_util.IsLoopExit(op) or
TensorTracer.loop_cond_op(op) or
op.type in ('RefNextIteration', 'NextIteration'))
@staticmethod
def unsafe_op(op):
"""Returns True if this op is not safe to be traced."""
if control_flow_util.IsInCond(op):
return True
# Reasons for not including following op types:
# Assign: cause incorrect result with CPU tracing.
if op.type in ['Assign']:
return True
return False
@staticmethod
def device_mismatch(device_type, op):
if device_type == _DEVICE_TYPE_TPU:
# pylint: disable=protected-access
return tpu._TPU_REPLICATE_ATTR not in op.node_def.attr
# pylint: enable=protected-access
return False
@staticmethod
def unsafe_scalar_trace(op):
"""Return true if scalar output tensor from Op is not safe to be traced."""
# Tracing the following causes cycle in the graph on TPU.
if op.type in ['LoopCond', 'Enter', 'Merge', 'Const',
'Switch', 'Less', 'ReadVariableOp']:
return True
# Tracing the following will cause casting-issue
# with the norm tracing mode or other compilation issues on CPU.
if op.type in ['VarHandleOp', 'IteratorToStringHandle',
'IteratorGetNext', 'OneShotIterator',
'IteratorV2', 'MakeIterator',
'BatchDatasetV2', 'MapDataset',
'FixedLengthRecordDataset', 'TakeDataset', 'ZipDataset',
'Placeholder', 'PlaceholderWithDefault', 'StridedSlice']:
return True
return False
def _less_interesting_op(self, op):
"""Returns True if the given op is not an interesting one to be traced."""
# If flag is set to include less interesting ops, then include everything.
if self._parameters.include_less_interesting_ops:
return False
    # The following ops are highly unlikely to cause bugs.
return op.type in ['Const', 'Identity', 'Cast', 'Shape']
@staticmethod
def reason(op_idx, details):
"""Returns reason why the Op at op_idx is traced or not."""
return '%d %s'%(op_idx, details)
def __init__(self):
"""Initializes a TensorTracer.
Sets the various member fields from the flags (if given) or the defaults.
"""
self._replica_id = None
self._tt_config = tensor_tracer_report.TensorTracerConfig()
self._parameters = tensor_tracer_flags.TTParameters()
self._included_op_full_names = set()
def _add_replica_id_to_graph(self):
"""Adds nodes for computing the replica ID to the graph."""
if self._tt_config.num_replicas:
with ops.control_dependencies(None):
# Uses None as dependency to run outside of TPU graph rewrites.
self._replica_id = tpu_ops.tpu_replicated_input(
list(range(self._tt_config.num_replicas)),
name='tt_replica_id')
else:
self._replica_id = 'unknown'
def _inside_op_range(self, idx):
"""Return True if the given index is inside the selected range."""
if idx < self._parameters.op_range[0]:
return False
return (self._parameters.op_range[1] < 0 or
idx <= self._parameters.op_range[1])
def _is_user_included_op(self, op):
"""Checks whether the op is included in the tensor tracer flags.
Args:
op: tf Operation
Returns:
True, if the op is included.
An op is included if:
- Its op name is given in included_opnames
- Its op type is given in included_optypes
- The op is at most _trace_ops_before_included hops before an included op
- The op is at most _trace_ops_after_included hops after an included op
"""
def _is_op_or_any_neighbor_included(op, check_before=0, check_after=0):
"""Helper function to check if op is included or not."""
if op.name in self._included_op_full_names:
return True
for opname_re in self._parameters.included_opname_re_list:
if opname_re.match(op.name):
self._included_op_full_names.add(op.name)
return True
for optype_re in self._parameters.included_optype_re_list:
if optype_re.match(op.type):
self._included_op_full_names.add(op.name)
return True
if check_after > 0:
for out_tensor in op.outputs:
for consumer in out_tensor.consumers():
if _is_op_or_any_neighbor_included(consumer, check_after - 1, 0):
self._included_op_full_names.add(op.name)
return True
if check_before > 0:
for input_tensor in op.inputs:
if _is_op_or_any_neighbor_included(input_tensor.op,
0,
check_before - 1):
self._included_op_full_names.add(op.name)
return True
return False
# check_after and check_before are swapped below, as below operation
# checks the distance from an arbitrary op to included ops.
return _is_op_or_any_neighbor_included(
op, self._parameters.trace_ops_after_included,
self._parameters.trace_ops_before_included)
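# Illustrative sketch (pure Python, hypothetical graph): a simplified form
# of the hop-based inclusion above, without the memoization the real code
# keeps in _included_op_full_names. A node is included if it is within
# `after` hops of an already-included node when walking its consumers.
def _example_neighbor_included(consumers, included, node, after):
  """consumers: dict node -> list of consumer nodes."""
  if node in included:
    return True
  if after > 0:
    return any(
        _example_neighbor_included(consumers, included, c, after - 1)
        for c in consumers.get(node, []))
  return False
# _example_neighbor_included({'op1': ['op2'], 'op2': ['op3']},
#                            {'op3'}, 'op1', after=2) -> True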
def _is_user_excluded_op(self, op):
for opname_re in self._parameters.excluded_opname_re_list:
if opname_re.match(op.name):
return True
for optype_re in self._parameters.excluded_optype_re_list:
if optype_re.match(op.type):
return True
return False
def _use_tensor_values_cache(self):
"""Returns True if immediate tensors should be first saved to a cache."""
if self._parameters.trace_mode not in set([
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS]):
return False
if (self._parameters.trace_dir and
_trace_files_need_precreated(self._parameters.trace_dir)):
return True
return self._parameters.use_compact_trace
def _save_tensor_value_to_cache_op(self, graph, cache_idx, updates):
"""Returns an Op that will save the given updates to an entry in the cache."""
cache = _get_tensor_values_cache(graph)
indices = constant_op.constant([cache_idx])
return state_ops.scatter_update(cache, indices, updates).op
def _preprocess_traced_tensor(self, tensor):
"""Computes NAN/Norm/Max on TPUs before sending to CPU.
Args:
tensor: The tensor to be traced.
Returns:
A tensor that should be input to the trace_function.
Raises:
RuntimeError: If the trace mode is invalid.
"""
def _detect_nan_inf(tensor):
"""Trace function for detecting any NaN/Inf in the tensor."""
if tensor.dtype.is_floating:
mask = math_ops.reduce_any(
gen_math_ops.logical_or(
gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))
output_tensor = control_flow_ops.cond(mask,
lambda: constant_op.constant(1.0),
lambda: constant_op.constant(0.0))
else:
output_tensor = constant_op.constant(0.0)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _show_norm(tensor):
tensor = math_ops.cast(tensor, dtypes.float32)
output_tensor = linalg_ops.norm(tensor)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _show_max_abs(tensor):
tensor = math_ops.cast(tensor, dtypes.float32)
output_tensor = math_ops.reduce_max(math_ops.abs(tensor))
zero = constant_op.constant(0, dtypes.float32)
output_tensor = gen_math_ops.maximum(zero, output_tensor)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _detect_inf_nan_producer(tensor):
"""Checks if the tensor is the first NaN/Inf tensor in the computation path."""
if tensor.op.inputs:
inp_check = [
_detect_nan_inf(inp_tensor) for inp_tensor in tensor.op.inputs
]
is_any_input_inf_nan = math_ops.add_n(inp_check)
else:
        # Match the float32 [1]-shaped outputs of _detect_nan_inf so the
        # subtraction below type-checks.
        is_any_input_inf_nan = constant_op.constant(0.0)
is_current_tensor_inf_nan = _detect_nan_inf(tensor)
      # An op is a NaN/Inf producer only when all of its inputs are NaN/Inf
      # free (is_any_input_inf_nan = 0) and its own output has a NaN/Inf
      # (is_current_tensor_inf_nan = 1), i.e., the difference below is
      # positive.
is_nan_producer = is_current_tensor_inf_nan - is_any_input_inf_nan
is_nan_producer = math_ops.reduce_any(is_nan_producer > 0)
return is_nan_producer
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_IF_NAN):
return _detect_inf_nan_producer(tensor)
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:
return _detect_nan_inf(tensor)
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_PART_TENSOR):
return tensor
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR):
return tensor
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NORM:
return _show_norm(tensor)
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_MAX_ABS:
return _show_max_abs(tensor)
raise RuntimeError(
'Tensor trace fun for %s is not yet implemented'
% self._parameters.trace_mode)
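# Illustrative sketch (NumPy stand-in, hypothetical names): the scalar
# summaries computed on-device for the nan-inf, norm, and max-abs trace
# modes before they are sent to the CPU.
def _example_trace_summaries():
  import numpy as np
  t = np.array([[1.0, -5.0], [0.5, 2.0]], np.float32)
  nan_inf = np.float32(np.any(np.isnan(t) | np.isinf(t)))  # 0.0: no NaN/Inf
  norm = np.linalg.norm(t)                                 # Frobenius norm
  max_abs = np.max(np.abs(t))                              # 5.0
  # Each summary is reshaped to shape [1], as in the trace functions above.
  return (np.reshape(nan_inf, [1]), np.reshape(norm, [1]),
          np.reshape(max_abs, [1]))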
def _make_tensor_trace_fun(self, tensor_name, tensor_trace_order):
"""Makes the tensor tracing function called by outside compilation.
Args:
tensor_name: name of the tensor being traced.
tensor_trace_order: TensorTraceOrder object holding tensorname to id map.
Returns:
A function to be passed as the first argument to outside compilation.
Raises:
RuntimeError: If the trace mode is invalid.
"""
def _print_tensor(tensor_name, num_elements, tensor, output_tensor):
"""Prints a tensor value to a file.
Args:
tensor_name: name of the tensor being traced.
num_elements: number of elements to print (-1 means print all).
        tensor: the tensor to be returned.
        output_tensor: the tensor to be printed.
Returns:
The same tensor passed via the "tensor" argument.
Raises:
ValueError: If tensor_name is not already in
self._tensorname_idx_map.
"""
if self._parameters.is_brief_mode():
if tensor_name not in tensor_trace_order.tensorname_idx_map:
raise ValueError(
'Tensor name %s is not in the tensorname_idx_map'%tensor_name)
        msg = '%d' % tensor_trace_order.tensorname_idx_map[tensor_name]
else:
msg = '"%s"'%tensor_name
if self._parameters.trace_dir:
output_path = os.path.join(self._parameters.trace_dir, _TRACE_FILE_NAME)
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
return logging_ops.print_v2(msg, array_ops.shape(output_tensor),
'@', self._replica_id,
'\n', output_tensor, '\n',
summarize=num_elements,
output_stream=output_stream)
def _show_part_tensor(tensor):
"""Trace function for printing part of the tensor."""
return _print_tensor(tensor_name, _TRACE_MODE_PART_TENSOR_SIZE,
tensor, tensor)
def _show_full_tensor(tensor):
"""Trace function for printing the entire tensor."""
return _print_tensor(tensor_name, -1, tensor, tensor)
def _show_full_tensors(tensor):
"""Prints the full tensor values for the tensors that are _trace_stack_size hops away from a given tensor."""
def _get_distance_k_tensors(k_before=0):
"""Returns the tensors that are at most k_before hops away from the tensor."""
if k_before < 0:
return []
visited_tensors = {tensor: 0}
visitor_queue = [tensor]
head = 0
while head < len(visitor_queue):
current_tensor = visitor_queue[head]
head += 1
distance = visited_tensors[current_tensor]
if distance == k_before:
break
for input_tensor in current_tensor.op.inputs:
if input_tensor in visited_tensors:
continue
visitor_queue.append(input_tensor)
visited_tensors[input_tensor] = distance + 1
return visitor_queue
tensors_to_print = _get_distance_k_tensors(
self._parameters.trace_stack_size)
print_ops = [_print_tensor(t.name, -1, t, t) for t in tensors_to_print]
with ops.control_dependencies(print_ops):
return constant_op.constant(True)
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_IF_NAN):
return _show_full_tensors
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_PART_TENSOR):
return _show_part_tensor
# The input tensor has a shape of "[1]" for TRACE_MODE_NAN_INF,
# TRACE_MODE_NORM, and TRACE_MODE_MAX_ABS, as related computations are
# performed within TPUs and only their results are transferred to CPU.
# Simply, print the full tensor for these trace modes.
if self._parameters.trace_mode in [
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR,
tensor_tracer_flags.TRACE_MODE_MAX_ABS]:
return _show_full_tensor
    raise RuntimeError('Tensor trace fun for %s is not yet implemented'
                       % self._parameters.trace_mode)
def _skip_op(self, op_id, op, ops_in_exec_path, report_handler):
"""Returns True if we should not trace Op."""
if TensorTracer.while_loop_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_WHILELOOP_OP))
return True
if TensorTracer.unsafe_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_UNSAFE_OP))
return True
if TensorTracer.device_mismatch(self._tt_config.device_type, op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_DEVICE_MISMATCH))
return True
if op not in ops_in_exec_path:
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_NOT_EXECUTED))
return True
if not self._inside_op_range(op_id):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_OUTSIDE_OP_RANGE))
return True
if self._less_interesting_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_LESS_INTERESTING_OP))
return True
if self._is_user_included_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
return False
if self._is_user_excluded_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
return True
return False
def _skip_tensor(self, op_id, out_tensor, report_handler):
"""Returns True if we should not trace out_tensor."""
# Skips a tensor if the tensor has a non-numeric type.
    # Note: we cannot use check_ops.is_numeric_tensor(out_tensor) because it
    # also excludes tensors with dtypes bool and float32_ref, which we
    # actually want to trace.
non_numeric_tensor_types = set([dtypes.variant, dtypes.resource,
dtypes.string])
if out_tensor.dtype in non_numeric_tensor_types:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_NON_NUMERIC_TENSOR))
return True
# Skip a tensor if it feeds a special while loop op.
if [consumer for consumer in out_tensor.consumers() if
TensorTracer.while_loop_op(consumer)]:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_FEEDS_WHILELOOP_OP))
return True
if self._is_user_included_op(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
return False
if self._is_user_excluded_op(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
return True
if not out_tensor.get_shape().is_fully_defined():
# If trace mode is nan-inf, norm or max, then the tensor will be reduced
# to a scalar before the outside compilation call.
if self._parameters.trace_mode in [
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS
]:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
return False
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_DYNAMIC_SHAPE))
return True
rank = len(out_tensor.shape)
if rank < 1:
# scalar
if self._parameters.trace_scalar_ops:
if TensorTracer.unsafe_scalar_trace(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_UNSAFE_SCALAR))
return True
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_SCALAR_GET_TRACED))
return False
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_SKIP_SCALAR))
return True
else:
# tensor
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
return False
def _filter_execution_path_operations(self, operations, fetches):
"""Returns the set of ops in the execution path to compute given fetches."""
# If no fetch provided, then return all operations.
if fetches is None:
return set(operations)
# Convert to list, if a single element is provided.
if not isinstance(fetches, (list, tuple)):
fetches = [fetches]
# If a tensor is given as fetch, convert it to op.
op_fetches = []
for fetch in fetches:
if isinstance(fetch, ops.Operation):
op_fetches.append(fetch)
elif isinstance(fetch, ops.Tensor):
op_fetches.append(fetch.op)
else:
        raise RuntimeError('Given fetch: %s is neither a tensor nor an op.'
                           % fetch)
execution_path_operations = set(op_fetches)
traverse_stack = list(op_fetches)
    while traverse_stack:
      head_op = traverse_stack.pop()
input_ops = [tensor_input.op for tensor_input in head_op.inputs]
input_ops.extend(head_op.control_inputs)
for input_op in input_ops:
if input_op not in execution_path_operations:
# Filter out loop condition operations, tracing them causes a cycle.
# Trace only the loop-body.
if TensorTracer.loop_cond_op(input_op):
continue
execution_path_operations.add(input_op)
traverse_stack.append(input_op)
return execution_path_operations
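# Illustrative sketch (pure Python, hypothetical graph): the backward
# traversal above, walking from the fetches through op inputs to collect
# every op on the execution path.
def _example_execution_path(inputs, fetches):
  """inputs: dict op -> list of input ops; fetches: list of op names."""
  on_path = set(fetches)
  stack = list(fetches)
  while stack:
    op = stack.pop()
    for inp in inputs.get(op, []):
      if inp not in on_path:
        on_path.add(inp)
        stack.append(inp)
  return on_path
# _example_execution_path({'loss': ['matmul'], 'matmul': ['x', 'w']},
#                         ['loss']) -> {'loss', 'matmul', 'x', 'w'}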
def _determine_and_instrument_traced_tensors(self, graph_order,
ops_in_exec_path,
tensor_trace_points,
report_handler):
"""Determines the tensors to trace and instruments the trace details."""
traced_tensors = []
checkpoint_operations = set([tensor.op
for (tensor, _) in tensor_trace_points])
for op_id, op in enumerate(graph_order.operations):
if checkpoint_operations and op not in checkpoint_operations:
continue
if self._skip_op(op_id, op, ops_in_exec_path, report_handler):
continue
for i in range(len(op.outputs)):
out_tensor = op.outputs[i]
if not self._skip_tensor(op_id, out_tensor, report_handler):
traced_tensors.append(out_tensor)
return traced_tensors
def _check_trace_files(self):
"""Checks if any requirements for trace files are satisfied."""
if not self._parameters.trace_dir:
# traces will be written to stderr. No need to check trace files.
return
if _trace_files_need_precreated(self._parameters.trace_dir):
for replica_id in range(0, self._tt_config.num_replicas):
trace_file_path = os.path.join(
self._parameters.trace_dir,
_COMPACT_TRACE_FILE_PREFIX) + '%d'%replica_id
if not gfile.Exists(trace_file_path):
raise RuntimeError(
'%s must be pre-created with the '
'appropriate properties.'%trace_file_path)
else:
if not gfile.Exists(self._parameters.trace_dir):
gfile.MkDir(self._parameters.trace_dir)
if not gfile.Exists(self._parameters.trace_dir):
raise RuntimeError('Failed to create %s'%self._parameters.trace_dir)
def _determine_trace_and_create_report(self, graph, ops_in_exec_path):
"""Work needs to be done prior to TPU or CPU tracing."""
self._check_trace_files()
graph_order = tensor_tracer_report.sort_tensors_and_ops(graph)
tensor_trace_points = graph.get_collection(_TENSOR_TRACER_COLLECTION)
report_handler = tensor_tracer_report.TTReportHandle()
traced_tensors = self._determine_and_instrument_traced_tensors(
graph_order, ops_in_exec_path, tensor_trace_points, report_handler)
tensor_trace_order = tensor_tracer_report.TensorTraceOrder(graph_order,
traced_tensors)
if self._use_tensor_values_cache():
_create_tensor_values_cache(graph, len(traced_tensors))
report_handler.create_report(self._tt_config, self._parameters,
tensor_trace_order, tensor_trace_points)
return tensor_trace_order
def _generate_flush_cache_op(self, graph, start_replica, on_tpu):
"""Generates an Op that will flush the cache to file.
Args:
graph: the graph of Ops
start_replica: the ID of the first replica being flushed by this Op.
on_tpu: if the graph is executed on TPU.
Returns:
The Op to flush the cache to file.
"""
def _make_flush_fun(replica_id):
"""Makes a function for flushing the cache for the given replica."""
def _fun():
"""A function that flushes the cache to a file."""
def _flush_fun(cache):
"""Flushes the cache to a file."""
if isinstance(replica_id, str):
replica_id_str = replica_id
else:
replica_id_str = '%d'%replica_id
if self._parameters.trace_dir:
output_path = (os.path.join(self._parameters.trace_dir,
_COMPACT_TRACE_FILE_PREFIX)
+ replica_id_str)
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
new_step_line = _REPLICA_ID_TAG + replica_id_str
print_op = logging_ops.print_v2(
new_step_line, '\n',
cache, '\n',
summarize=-1,
output_stream=output_stream)
with ops.control_dependencies([print_op]):
return constant_op.constant(0).op
cache = _get_tensor_values_cache(graph)
if on_tpu:
flush_op = tpu.outside_compilation(_flush_fun, cache.value())
else:
flush_op = _flush_fun(cache.value())
with ops.control_dependencies([flush_op]):
reset_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE,
dtype=cache.dtype,
shape=cache.shape)
assign_op = state_ops.assign(cache, reset_value).op
with ops.control_dependencies([assign_op]):
return flush_op.outputs[0]
return _fun
def _f(replica_id):
return _make_flush_fun(replica_id)
def _eq(x):
return math_ops.equal(x, self._replica_id)
def _do_nothing():
return constant_op.constant(0)
    return control_flow_ops.case(
        {
            _eq(start_replica): _f(start_replica),
            _eq(start_replica + 1): _f(start_replica + 1),
            _eq(start_replica + 2): _f(start_replica + 2),
            _eq(start_replica + 3): _f(start_replica + 3),
            _eq(start_replica + 4): _f(start_replica + 4),
            _eq(start_replica + 5): _f(start_replica + 5),
            _eq(start_replica + 6): _f(start_replica + 6),
            _eq(start_replica + 7): _f(start_replica + 7),
        },
        default=_do_nothing,
        exclusive=True).op
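  # --- Editor's sketch (illustrative, not part of the original file): the
  # eight-way `case` above hardwires the per-host replica fan-out, which is
  # why trace_tpu() rejects num_replicas_per_host > 8. The same branch table
  # could be built programmatically:
  #
  #   branches = {_eq(start_replica + i): _f(start_replica + i)
  #               for i in range(8)}
  #   return control_flow_ops.case(branches, default=_do_nothing,
  #                                exclusive=True).op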
def _flush_tensor_values_cache(self, graph, tensor_fetches, op_fetches,
on_tpu):
"""Flushes the intermediate tensor values in the graph to the cache.
Args:
graph: the graph of Ops
tensor_fetches: list of tensor results returned by the model_fn.
op_fetches: list of ops that are returned by the model_fn, e.g., train_op.
on_tpu: if the graph is executed on TPU.
Returns:
An identical copy of tensor_fetches.
"""
# Add a dependency to op and tensor fetches to make sure that all tracing
# ops are executed before flushing trace results.
with ops.control_dependencies(op_fetches +
[tensor.op for tensor in tensor_fetches]):
flush_cache_op_list = []
for host in range(self._tt_config.num_hosts):
start_replica = host * 8
flush_op = self._generate_flush_cache_op(graph, start_replica, on_tpu)
flush_cache_op_list.append(flush_op)
return control_flow_ops.tuple(tensor_fetches,
control_inputs=flush_cache_op_list)
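  # --- Editor's sketch (illustrative, not part of the original file): the
  # return statement above uses a common TF1 pattern: control_flow_ops.tuple
  # returns tensors identical to its inputs but with extra control inputs,
  # so fetching them forces the side-effecting flush ops to run first, e.g.:
  #
  #   fetches = control_flow_ops.tuple(fetches, control_inputs=[flush_op])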
def _process_tensor_fetches(self, tensor_fetches):
"""Check that tensor_fetches is not empty and have valid tensors."""
# If none or empty list.
if tensor_fetches is None:
raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
'None.')
if not isinstance(tensor_fetches, (list, tuple)):
tensor_fetches = [tensor_fetches]
elif not tensor_fetches:
      raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
                         'an empty list.')
fetches = []
for fetch in tensor_fetches:
if isinstance(fetch, ops.Tensor):
fetches.append(fetch)
else:
raise RuntimeError('Given tensor_fetch:%s is not a tensor.' % fetch)
return fetches
def _process_op_fetches(self, op_fetches):
"""Check that op_fetches have valid ops."""
if op_fetches is None:
return []
if not isinstance(op_fetches, (list, tuple)):
op_fetches = [op_fetches]
fetches = []
for fetch in op_fetches:
if isinstance(fetch, ops.Operation):
fetches.append(fetch)
else:
        logging.warning('Ignoring the given op_fetch: %s, which is not an op.',
                        fetch)
return fetches
def _convert_fetches_to_input_format(self, input_fetches, current_fetches):
"""Changes current_fetches' format, so that it matches input_fetches."""
if isinstance(input_fetches, ops.Tensor):
if len(current_fetches) != 1:
raise RuntimeError('Tensor tracer input/output fetches do not match.')
return current_fetches[0]
else:
      if len(current_fetches) != len(input_fetches):
raise RuntimeError('Tensor tracer input/output fetches do not match.')
elif isinstance(input_fetches, tuple):
return tuple(current_fetches)
else:
return current_fetches
def _get_op_control_flow_context(self, op):
"""Returns the control flow of the given op.
Args:
op: tf.Operation for which the control flow context is requested.
Returns:
      op_control_flow_context: the control flow context of the given op. If
        the operation type is LoopExit, returns the outer control flow
        context.
"""
# pylint: disable=protected-access
op_control_flow_context = op._control_flow_context
# pylint: enable=protected-access
if control_flow_util.IsLoopExit(op):
op_control_flow_context = op_control_flow_context.outer_context
return op_control_flow_context
def _trace_execution(self, graph,
tensor_fetches,
op_fetches=None,
on_tpu=True):
"""Commong tracing function for both CPU and TPUs.
The caller function should set device_type, num_replicas,
num_replicas_per_host, num_hosts and replica_id before calling
_trace_execution.
Args:
graph: the graph of Ops executed on the TPU.
      tensor_fetches: a (list, tuple, or single object) of tensor fetches
        returned by model_fn given to session.run. At least one tensor to
        fetch must be provided.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
on_tpu: True if executing on TPU.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If tensor_fetches is None or empty.
"""
def _cast_unsupported_dtypes(tensor):
"""Casts tensor to a supported type."""
      if tensor.dtype == dtypes.int64:
# outside-compilation doesn't support int64 input yet.
return math_ops.cast(tensor, dtypes.int32)
      if tensor.dtype in (dtypes.bfloat16, dtypes.float16):
# Since host can't handle bf16, convert tensor to f32.
return math_ops.cast(tensor, dtypes.float32)
return tensor
TensorTracer.check_device_type(self._tt_config.device_type)
    # Check tensor_fetches and op_fetches, and convert them to lists.
processed_t_fetches = self._process_tensor_fetches(tensor_fetches)
op_fetches = self._process_op_fetches(op_fetches)
all_fetches = op_fetches + [tensor.op for tensor in processed_t_fetches]
# Filter out the operations that won't be executed.
# if fetches=None, then ops_in_exec_path = set(operations)
exec_op_set = self._filter_execution_path_operations(graph.get_operations(),
all_fetches)
# Write report file, and determine the traced tensors.
tensor_trace_order = self._determine_trace_and_create_report(
graph, exec_op_set)
tensor_fetch_set = set(processed_t_fetches)
tracing_ops = []
# pylint: disable=protected-access
current_control_flow_context = graph._get_control_flow_context()
# pylint: enable=protected-access
sorted_exec_op_list = list(exec_op_set)
sorted_exec_op_list.sort(key=lambda op: op.name)
# Trace ops only if they are in the execution path.
for op in sorted_exec_op_list:
for i in range(len(op.outputs)):
out_tensor = op.outputs[i]
tensor_name = out_tensor.name
if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:
continue
# Create the list of consumers before calling _preprocess_traced_tensor.
        # Otherwise, adding the control input below will introduce a cycle
        # in the graph.
consumers = out_tensor.consumers()
# Not all consumers may be in the exec path. Filter out the consumers
# to keep the graph simpler.
consumers = [cop for cop in consumers if cop in exec_op_set]
# If there is no consumer of the tensor, there is no need to trace it;
# unless the tensor itself is one of the fetches.
is_a_fetched_tensor = out_tensor in tensor_fetch_set
if (not consumers) and (not is_a_fetched_tensor):
continue
op_control_flow_context = self._get_op_control_flow_context(op)
# pylint: disable=protected-access
graph._set_control_flow_context(op_control_flow_context)
# pylint: enable=protected-access
processed_out_tensor = self._preprocess_traced_tensor(out_tensor)
if on_tpu:
processed_out_tensor = _cast_unsupported_dtypes(processed_out_tensor)
if self._use_tensor_values_cache():
cache_idx = tensor_trace_order.tensorname_to_cache_idx[tensor_name]
trace_op = self._save_tensor_value_to_cache_op(graph,
cache_idx,
processed_out_tensor)
else:
def tpu_wrap_trace_fn(tensor, out_tensor_name):
"""Wraps the trace_fn with outside compilation if on TPUs."""
tensor_trace_fn = self._make_tensor_trace_fun(out_tensor_name,
tensor_trace_order)
if on_tpu:
return tpu.outside_compilation(tensor_trace_fn, tensor)
else:
return tensor_trace_fn(tensor)
def conditional_trace_fn(predicate_tensor, out_tensor, trace_fn,
out_tensor_name):
"""Creates a cond op that traces the out_tensor if predicate is satisfied."""
return control_flow_ops.cond(
predicate_tensor, lambda: trace_fn(out_tensor, out_tensor_name),
lambda: constant_op.constant(False)).op
if self._parameters.is_conditional_trace:
trace_op = conditional_trace_fn(processed_out_tensor, out_tensor,
tpu_wrap_trace_fn, tensor_name)
elif self._parameters.included_cores:
should_print = constant_op.constant(False)
for core in self._parameters.included_cores:
should_print = gen_math_ops.logical_or(
should_print, gen_math_ops.equal(self._replica_id, core))
trace_op = conditional_trace_fn(should_print, processed_out_tensor,
tpu_wrap_trace_fn, tensor_name)
else:
trace_op = tpu_wrap_trace_fn(processed_out_tensor, tensor_name)
if is_a_fetched_tensor:
tracing_ops.append(trace_op)
continue
# Add it to all consumers, as some consumers may not be executed if they
# are in a control flow.
for consumer_op in consumers:
# pylint: disable=protected-access
consumer_op._add_control_input(trace_op)
# pylint: enable=protected-access
# pylint: disable=protected-access
graph._set_control_flow_context(current_control_flow_context)
# pylint: enable=protected-access
if tracing_ops:
      # If we are tracing a fetched tensor, its dependency is stored in
# tracing_ops.
processed_t_fetches = control_flow_ops.tuple(processed_t_fetches,
control_inputs=tracing_ops)
if self._use_tensor_values_cache():
processed_t_fetches = self._flush_tensor_values_cache(graph,
processed_t_fetches,
op_fetches,
on_tpu=on_tpu)
# processed_t_fetches is a list at this point. Convert it to the same
# format as given in tensor_fetches.
return self._convert_fetches_to_input_format(tensor_fetches,
processed_t_fetches)
def trace_tpu(self, graph,
tensor_fetches,
op_fetches=None,
num_replicas=None,
num_replicas_per_host=None,
num_hosts=None):
"""Traces the tensors generated by TPU Ops in a TF graph.
Args:
graph: the graph of Ops executed on the TPU.
      tensor_fetches: a (list, tuple, or single object) of tensor fetches
        returned by model_fn given to session.run. At least one tensor to
        fetch must be provided.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
num_replicas: number of replicas used on the TPU.
num_replicas_per_host: number of replicas per TPU host.
num_hosts: total number of TPU hosts.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If num_replicas_per_host > 8.
RuntimeError: If tensor_fetches is None or empty.
"""
if graph in TensorTracer._traced_graphs:
logging.warning('Graph is already rewritten with tensor tracer, ignoring '
'multiple calls.')
return tensor_fetches
else:
TensorTracer._traced_graphs.add(graph)
self._tt_config.device_type = _DEVICE_TYPE_TPU
self._tt_config.num_replicas = num_replicas
self._tt_config.num_replicas_per_host = num_replicas_per_host
self._tt_config.num_hosts = num_hosts
if self._tt_config.num_replicas is not None:
if self._tt_config.num_replicas_per_host is None:
self._tt_config.num_replicas_per_host = 8
if self._tt_config.num_hosts is None:
self._tt_config.num_hosts = (
num_replicas // self._tt_config.num_replicas_per_host +
(num_replicas % self._tt_config.num_replicas_per_host > 0))
if self._tt_config.num_replicas_per_host > 8:
# Checks for the assumption in _generate_flush_cache_op().
        raise RuntimeError('num_replicas_per_host (%d) is greater than 8' %
                           self._tt_config.num_replicas_per_host)
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_before_tt.pbtxt')
with graph.as_default():
self._add_replica_id_to_graph()
tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
on_tpu=True)
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_after_tt.pbtxt')
return tensor_fetches
def trace_cpu(self, graph, tensor_fetches, op_fetches=None):
"""Traces the tensors generated by CPU Ops in a TF graph.
Args:
graph: the graph of Ops executed on the CPU.
      tensor_fetches: a (list, tuple, or single object) of tensor fetches
        returned by model_fn given to session.run. At least one tensor to
        fetch must be provided.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If tensor_fetches is None or empty.
"""
if graph in TensorTracer._traced_graphs:
logging.warning('Graph is already rewritten with tensor tracer, ignoring '
'multiple calls.')
return tensor_fetches
else:
TensorTracer._traced_graphs.add(graph)
self._tt_config.device_type = _DEVICE_TYPE_CPU
self._tt_config.num_replicas = 1
self._tt_config.num_replicas_per_host = 1
self._tt_config.num_hosts = 1
self._replica_id = 0
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_before_tt.pbtxt')
with graph.as_default():
tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
on_tpu=False)
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_after_tt.pbtxt')
return tensor_fetches
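# --- Editor's sketch (illustrative, not part of the original file): minimal
# CPU usage, assuming a default-constructible TensorTracer and a TF1-style
# graph/session workflow:
#
#   graph = ops.Graph()
#   with graph.as_default():
#     x = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
#     y = math_ops.reduce_sum(math_ops.matmul(x, x))
#     tracer = TensorTracer()
#     # Returns a copy of the fetches that carries tracing dependencies;
#     # traces go to stderr unless a trace_dir is configured.
#     traced_y = tracer.trace_cpu(graph, tensor_fetches=y)
#   with session.Session(graph=graph) as sess:  # hypothetical session import
#     sess.run(traced_y)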
|
tensorflow-master
|
tensorflow/python/tpu/tensor_tracer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the SessionRunHook for preemptible Cloud TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import threading
import time
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
class CloudTPUPreemptedHook(session_run_hook.SessionRunHook):
"""The SessionRunHook for preemptible Cloud TPUs.
  This is an implementation of SessionRunHook for the preemptible Google Cloud
TPU service. It attempts to close the session if the TPU is preempted, and
exits the coordinator process if the session cannot be closed.
"""
def __init__(self, cluster):
self._cluster = cluster
def after_create_session(self, session, coord):
if tpu_cluster_resolver.is_running_in_gce():
self._tpu_poller = _TPUPollingThread(self._cluster, session)
self._tpu_poller.start()
def end(self, session):
self._tpu_poller.stop()
class _TPUPollingThread(threading.Thread):
"""A thread that polls the state of a TPU node.
  When the node transitions into a TERMINAL state (PREEMPTED, TERMINATED)
  that is considered unrecoverable by the underlying infrastructure, the
  thread attempts to close the session, and exits the entire process if
  session.close() hangs.
"""
def __init__(self, cluster, session):
super(_TPUPollingThread, self).__init__()
self._running = True
self._session_closed = False
self._cluster = cluster
self._session = session
self._interval = 30
# Some of the Google API libraries are quite chatty, so disable them.
for name in ['googleapiclient.discovery', 'oauth2client.client']:
_logging.getLogger(name).setLevel(_logging.WARNING)
def stop(self):
self._running = False
self._session_closed = True
self.join()
def run(self):
if not tpu_cluster_resolver.is_running_in_gce():
logging.warning(
'TPUPollingThread is running in a non-GCE environment, exiting...')
self._running = False
return
while self._running:
response = self._cluster._fetch_cloud_tpu_metadata() # pylint: disable=protected-access
      logging.warning(
          'TPUPollingThread found TPU %s in state %s, and health %s.',
          self._cluster._tpu, response.get('state'),  # pylint: disable=protected-access
          response.get('health'))
if 'state' in response and response['state'] in [
'TERMINATED', 'PREEMPTED'
]:
logging.warning('TPU node %s reached an unrecoverable state %s, '
'terminating the session now.', self._cluster._tpu, # pylint: disable=protected-access
response['state'])
# Try to close the session.
self._session.close()
time.sleep(self._interval)
if not self._session_closed:
          # Raise an exception if session.close() hangs.
logging.warning('Cannot close session on TPU node %s.',
self._cluster._tpu) # pylint: disable=protected-access
raise errors.UnavailableError(
None, None, 'TPU node %s reached an unrecoverable state %s.' %
(self._cluster._tpu, response['state'])) # pylint: disable=protected-access
time.sleep(self._interval)
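# --- Editor's sketch (illustrative, not part of the original file): typical
# wiring of the hook; 'my-tpu' is a hypothetical TPU name.
#
#   resolver = tpu_cluster_resolver.TPUClusterResolver(tpu='my-tpu')
#   hook = CloudTPUPreemptedHook(cluster=resolver)
#   # e.g. pass hooks=[hook] to estimator.train(...) or to a
#   # tf.compat.v1.train.MonitoredTrainingSession(...).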
|
tensorflow-master
|
tensorflow/python/tpu/preempted_hook.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow_estimator.python.estimator.tpu.util import *
# pylint: enable=wildcard-import,unused-import
|
tensorflow-master
|
tensorflow/python/tpu/util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU Feature Column Library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_function
# pylint: disable=protected-access
_TPU_FC_TO_SCOPE = '_tpu_feature_column_scope'
_SUPPORTED_SEQUENCE_COLUMNS = (fc._SequenceCategoricalColumn,
fc_lib.SequenceCategoricalColumn)
_SUPPORTED_CATEGORICAL_COLUMNS_V2 = (fc_lib.IdentityCategoricalColumn,
fc_lib.VocabularyFileCategoricalColumn,
fc_lib.VocabularyListCategoricalColumn,
fc_lib.WeightedCategoricalColumn,
fc_lib.SequenceCategoricalColumn)
_SUPPORTED_CATEGORICAL_COLUMNS = (fc._IdentityCategoricalColumn,
fc._VocabularyFileCategoricalColumn,
fc._VocabularyListCategoricalColumn,
fc._WeightedCategoricalColumn,
fc._SequenceCategoricalColumn
) + _SUPPORTED_CATEGORICAL_COLUMNS_V2
_SEQUENCE_FEATURE_LENGTH_POSTFIX = '_seq_length_'
def embedding_column(categorical_column,
dimension,
combiner='mean',
initializer=None,
max_sequence_length=0):
"""TPU embedding_column for `tf.feature_column.embedding_column`.
  Note that the interface for the TPU embedding_column is different from the
  non-TPU version. The following args available for the non-TPU version are
  NOT supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and
  trainable.
Args:
categorical_column: A categorical_column returned from
categorical_column_with_identity, weighted_categorical_column,
categorical_column_with_vocabulary_file,
categorical_column_with_vocabulary_list,
sequence_categorical_column_with_identity,
sequence_categorical_column_with_vocabulary_file,
sequence_categorical_column_with_vocabulary_list
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row for a non-sequence column. For more information, see
`tf.feature_column.embedding_column`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
    max_sequence_length: A non-negative integer specifying the max sequence
      length. Any sequence shorter than this will be padded with 0 embeddings
      and any sequence longer will be truncated. This must be positive for
      sequence features and 0 for non-sequence features.
Returns:
A _TPUEmbeddingColumn.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if `initializer` is specified but not callable.
"""
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS):
raise TypeError(
        'categorical_column for tpu embedding_column must be type %s, '
        'got %s.' % (' or '.join([
cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS
]), type(categorical_column)))
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
embedding_shape = categorical_column._num_buckets, dimension # pylint: disable=protected-access
def _creator(weight_collections, scope):
embedding_column_layer = fc._EmbeddingColumnLayer(
embedding_shape=embedding_shape,
initializer=initializer,
weight_collections=weight_collections,
trainable=True,
name='embedding_column_layer')
return embedding_column_layer(None, scope=scope) # pylint: disable=not-callable
column = _TPUEmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
layer_creator=_creator,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=max_sequence_length)
  # For the embedding column, the initializer is hidden inside the creator
  # fn, which is not accessible later. So, we attach it to a special field.
  # Also note that non-TPU embedding columns and non-TPU shared embedding
  # columns handle the initializer differently. See shared_embedding_columns
  # for details.
column._tpu_initializer = initializer
return column
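# --- Editor's sketch (illustrative, not part of the original file): building
# a TPU embedding column from an identity categorical column; the feature
# name and sizes are hypothetical.
#
#   import tensorflow as tf
#   video_id = tf.feature_column.categorical_column_with_identity(
#       key='video_id', num_buckets=1000000)
#   video_embedding = embedding_column(video_id, dimension=64)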
def shared_embedding_columns(categorical_columns,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
max_sequence_lengths=None):
"""List of dense columns that convert from sparse, categorical input.
  Note that the interface for TPU shared_embedding_columns is different from
  the non-TPU version. The following args available for the non-TPU version
  are NOT supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and
  trainable.
Args:
categorical_columns: A list of categorical_columns returned from
categorical_column_with_identity, weighted_categorical_column,
categorical_column_with_vocabulary_file,
categorical_column_with_vocabulary_list,
sequence_categorical_column_with_identity,
sequence_categorical_column_with_vocabulary_file,
sequence_categorical_column_with_vocabulary_list
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row for a non-sequence column. For more information, see
`tf.feature_column.embedding_column`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
shared_embedding_collection_name: Optional name of the collection where
shared embedding weights are added. If not given, a reasonable name will
be chosen based on the names of `categorical_columns`. This is also used
in `variable_scope` when creating shared embedding weights.
    max_sequence_lengths: A list of non-negative integers, either None or
      empty or the same length as the argument categorical_columns. Entries
      corresponding to non-sequence columns must be 0 and entries
      corresponding to sequence columns specify the max sequence length for
      the column. Any sequence shorter than this will be padded with 0
      embeddings and any sequence longer will be truncated.
Returns:
    A list of _TPUSharedEmbeddingColumn objects.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if `initializer` is specified but not callable.
ValueError: if `max_sequence_lengths` is specified and not the same length
as `categorical_columns`.
ValueError: if `max_sequence_lengths` is positive for a non sequence column
or 0 for a sequence column.
"""
for categorical_column in categorical_columns:
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS):
raise TypeError(
          'categorical_column for tpu shared_embedding_columns must be type '
          '%s, got %s.' % (' or '.join([
cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS
]), type(categorical_column)))
if not max_sequence_lengths:
max_sequence_lengths = [0] * len(categorical_columns)
if len(max_sequence_lengths) != len(categorical_columns):
raise ValueError('max_sequence_lengths and categorical_columns must be of '
'the same length. len(max_sequence_lengths)={} '
'len(categorical_columns)={}.'.format(
len(max_sequence_lengths), len(categorical_columns)))
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
num_buckets = sorted_columns[0]._num_buckets # pylint: disable=protected-access
for c in sorted_columns[1:]:
if num_buckets != c._num_buckets: # pylint: disable=protected-access
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same number of buckets. Given column: {} with buckets: {} does '
'not match column: {} with buckets: {}'.format(
sorted_columns[0], num_buckets, c, c._num_buckets)) # pylint: disable=protected-access
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
tpu_columns = []
# Create the state (_SharedEmbeddingColumnLayer) here.
for categorical_column, max_sequence_length in zip(
categorical_columns, max_sequence_lengths):
column = _TPUSharedEmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
shared_embedding_collection_name=shared_embedding_collection_name,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=max_sequence_length)
tpu_columns.append(column)
return tpu_columns
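# --- Editor's sketch (illustrative, not part of the original file): sharing
# one embedding table between two id columns; both must have the same bucket
# count. Names and sizes are hypothetical.
#
#   import tensorflow as tf
#   query_id = tf.feature_column.categorical_column_with_identity(
#       key='query_id', num_buckets=10000)
#   doc_id = tf.feature_column.categorical_column_with_identity(
#       key='doc_id', num_buckets=10000)
#   shared_cols = shared_embedding_columns([query_id, doc_id], dimension=32)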
class _TPUBaseEmbeddingColumn(object):
"""Base class for TPU Embedding Column."""
def __init__(self, categorical_column, max_sequence_length=0):
self._tpu_categorical_column = categorical_column
self._max_sequence_length = max_sequence_length
if (self.is_sequence_column() and max_sequence_length < 1):
raise ValueError('max_sequence_length must be greater than 0 for '
'sequence columns. Got max_sequence_length={} for '
'sequence column {}.'.format(max_sequence_length,
categorical_column.name))
if (not self.is_sequence_column() and max_sequence_length != 0):
raise ValueError('Non zero max_seq_length={} specified for non '
'sequence column {}.'.format(max_sequence_length,
categorical_column.name))
def get_combiner(self):
"""Returns the embedding combiner."""
raise NotImplementedError('not implemented')
def get_embedding_table_size(self):
"""Returns the embedding table size, tuple of vocab size and dimension."""
raise NotImplementedError('not implemented')
  def get_feature_key_name(self):
    """Returns the feature key name in the features dict."""
    raise NotImplementedError('not implemented')
  def get_weight_key_name(self):
    """Returns the key name for weights."""
    raise NotImplementedError('not implemented')
  def get_embedding_var_name(self):
    """Returns the embedding variable name.
    Feature key name and embedding variable name usually map one-to-one, but
    for shared embedding columns the mapping is many-to-one.
    """
    raise NotImplementedError('not implemented')
  def get_initializer(self):
    """Returns the initializer."""
    raise NotImplementedError('not implemented')
  def is_categorical_column_weighted(self):
    """Checks if the categorical column of the embedding column is weighted."""
    raise NotImplementedError('not implemented')
def is_sequence_column(self):
return isinstance(self._tpu_categorical_column, _SUPPORTED_SEQUENCE_COLUMNS)
def get_max_sequence_length(self):
return self._max_sequence_length
def get_sequence_length_feature_key_name(self):
"""Get the key for the associated sequence length feature."""
return get_sequence_length_feature_key_name_from_feature_key_name(
self.get_feature_key_name())
class _TPUEmbeddingColumn(_TPUBaseEmbeddingColumn, fc._EmbeddingColumn):
"""Core Embedding Column."""
def __new__(cls,
categorical_column,
dimension,
combiner='mean',
layer_creator=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=0):
# Note, args ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable
# are not supported on TPU. They are solely for matching the signature of
# __new__ of parent class fc._EmbeddingColumn.
return fc._EmbeddingColumn.__new__(
cls,
categorical_column,
dimension,
combiner=combiner,
layer_creator=layer_creator,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable)
def __init__(self,
categorical_column,
dimension,
combiner='mean',
layer_creator=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=0):
_TPUBaseEmbeddingColumn.__init__(self, categorical_column,
max_sequence_length=max_sequence_length)
self._key = None
def get_combiner(self):
return self.combiner
def get_embedding_table_size(self):
"""Returns num_ids and width."""
return (self.categorical_column._num_buckets, self.dimension)
def get_feature_key_name(self):
"""get_feature_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.categorical_column.name
return self.categorical_column.name
def get_weight_key_name(self):
"""get_weight_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.weight_feature_key
return None
def get_embedding_var_name(self):
"""get_embedding_var_name."""
return self.categorical_column.name
def get_initializer(self):
return self._tpu_initializer
def is_categorical_column_weighted(self):
"""Check if the categorical column of the embedding column is weighted."""
if isinstance(
self.categorical_column,
(
fc._WeightedCategoricalColumn, # pylint: disable=protected-access
fc_lib.WeightedCategoricalColumn)):
return True
return False
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc._EmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc._EmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
# TPU mode
# Get the embeddings from the LazyBuilder.
tensor = inputs.get(self.get_feature_key_name())
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(self.get_embedding_var_name(),
'embedding_weights')
return tensor
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc._EmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc._EmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
tensor = inputs.get(self.get_feature_key_name())
tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())
# inputs is a _LazyBuilder and for rank 1 tensors, it calls expand_dims(-1).
# We need to undo this to match the standard CPU sequence embedding.
tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(self.get_embedding_var_name(),
'embedding_weights')
return fc._SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
class _TPUSharedEmbeddingColumn(_TPUBaseEmbeddingColumn,
fc._SharedEmbeddingColumn):
"""Core Shared Embedding Column."""
def __new__(cls,
categorical_column,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=0):
return fc._SharedEmbeddingColumn.__new__(
cls,
categorical_column,
dimension,
combiner=combiner,
initializer=initializer,
shared_embedding_collection_name=shared_embedding_collection_name,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable)
def __init__(self,
categorical_column,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=0):
_TPUBaseEmbeddingColumn.__init__(self, categorical_column,
max_sequence_length=max_sequence_length)
self._key = None
def get_combiner(self):
return self.combiner
def get_embedding_table_size(self):
"""Returns num_ids and width."""
return (self.categorical_column._num_buckets, self.dimension)
def get_feature_key_name(self):
"""get_feature_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.categorical_column.name
return self.categorical_column.name
def get_weight_key_name(self):
"""get_weight_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.weight_feature_key
return None
def get_embedding_var_name(self):
"""get_embedding_var_name."""
return self.shared_embedding_collection_name
def get_initializer(self):
return self.initializer
def is_categorical_column_weighted(self):
"""Check if the categorical column of the embedding column is weighted."""
if isinstance(
self.categorical_column,
(
fc._WeightedCategoricalColumn, # pylint: disable=protected-access
fc_lib.WeightedCategoricalColumn)):
return True
return False
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc._SharedEmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc._SharedEmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
# TPU mode
# Get the embeddings from the LazyBuilder.
tensor = inputs.get(self.get_feature_key_name())
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(
self.get_embedding_var_name(),
'embedding_weights',
is_shared_embedding=True)
return tensor
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc._SharedEmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc._SharedEmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
tensor = inputs.get(self.get_feature_key_name())
tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(
self.get_embedding_var_name(),
'embedding_weights',
is_shared_embedding=True)
return fc._SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
def _record_variable_scope_and_name(embedding_var_name,
embedding_var_name_in_fc,
is_shared_embedding=False):
"""Add embedding variable name and scope to collection."""
g = ops.get_default_graph()
collection = g.get_collection_ref(_TPU_FC_TO_SCOPE)
if not collection:
collection.append({})
var_def_dict = collection[0]
captured_scope = variable_scope.get_variable_scope()
captured_scope_name = captured_scope.name
if embedding_var_name in var_def_dict:
if (var_def_dict[embedding_var_name][0] != captured_scope_name
and not is_shared_embedding):
raise ValueError(
'For embedding var name {}, the variable scope name is different, '
'got {}; expected {}'.format(embedding_var_name,
captured_scope_name,
var_def_dict[embedding_var_name][0]))
if var_def_dict[embedding_var_name][1] != embedding_var_name_in_fc:
raise ValueError(
'For embedding var name {}, the embedding name is different, '
'got {}; expected {}'.format(embedding_var_name,
embedding_var_name_in_fc,
var_def_dict[embedding_var_name][1]))
else:
var_def_dict[embedding_var_name] = (captured_scope_name,
embedding_var_name_in_fc)
def _is_running_on_cpu():
"""Returns True if the current context is CPU model."""
return tpu_function.get_tpu_context().number_of_shards is None
def get_sequence_length_feature_key_name_from_feature_key_name(feature_name):
"""Gets the name of the sequence length feature from that of the base feature.
Args:
feature_name: The feature key of a sequence column.
Returns:
A string which is the feature key for the associated feature length column.
"""
return feature_name + _SEQUENCE_FEATURE_LENGTH_POSTFIX
def split_sequence_columns(feature_columns):
"""Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns.
For use in a TPUEstimator model_fn function. E.g.
def model_fn(features):
sequence_columns, feature_columns = (
tf.tpu.feature_column.split_sequence_columns(feature_columns))
input = tf.feature_column.input_layer(
features=features, feature_columns=feature_columns)
sequence_features, sequence_lengths = (
tf.contrib.feature_column.sequence_input_layer(
features=features, feature_columns=sequence_columns))
Args:
feature_columns: A list of _TPUEmbeddingColumns to split.
Returns:
Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the
second is the non-sequence columns.
"""
sequence_columns = []
non_sequence_columns = []
for column in feature_columns:
if not isinstance(column, (_TPUEmbeddingColumn, _TPUSharedEmbeddingColumn)):
raise TypeError(
'column must be a _TPUEmbeddingColumn or _TPUSharedEmbeddingColumn '
'but got %s instead.' % (type(column)))
if column.is_sequence_column():
sequence_columns.append(column)
else:
non_sequence_columns.append(column)
return sequence_columns, non_sequence_columns
|
tensorflow-master
|
tensorflow/python/tpu/feature_column.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for topology.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.tpu import topology
class TopologyTest(test.TestCase):
def testSerialization(self):
"""Tests if the class is able to generate serialized strings."""
original_topology = topology.Topology(
mesh_shape=[1, 1, 2],
device_coordinates=[[[0, 0, 0], [0, 0, 1]]],
)
serialized_str = original_topology.serialized()
new_topology = topology.Topology(serialized=serialized_str)
# Make sure the topology recovered from serialized str is same as the
# original topology.
self.assertAllEqual(
original_topology.mesh_shape, new_topology.mesh_shape)
self.assertAllEqual(
original_topology.device_coordinates, new_topology.device_coordinates)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/tpu/topology_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of Cloud TPU helper functions for data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import functional_ops
def _TextLineDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TextLineDataset(filename, buffer_size=buffer_size)
return dataset
def _TFRecordDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
_FILETYPE_MAP = {
'tfrecord': _TFRecordDataset,
'textline': _TextLineDataset,
'text': _TextLineDataset,
}
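# --- Editor's sketch (illustrative addition, not part of the original file):
# `filetype` may also be a callable mapping a filename to a dataset. A
# hypothetical fixed-length-record reader in the style of the helpers above
# (the 64-byte record size is an assumption):
def _ExampleFixedLengthDataset(filename):
  record_bytes = 64  # hypothetical record size
  return readers.FixedLengthRecordDataset(filename, record_bytes=record_bytes)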
def StreamingFilesDataset(files,
filetype=None,
file_reader_job=None,
worker_job=None,
num_epochs=None,
filename_shuffle_buffer_size=None,
num_parallel_reads=None,
batch_transfer_size=None,
sloppy=None):
"""StreamingFilesDataset constructs a dataset to stream from workers (GCE VM).
Because Cloud TPUs are allocated over the network, a Cloud TPU cannot read
files local to your GCE VM. In order to train using files stored on your local
VM (e.g. on local SSD for extreme performance), use the StreamingFilesDataset
helper to generate a dataset to feed your Cloud TPU with files from your GCE
VM.
  The resulting dataset may raise an OutOfRangeError if there are no files
found as a result of the fileglob expansion.
  Note: StreamingFilesDataset assumes that the session is using a
  TPUClusterResolver and therefore has a worker and a coordinator job. File
  loading will be done on the coordinator job.
Args:
files: A string glob to match files, or a `tf.data.Dataset` generating file
names.
filetype: A string (one of 'tfrecord', or 'textline') or a single-argument
TensorFlow function that when given a filename returns a dataset.
file_reader_job: An optional string that corresponds to the job that should
perform the file reads.
worker_job: An optional string that corresponds to the job that should
process the tensors (i.e. your GPU or TPU worker).
num_epochs: The number of epochs through the training set that should be
generated. By default, it will repeat infinitely.
filename_shuffle_buffer_size: An optional integer whose value controls the
shuffling of the file names. If you would like to read from the files in
the same order, set to 0 or False.
num_parallel_reads: An optional integer controlling the number of files to
read from concurrently. (Set to 1 for no parallelism.)
batch_transfer_size: An optional integer controlling the batching used to
amortize the remote function invocation overhead. Set to a very large
number to increase throughput. Set to a very small number to reduce memory
consumption. Set to False to skip batching.
    sloppy: (Optional.) If `False`, read input data while maintaining a
      deterministic order (this may have significant performance impacts).
      Defaults to `True`.
Returns:
    A `tf.data.Dataset` with an infinite stream of elements generated by a
    parallel interleaving of the set of files matched (or generated) by
    `files`, with a type matching the output of the dataset specified by
    `filetype`.
Raises:
ValueError: if any argument is not of the expected type.
"""
if filetype is None:
filetype = 'tfrecord'
if isinstance(filetype, str):
if filetype not in _FILETYPE_MAP:
raise ValueError('Unexpected filetype: %s' % filetype)
reader_fn = _FILETYPE_MAP[filetype]
elif callable(filetype):
reader_fn = filetype
else:
raise ValueError('filetype should be a string or a callable')
file_reader_job = file_reader_job or 'coordinator'
worker_job = worker_job or 'worker'
if filename_shuffle_buffer_size is None:
filename_shuffle_buffer_size = 4096
num_parallel_reads = num_parallel_reads or 8
if batch_transfer_size is None:
batch_transfer_size = 256
if sloppy is None:
sloppy = True
if file_reader_job == 'coordinator':
file_reader_device = '/job:coordinator/task:0'
else:
file_reader_device = '/job:%s' % file_reader_job
with ops.device(file_reader_device):
if isinstance(files, str):
source_dataset = dataset_ops.Dataset.list_files(files)
elif isinstance(files, dataset_ops.DatasetV2):
source_dataset = files
else:
raise ValueError('files was not a string or a dataset: %s' % files)
if filename_shuffle_buffer_size:
source_dataset = source_dataset.shuffle(
buffer_size=filename_shuffle_buffer_size)
source_dataset = source_dataset.apply(
interleave_ops.parallel_interleave(
reader_fn, cycle_length=num_parallel_reads, sloppy=sloppy))
source_dataset = source_dataset.repeat(num_epochs)
if batch_transfer_size:
source_dataset = source_dataset.batch(batch_transfer_size)
source_dataset = source_dataset.prefetch(1)
source_iterator = dataset_ops.make_one_shot_iterator(source_dataset)
source_handle = source_iterator.string_handle()
@function.Defun(dtypes.string)
def LoadingFunc(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(source_dataset),
dataset_ops.get_legacy_output_shapes(source_dataset))
return remote_iterator.get_next()
def MapFn(unused_input):
source_dataset_output_types = dataset_ops.get_legacy_output_types(
source_dataset)
if isinstance(source_dataset_output_types, dtypes.DType):
output_types = [source_dataset_output_types]
elif isinstance(source_dataset_output_types, (list, tuple)):
output_types = source_dataset_output_types
else:
raise ValueError('source dataset has invalid output types')
remote_calls = functional_ops.remote_call(
args=[source_handle],
Tout=output_types,
f=LoadingFunc,
target='/job:%s/replica:0/task:0/cpu:0' % file_reader_job)
if len(remote_calls) == 1:
return remote_calls[0]
else:
return remote_calls
with ops.device('/job:%s' % worker_job):
output_dataset = dataset_ops.Dataset.range(2).repeat().map(
MapFn, num_parallel_calls=4 if sloppy else None)
output_dataset = output_dataset.prefetch(1)
if batch_transfer_size:
# Undo the batching used during the transfer.
output_dataset = output_dataset.apply(batching.unbatch()).prefetch(1)
return output_dataset
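# --- Editor's sketch (illustrative, not part of the original file): streaming
# local TFRecords from the coordinator VM to a TPU worker; the glob is
# hypothetical.
#
#   dataset = StreamingFilesDataset('/mnt/disks/ssd/train-*.tfrecord',
#                                   filetype='tfrecord')
#   iterator = dataset_ops.make_one_shot_iterator(dataset)
#   batch = iterator.get_next()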
|
tensorflow-master
|
tensorflow/python/tpu/datasets.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Ops related to Tensor Processing Units."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/python/tpu/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU datasets tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.lib.io import python_io
from tensorflow.python.platform import test
from tensorflow.python.tpu import datasets
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
_NUM_FILES = 10
_NUM_ENTRIES = 20
class DatasetsTest(test.TestCase):
def setUp(self):
super(DatasetsTest, self).setUp()
self._coord = server_lib.Server.create_local_server()
self._worker = server_lib.Server.create_local_server()
self._cluster_def = cluster_pb2.ClusterDef()
worker_job = self._cluster_def.job.add()
worker_job.name = 'worker'
worker_job.tasks[0] = self._worker.target[len('grpc://'):]
coord_job = self._cluster_def.job.add()
coord_job.name = 'coordinator'
coord_job.tasks[0] = self._coord.target[len('grpc://'):]
session_config = config_pb2.ConfigProto(cluster_def=self._cluster_def)
self._sess = session.Session(self._worker.target, config=session_config)
self._worker_device = '/job:' + worker_job.name
def testTextLineDataset(self):
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'text_line.%d.txt' % i)
contents = []
for j in range(_NUM_ENTRIES):
contents.append(compat.as_bytes('%d: %d' % (i, j)))
with open(filename, 'wb') as f:
f.write(b'\n'.join(contents))
all_contents.extend(contents)
dataset = datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), 'text_line.*.txt'), filetype='text')
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testTFRecordDataset(self):
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'tf_record.%d' % i)
writer = python_io.TFRecordWriter(filename)
for j in range(_NUM_ENTRIES):
record = compat.as_bytes('Record %d of file %d' % (j, i))
writer.write(record)
all_contents.append(record)
writer.close()
dataset = datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), 'tf_record*'), filetype='tfrecord')
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testTFRecordDatasetFromDataset(self):
filenames = []
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'tf_record.%d' % i)
filenames.append(filename)
writer = python_io.TFRecordWriter(filename)
for j in range(_NUM_ENTRIES):
record = compat.as_bytes('Record %d of file %d' % (j, i))
writer.write(record)
all_contents.append(record)
writer.close()
filenames = dataset_ops.Dataset.from_tensor_slices(filenames)
dataset = datasets.StreamingFilesDataset(filenames, filetype='tfrecord')
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testArbitraryReaderFunc(self):
def MakeRecord(i, j):
return compat.as_bytes('%04d-%04d' % (i, j))
record_bytes = len(MakeRecord(10, 200))
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'fixed_length.%d' % i)
with open(filename, 'wb') as f:
for j in range(_NUM_ENTRIES):
record = MakeRecord(i, j)
f.write(record)
all_contents.append(record)
def FixedLengthFile(filename):
return readers.FixedLengthRecordDataset(filename, record_bytes)
dataset = datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), 'fixed_length*'),
filetype=FixedLengthFile)
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testArbitraryReaderFuncFromDatasetGenerator(self):
def my_generator():
yield (1, [1] * 10)
def gen_dataset(dummy):
return dataset_ops.Dataset.from_generator(
my_generator, (dtypes.int64, dtypes.int64),
(tensor_shape.TensorShape([]), tensor_shape.TensorShape([10])))
dataset = datasets.StreamingFilesDataset(
dataset_ops.Dataset.range(10), filetype=gen_dataset)
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = self._sess.run(get_next)
self.assertIsInstance(retrieved_values, (list, tuple))
self.assertEqual(len(retrieved_values), 2)
self.assertEqual(retrieved_values[0], 1)
self.assertItemsEqual(retrieved_values[1], [1] * 10)
def testUnexpectedFiletypeString(self):
with self.assertRaises(ValueError):
datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), '*'), filetype='foo')
def testUnexpectedFiletypeType(self):
with self.assertRaises(ValueError):
datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), '*'), filetype=3)
def testUnexpectedFilesType(self):
with self.assertRaises(ValueError):
datasets.StreamingFilesDataset(123, filetype='tfrecord')
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/tpu/datasets_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Helper library for functions used during TPU compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
class TpuContext(object):
"""A context object holding state about the TPU computation being built."""
def __init__(self):
"""Creates a new TpuContext."""
self._number_of_shards = None
@property
def number_of_shards(self):
return self._number_of_shards
def set_number_of_shards(self, number_of_shards):
self._number_of_shards = number_of_shards
# The Tpu context holds the number of shards when a sharded computation is
# being built, or None if no computation is being built.
_current_tpu_context = TpuContext()
@contextlib.contextmanager
def tpu_shard_context(number_of_shards):
if _current_tpu_context.number_of_shards is not None:
raise NotImplementedError("tpu_shard_context cannot be nested.")
try:
_current_tpu_context.set_number_of_shards(number_of_shards)
yield
finally:
_current_tpu_context.set_number_of_shards(None)
def get_tpu_context():
return _current_tpu_context
# Decorator for a TPU computation function that is passed to tpu.rewrite().
# If there is an embedded training loop in the function, trace tools will
# generate step markers for each iteration.
def on_device_training_loop(func):
# Value for this attribute is from xla.DebugOptions.StepMarkerLocation.
setattr(func, "step_marker_location", "STEP_MARK_AT_TOP_LEVEL_WHILE_LOOP")
return func
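# Illustrative sketch, not part of the original file: how tpu_shard_context
# and get_tpu_context are expected to interact. Only names defined above in
# this module are used.
def _example_tpu_shard_context_usage():
  """Shows that the shard count is visible only inside the context."""
  assert get_tpu_context().number_of_shards is None
  with tpu_shard_context(8):
    # Code that builds a sharded computation can query the shard count here.
    assert get_tpu_context().number_of_shards == 8
  # On exit the shard count is cleared again.
  assert get_tpu_context().number_of_shards is None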
|
tensorflow-master
|
tensorflow/python/tpu/tpu_function.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Library for constructing a training loop, suitable for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu_function
def while_loop(condition, body, inputs=None, infeed_queue=None, name=None):
"""Builds a training loop for TPUs.
The set of loop-carried tensors corresponds to `inputs`. Both
`condition` and `body` take the current value of the loop-carried
  tensors. `body` additionally takes a tuple of infeed values from
  `infeed_queue` if `infeed_queue` is not None. `condition` must return a
single boolean value that determines whether iteration
continues. `body` must return an updated list of values for the
loop-carried tensors.
Args:
condition: a Python function that builds the loop condition.
body: a Python function that builds the loop body.
inputs: a list of initial values passed into the training loop, or
None (equivalent to an empty list).
infeed_queue: if not None, the infeed queue from which to append a tuple
      of arguments as inputs to `body`.
name: (Deprecated) Does nothing.
Returns:
The final values of the loop-carried tensors.
Raises:
TypeError: if body or condition has the wrong signature.
"""
del name
# Converts inputs to Tensors.
inputs = [] if inputs is None else [ops.convert_to_tensor(x) for
x in inputs]
input_types = [x.dtype for x in inputs]
input_arity = len(inputs)
body_arg_error = xla.check_function_argument_count(
body, input_arity, infeed_queue)
if body_arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied loop body function cannot be called with the specified "
"inputs. You specified %d inputs: %s, but the loop body needs %s" % (
input_arity, str([i.name for i in inputs]), body_arg_error))
else:
raise TypeError(
"Supplied loop body function cannot be called with the specified "
"inputs. You specified %d inputs: %s and %d additional inputs from "
"infeed, but the computation needs %s" % (input_arity, str(
[i.name for i in inputs]), infeed_queue.number_of_tuple_elements,
body_arg_error))
condition_arg_error = xla.check_function_argument_count(
condition, input_arity, None)
if condition_arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied loop condition function cannot be called with the "
"specified inputs. You specified %d inputs: %s, but the loop "
"condition needs %s" % (input_arity, str([i.name for i in inputs]),
condition_arg_error))
else:
raise TypeError(
"Supplied loop condition function cannot be called with the "
"specified inputs. You specified %d inputs: %s, but the loop "
"condition needs %s. Note that infeed is not passed to the loop "
"condition." % (input_arity, str([i.name for i in inputs]),
condition_arg_error))
def condition_wrapper(*inputs):
# Discards the dummy output added for arity-0 loops.
if input_arity == 0:
inputs = []
return condition(*inputs)
def body_wrapper(*inputs):
"""Wrapper around `body` that handles infeed queues and control deps."""
inputs = list(inputs)
# Discards the dummy output added for arity-0 loops.
if input_arity == 0:
inputs = []
# Runs `body` with the dequeue_ops appended.
if infeed_queue:
number_of_shards = tpu_function.get_tpu_context().number_of_shards
if number_of_shards is None:
raise ValueError("Can't build training loop with infeed when there is "
"no tpu_shard_context. Are you building a loop or "
"graph directly rather than from inside tpu.rewrite, "
"tpu.batch_parallel, tpu.shard, or tpu.replicate?")
infeed_queue.set_number_of_shards(number_of_shards)
      dequeue_ops = list(infeed_queue.generate_dequeue_op())
else:
dequeue_ops = []
outputs = body(*(inputs + dequeue_ops))
# If the computation only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs
if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU training loop body must return zero or more Tensor values "
"followed by zero or more Operations.")
output_types = [op.dtype for op in output_tensors]
if input_types != output_types:
raise TypeError(
"Mismatch between input types and output types for training loop "
"body: {} vs {}".format(input_types, output_types))
# Add the dequeue operations to output_operations to ensure they are run
# by the loop, even if the programmer's loop body does not use them.
output_operations += dequeue_ops
# Add a dummy output, if needed.
if not output_tensors:
      output_tensors = [array_ops.constant(0)]
if output_operations:
# TODO(phawkins): in principle this is too restrictive since it serializes
# the training loop steps. In practice it does not matter since this loop
# will be compiled by XLA.
output_tensors = control_flow_ops.tuple(output_tensors,
control_inputs=output_operations)
if tensor_tracer.TensorTracer.is_enabled():
num_replicas = tpu_function.get_tpu_context().number_of_shards
if num_replicas is None:
num_replicas = 1
tt = tensor_tracer.TensorTracer()
output_tensors = tt.trace_tpu(ops.get_default_graph(),
output_tensors, None,
num_replicas)
return output_tensors
# If the body has arity 0, add a dummy loop-carried value to which we can add
# control dependencies from any side-effecting operations.
if input_arity == 0:
inputs = [array_ops.constant(0)]
return control_flow_ops.while_loop(
condition_wrapper, body_wrapper, inputs, name="", parallel_iterations=1)
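# Illustrative sketch, not part of the original file: a minimal use of
# `while_loop` above. The loop carries a single scalar counter and uses no
# infeed queue.
def _example_while_loop():
  def _cond(i):
    return i < 10
  def _body(i):
    return [i + 1]
  # Returns the final value of the counter (10) as a graph-mode Tensor.
  return while_loop(_cond, _body, inputs=[0])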
def repeat(n, body, inputs=None, infeed_queue=None, name=None):
"""Builds a training loop that executes a fixed number of iterations.
The set of loop-carried tensors correspond to `inputs`.
`body` must be a function that takes and returns the values of the
loop-carried tensors.
Args:
n: the number of loop iterations
body: a Python function that builds the loop body.
inputs: a list of initial values passed into the training loop or
None (equivalent to an empty list).
infeed_queue: if not None, the infeed queue from which to append a tuple
      of arguments as inputs to `body`.
name: (Deprecated) Does nothing.
Returns:
The final values of the loop-carried tensors.
Raises:
ValueError: if there is a type error.
"""
def _convert_to_list(xs):
if not isinstance(xs, (list, tuple)):
return [xs]
else:
return list(xs)
def cond(i, *args):
del args
return i < n
def body_wrapper(i, *args):
return [i + 1] + _convert_to_list(body(*args))
inputs = [0] if inputs is None else [0] + _convert_to_list(inputs)
outputs = while_loop(
cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name)
outputs = _convert_to_list(outputs)
if len(outputs) == 1:
# Returns the Op rather than an empty list.
return outputs[0].op
else:
return outputs[1:]
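# Illustrative sketch, not part of the original file: `repeat` synthesizes the
# counter and condition for `while_loop`, so a fixed-iteration step can be
# written without an explicit condition. `train_step` and `initial_loss` are
# hypothetical caller-supplied values: a function taking and returning the
# loop-carried tensors, and the initial value for them.
def _example_repeat(train_step, initial_loss):
  # Runs `train_step` 100 times, threading the loss value through the loop.
  return repeat(100, train_step, inputs=[initial_loss])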
|
tensorflow-master
|
tensorflow/python/tpu/training_loop.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tpu_function helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu_sharding
class ShardingTest(test.TestCase):
def testFreeze(self):
"""Tests that freezing a policy applies default values."""
p1 = tpu_sharding.ShardingPolicy()
p1.freeze()
self.assertEqual(p1.number_of_shards,
tpu_sharding._DEFAULT_NUMBER_OF_SHARDS)
self.assertEqual(p1.shard_dimension, tpu_sharding._DEFAULT_SHARD_DIMENSION)
p2 = tpu_sharding.ShardingPolicy()
p2.set_number_of_shards(17)
p2.set_shard_dimension(23)
p2.freeze()
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 23)
def testFrozen(self):
"""Tests that frozen policies can't be changed."""
p1 = tpu_sharding.ShardingPolicy()
p1.freeze()
with self.assertRaises(ValueError):
p1.set_number_of_shards(17)
with self.assertRaises(ValueError):
p1.set_shard_dimension(22)
def testStr(self):
"""Tests the string representation."""
p1 = tpu_sharding.ShardingPolicy()
self.assertEqual(str(p1), "ShardingPolicy(unset)")
p1.set_number_of_shards(17)
self.assertEqual(str(p1), "ShardingPolicy(unset)")
p1.set_shard_dimension(8)
self.assertEqual(str(p1), "ShardingPolicy(17 shards dimension 8)")
def testMerge(self):
"""Tests that merging works."""
p1 = tpu_sharding.ShardingPolicy()
p1.set_number_of_shards(17)
p1.set_shard_dimension(23)
p2 = tpu_sharding.ShardingPolicy()
p2.merge(p1)
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 23)
p1 = tpu_sharding.ShardingPolicy()
p1.set_shard_dimension(12)
p2.merge(p1)
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 12)
p2.freeze()
p2.merge(p1)
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 12)
p1.set_number_of_shards(1)
with self.assertRaises(ValueError):
p2.merge(p1)
p1 = tpu_sharding.ShardingPolicy()
p1.set_number_of_shards(17)
p2.merge(p1)
p1.set_shard_dimension(2)
with self.assertRaises(ValueError):
p2.merge(p1)
def testGetShardedShape(self):
"""Tests getting a sharded shape."""
p = tpu_sharding.ShardingPolicy()
p.set_number_of_shards(3)
p.set_shard_dimension(1)
self.assertEqual(p.get_sharded_shape([4, 9]), [4, 3])
p.freeze()
with self.assertRaises(ValueError):
p.set_shard_dimension(0)
with self.assertRaises(ValueError):
_ = p.get_sharded_shape([4, 9], shard_index=4)
with self.assertRaises(ValueError):
_ = p.get_sharded_shape([4, 9], shard_index=-1)
with self.assertRaises(TypeError):
_ = p.get_sharded_shape("not_a_shape")
with self.assertRaises(ValueError):
_ = p.get_sharded_shape(tensor_shape.TensorShape(None))
with self.assertRaises(ValueError):
_ = p.get_sharded_shape([4, 10], shard_index=-1)
def testGetUnshardedShape(self):
"""Tests getting an unsharded shape."""
p = tpu_sharding.ShardingPolicy()
p.set_number_of_shards(2)
p.set_shard_dimension(1)
self.assertEqual(p.get_unsharded_shape([[4, 3], [4, 3]]), [4, 6])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[4, 3]])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[4, 3], [4, 3], [4, 3]])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[4, 3], [4, 2]])
with self.assertRaises(TypeError):
_ = p.get_unsharded_shape([[4, 3], "not_a_shape"])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([None, [4, 3]])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[2], [4, 3]])
def testScalar(self):
"""Tests sharding and unsharding scalars."""
p = tpu_sharding.ShardingPolicy()
p.freeze()
self.assertEqual(p.get_sharded_shape([]), [])
self.assertEqual(p.get_unsharded_shape([[]]), [])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/tpu/tpu_sharding_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow_estimator.python.estimator.tpu.tpu_config import *
# pylint: enable=wildcard-import,unused-import
|
tensorflow-master
|
tensorflow/python/tpu/tpu_config.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for bfloat16 helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.tpu import bfloat16
class BFloat16ScopeTest(test.TestCase):
def testScopeName(self):
"""Test if name for the variable scope is propogated correctly.
"""
with bfloat16.bfloat16_scope() as bf:
self.assertEqual(bf.name, "")
@test_util.run_deprecated_v1
def testRequestedDType(self):
"""Test if requested dtype is honored in the getter.
"""
with bfloat16.bfloat16_scope() as scope:
v1 = variable_scope.get_variable("v1", [])
self.assertEqual(v1.dtype.base_dtype, dtypes.float32)
v2 = variable_scope.get_variable("v2", [], dtype=dtypes.bfloat16)
self.assertEqual(v2.dtype.base_dtype, dtypes.bfloat16)
self.assertEqual([dtypes.float32, dtypes.float32],
[v.dtype.base_dtype for v in scope.global_variables()])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/tpu/bfloat16_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow_estimator.python.estimator.tpu.error_handling import *
# pylint: enable=wildcard-import,unused-import
|
tensorflow-master
|
tensorflow/python/tpu/error_handling.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU Feature Column Library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu.feature_column import _is_running_on_cpu
from tensorflow.python.tpu.feature_column import _record_variable_scope_and_name
from tensorflow.python.tpu.feature_column import _SUPPORTED_CATEGORICAL_COLUMNS_V2
from tensorflow.python.tpu.feature_column import _TPUBaseEmbeddingColumn
# pylint: disable=protected-access
def embedding_column_v2(categorical_column,
dimension,
combiner='mean',
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=0):
"""TPU embedding_column for `tf.feature_column.embedding_column`.
Note that the interface for TPU embedding_column is different from the non-TPU
version. The following args available for the non-TPU version are NOT
  supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable.
Args:
categorical_column: A categorical_column returned from
categorical_column_with_identity, weighted_categorical_column,
categorical_column_with_vocabulary_file,
categorical_column_with_vocabulary_list,
sequence_categorical_column_with_identity,
sequence_categorical_column_with_vocabulary_file,
sequence_categorical_column_with_vocabulary_list
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row for a non-sequence column. For more information, see
`tf.feature_column.embedding_column`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
ckpt_to_load_from: Argument not used for TPU.
tensor_name_in_ckpt: Argument not used for TPU.
max_norm: Argument not used for TPU.
trainable: Argument not used for TPU.
    max_sequence_length: A non-negative integer specifying the max sequence
      length. Any sequence shorter than this will be padded with 0 embeddings
and any sequence longer will be truncated. This must be positive for
sequence features and 0 for non-sequence features.
Returns:
A _TPUEmbeddingColumnV2.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if `initializer` is specified but not callable.
"""
if not (ckpt_to_load_from is None and tensor_name_in_ckpt is None):
raise ValueError('ckpt_to_load_from, tensor_name_in_ckpt are not '
                     'supported for TPU Embeddings. To load an embedding '
'table from a different checkpoint, use a scaffold_fn '
'and tf.train.init_from_checkpoint.')
if max_norm is not None:
    raise ValueError('max_norm is not supported for TPU Embeddings.')
if not trainable:
raise ValueError('TPU Embeddings do not support non-trainable weights.')
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2):
raise TypeError(
        'categorical_column for tpu '
        'embedding_column must be type %s, got %s.' % (' or '.join([
cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2
]), type(categorical_column)))
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
column = _TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
max_sequence_length=max_sequence_length)
return column
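# Illustrative sketch, not part of the original file: constructing a TPU
# embedding column. `categorical_column_with_identity` is assumed to be
# available from the core feature column library imported above as `fc_lib`.
def _example_embedding_column_v2():
  video_id = fc_lib.categorical_column_with_identity(
      key='video_id', num_buckets=1000000)
  # A 128-dimensional embedding using the default truncated-normal initializer
  # (stddev 1/sqrt(128)).
  return embedding_column_v2(video_id, dimension=128)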
def shared_embedding_columns_v2(categorical_columns,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_lengths=None):
"""List of dense columns that convert from sparse, categorical input.
Note that the interface for TPU embedding_column is different from the non-TPU
version. The following args available for the non-TPU version are NOT
  supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable.
Args:
categorical_columns: A list of categorical_columns returned from
categorical_column_with_identity, weighted_categorical_column,
categorical_column_with_vocabulary_file,
categorical_column_with_vocabulary_list,
sequence_categorical_column_with_identity,
sequence_categorical_column_with_vocabulary_file,
sequence_categorical_column_with_vocabulary_list
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row for a non-sequence column. For more information, see
`tf.feature_column.embedding_column`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
shared_embedding_collection_name: Optional name of the collection where
shared embedding weights are added. If not given, a reasonable name will
be chosen based on the names of `categorical_columns`. This is also used
in `variable_scope` when creating shared embedding weights.
ckpt_to_load_from: Argument not used for TPU.
tensor_name_in_ckpt: Argument not used for TPU.
max_norm: Argument not used for TPU.
trainable: Argument not used for TPU.
    max_sequence_lengths: A list of non-negative integers, either None or
      empty or the same length as the argument categorical_columns. Entries
      corresponding to non-sequence columns must be 0 and entries corresponding
      to sequence columns specify the max sequence length for the column. Any
      sequence shorter than this will be padded with 0 embeddings and any
      sequence longer will be truncated.
Returns:
A _TPUSharedEmbeddingColumnV2.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if `initializer` is specified but not callable.
ValueError: if `max_sequence_lengths` is specified and not the same length
as `categorical_columns`.
ValueError: if `max_sequence_lengths` is positive for a non sequence column
or 0 for a sequence column.
"""
if not (ckpt_to_load_from is None and tensor_name_in_ckpt is None):
raise ValueError('ckpt_to_load_from, tensor_name_in_ckpt are not '
                     'supported for TPU Embeddings. To load an embedding '
'table from a different checkpoint, use a scaffold_fn '
'and tf.train.init_from_checkpoint.')
if max_norm is not None:
    raise ValueError('max_norm is not supported for TPU Embeddings.')
if not trainable:
raise ValueError('TPU Embeddings do not support non-trainable weights.')
for categorical_column in categorical_columns:
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2):
raise TypeError(
          'categorical_column for tpu '
          'shared_embedding_columns must be type %s, got %s.' % (' or '.join([
cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2
]), type(categorical_column)))
if not max_sequence_lengths:
max_sequence_lengths = [0] * len(categorical_columns)
if len(max_sequence_lengths) != len(categorical_columns):
raise ValueError('max_sequence_lengths and categorical_columns must be of '
'the same length. len(max_sequence_lengths)={} '
'len(categorical_columns)={}.'.format(
len(max_sequence_lengths), len(categorical_columns)))
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. ')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
num_buckets = sorted_columns[0]._num_buckets # pylint: disable=protected-access
for c in sorted_columns[1:]:
if num_buckets != c._num_buckets: # pylint: disable=protected-access
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same number of buckets. Given column: {} with buckets: {} does '
'not match column: {} with buckets: {}'.format(
sorted_columns[0], num_buckets, c, c._num_buckets)) # pylint: disable=protected-access
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
tpu_columns = []
column_creator = fc_lib.SharedEmbeddingColumnCreator(
dimension, initializer, ckpt_to_load_from, tensor_name_in_ckpt,
num_buckets, trainable, shared_embedding_collection_name)
# Create the state (_SharedEmbeddingColumnLayer) here.
for categorical_column, max_sequence_length in zip(
categorical_columns, max_sequence_lengths):
column = _TPUSharedEmbeddingColumnV2(
categorical_column=categorical_column,
shared_embedding_column_creator=column_creator,
combiner=combiner,
initializer=initializer,
shared_embedding_collection_name=shared_embedding_collection_name,
max_sequence_length=max_sequence_length)
tpu_columns.append(column)
return tpu_columns
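# Illustrative sketch, not part of the original file: two identity columns
# sharing a single embedding table. All columns passed to
# shared_embedding_columns_v2 must have the same number of buckets.
def _example_shared_embedding_columns_v2():
  query_token = fc_lib.categorical_column_with_identity(
      key='query_token', num_buckets=50000)
  doc_token = fc_lib.categorical_column_with_identity(
      key='doc_token', num_buckets=50000)
  return shared_embedding_columns_v2([query_token, doc_token], dimension=64)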
class _TPUEmbeddingColumnV2(_TPUBaseEmbeddingColumn, fc_lib.EmbeddingColumn):
"""Core Embedding Column."""
def __new__(cls,
categorical_column,
dimension,
combiner='mean',
initializer=None,
max_sequence_length=0):
return fc_lib.EmbeddingColumn.__new__(
cls,
categorical_column,
dimension,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True)
def __init__(self,
categorical_column,
dimension,
combiner='mean',
initializer=None,
max_sequence_length=0):
_TPUBaseEmbeddingColumn.__init__(self, categorical_column,
max_sequence_length=max_sequence_length)
self._key = None
def get_combiner(self):
return self.combiner
def get_embedding_table_size(self):
"""Returns num_ids and width."""
return (self.categorical_column._num_buckets, self.dimension)
def get_feature_key_name(self):
"""get_feature_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.categorical_column.name
return self.categorical_column.name
def get_weight_key_name(self):
"""get_weight_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.weight_feature_key
return None
def get_embedding_var_name(self):
"""get_embedding_var_name."""
return self.categorical_column.name
def get_initializer(self):
return self.initializer
def is_categorical_column_weighted(self):
"""Check if the categorical column of the embedding column is weighted."""
if isinstance(
self.categorical_column,
(
fc._WeightedCategoricalColumn, # pylint: disable=protected-access
fc_lib.WeightedCategoricalColumn)):
return True
return False
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
# TPU mode
# Get the embeddings from the LazyBuilder.
tensor = inputs.get(self.get_feature_key_name())
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(self.get_embedding_var_name(),
'embedding_weights')
return tensor
def create_state(self, state_manager):
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn.create_state(
self, state_manager)
    # create_state is called for the EmbeddingColumn to create its embedding
    # variables under feature column V2. We are on TPU at this point, so just
    # record the variable scope here.
_record_variable_scope_and_name(self.get_embedding_var_name(),
'embedding_weights')
def get_dense_tensor(self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn.get_dense_tensor(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn.get_dense_tensor(
self, transformation_cache, state_manager)
# TPU mode
# Get the embeddings from the FeatureTransformationCache.
tensor = transformation_cache.get(self.get_feature_key_name(),
state_manager)
return tensor
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
tensor = inputs.get(self.get_feature_key_name())
tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())
# inputs is a _LazyBuilder and for rank 1 tensors, it calls expand_dims(-1).
# We need to undo this to match the standard CPU sequence embedding.
tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(self.get_embedding_var_name(),
'embedding_weights')
return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
tensor = transformation_cache.get(self.get_feature_key_name(),
state_manager)
tensor_lengths = transformation_cache.get(
self.get_sequence_length_feature_key_name(),
state_manager)
# FeatureTransformationCache expands rank 1 tensors (like sequence length)
# to rank 2. We need to undo this to match the standard CPU sequence
# embedding.
tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
class _TPUSharedEmbeddingColumnV2(_TPUBaseEmbeddingColumn,
fc_lib.SharedEmbeddingColumn):
"""Core Shared Embedding Column."""
def __new__(cls,
categorical_column,
shared_embedding_column_creator,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
max_sequence_length=0):
return fc_lib.SharedEmbeddingColumn.__new__(
cls,
categorical_column,
combiner=combiner,
shared_embedding_column_creator=shared_embedding_column_creator,
max_norm=None)
def __init__(self,
categorical_column,
shared_embedding_column_creator,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
max_sequence_length=0):
_TPUBaseEmbeddingColumn.__init__(self, categorical_column,
max_sequence_length=max_sequence_length)
self._initializer = initializer
self._shared_embedding_collection_name = shared_embedding_collection_name
def get_combiner(self):
return self.combiner
def get_embedding_table_size(self):
"""Returns num_ids and width."""
return (self.categorical_column._num_buckets,
self.shared_embedding_column_creator.dimension)
def get_feature_key_name(self):
"""get_feature_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.categorical_column.name
return self.categorical_column.name
def get_weight_key_name(self):
"""get_weight_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.weight_feature_key
return None
def get_embedding_var_name(self):
"""get_embedding_var_name."""
return self._shared_embedding_collection_name
def get_initializer(self):
return self._initializer
def is_categorical_column_weighted(self):
"""Check if the categorical column of the embedding column is weighted."""
if isinstance(
self.categorical_column,
(
fc._WeightedCategoricalColumn, # pylint: disable=protected-access
fc_lib.WeightedCategoricalColumn)):
return True
return False
def _get_dense_tensor_internal(
self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
self, transformation_cache, state_manager)
# TPU mode
# Get the embeddings from the FeatureTransformationCache.
tensor = transformation_cache.get(self.get_feature_key_name(),
state_manager)
# Add to collection for _create_tpu_embedding_variables_and_ops
# Note that in Feature Column V2, shared embeddings have no scope.
_record_variable_scope_and_name(
self.get_embedding_var_name(),
'embedding_weights',
is_shared_embedding=True)
return tensor
def get_sequence_dense_tensor(
self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
    # Use this class's TPU-aware override, which fetches the embedding
    # activations from the transformation cache.
    tensor = self._get_dense_tensor_internal(transformation_cache,
                                             state_manager)
tensor_lengths = transformation_cache.get(
self.get_sequence_length_feature_key_name(),
state_manager)
# FeatureTransformationCache expands rank 1 tensors (like sequence length)
# to rank 2. We need to undo this to match the standard CPU sequence
# embedding.
tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
def split_sequence_columns_v2(feature_columns):
"""Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns.
For use in a TPUEstimator model_fn function. E.g.
def model_fn(features):
sequence_columns, feature_columns = (
tf.tpu.feature_column.split_sequence_columns(feature_columns))
input = tf.feature_column.input_layer(
features=features, feature_columns=feature_columns)
sequence_features, sequence_lengths = (
tf.contrib.feature_column.sequence_input_layer(
features=features, feature_columns=sequence_columns))
Args:
feature_columns: A list of _TPUEmbeddingColumns to split.
Returns:
Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the
second is the non-sequence columns.
"""
sequence_columns = []
non_sequence_columns = []
for column in feature_columns:
if not isinstance(column, (_TPUEmbeddingColumnV2,
_TPUSharedEmbeddingColumnV2)):
raise TypeError(
'column must be a _TPUEmbeddingColumnV2 or '
'_TPUSharedEmbeddingColumnV2 but got %s instead.' % (type(column)))
if column.is_sequence_column():
sequence_columns.append(column)
else:
non_sequence_columns.append(column)
return sequence_columns, non_sequence_columns
|
tensorflow-master
|
tensorflow/python/tpu/feature_column_v2.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Optimizer that implements cross-shard gradient reduction for TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import optimizer
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["tpu.CrossShardOptimizer"])
class CrossShardOptimizer(optimizer.Optimizer):
"""An optimizer that averages gradients across TPU shards."""
def __init__(self,
opt,
reduction=losses.Reduction.MEAN,
name="CrossShardOptimizer",
group_assignment=None):
"""Construct a new cross-shard optimizer.
Args:
opt: An existing `Optimizer` to encapsulate.
reduction: The reduction to apply to the shard losses.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "CrossShardOptimizer".
group_assignment: Optional 2d int32 lists with shape
        [num_groups, num_replicas_per_group] which describes how to apply the
optimizer to subgroups.
Raises:
ValueError: If reduction is not a valid cross-shard reduction.
"""
if reduction not in (losses.Reduction.SUM, losses.Reduction.MEAN):
raise ValueError("Unsupported reduction: %s." % reduction)
if isinstance(opt, optimizer_v2.OptimizerV2):
raise TypeError(
"CrossShardOptimizer does not work with OptimizerV2. If you are "
"using TPUStrategy, OptimizerV2 will sum gradients across replicas."
"If you are using TPUEstimator, you may instead sum your gradients "
"with: grads = [tf.compat.v1.tpu.cross_replica_sum(g) for g in grads]"
". If you want to average your gradients, rescale your loss with: "
"loss /= global_batch_size")
super(CrossShardOptimizer, self).__init__(False, name)
self._opt = opt
self._reduction = reduction
self._group_assignment = group_assignment
def _verify_and_get_subgroup_size(self, group_assignment, num_shards):
"""Verify group_assignment and get the subgroup size".
Args:
group_assignment: list of group ids for applying the optimizer
to subgroups.
num_shards: The number of TPU shards.
Returns:
The size of one subgroup in group_assignment.
Raises:
ValueError: If group_assignment is invalid.
"""
if not group_assignment:
return None
if not (isinstance(group_assignment, list) and
all(isinstance(i, list) for i in group_assignment)):
raise ValueError("group_assignment must be a list of list. Got {}".format(
group_assignment))
replica_ids = set()
for g in group_assignment:
for i in g:
replica_ids.add(i)
if set(range(num_shards)) != replica_ids:
raise ValueError("group_assignment must be a permutation of range({0})."
" Got group_assignment={1}".format(
num_shards, group_assignment))
subgroup_size_list = [len(group) for group in group_assignment]
if all(subgroup_size_list[0] == size for size in subgroup_size_list):
return subgroup_size_list[0]
else:
raise ValueError("The size of each subgroup in group_assignment must "
"be equal. Got group_assignment={}".format(
self._group_assignment))
def compute_gradients(self, loss, var_list=None, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer. The
gradients will be aggregated in the apply_gradients() so that user can
    modify the gradients, for example by clipping with a per-replica global
    norm if needed. Computing a global norm over aggregated gradients can be
    problematic, as one replica's huge gradients can distort the gradients
    from other replicas.
Args:
loss: A Tensor containing the value to minimize.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKey.TRAINABLE_VARIABLES`.
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
Raises:
ValueError: If not within a tpu_shard_context or group_assignment is
invalid.
"""
num_shards = tpu_function.get_tpu_context().number_of_shards
if num_shards is None:
logging.warning(
"CrossShardOptimizer should be used within a tpu_shard_context, but "
"got unset number_of_shards. Assuming 1.")
num_shards = 1
subgroup_size = self._verify_and_get_subgroup_size(self._group_assignment,
num_shards)
if num_shards > 1 and self._reduction == losses.Reduction.MEAN:
if self._group_assignment:
scale = 1.0 / subgroup_size
else:
scale = 1.0 / num_shards
loss *= scale
return self._opt.compute_gradients(loss, var_list=var_list, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
Calls tpu_ops.cross_replica_sum() to sum gradient contributions across
replicas, and then applies the real optimizer.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the Optimizer constructor.
Returns:
An `Operation` that applies the gradients. If `global_step` was not None,
that operation also increments `global_step`.
Raises:
ValueError: If the grads_and_vars is malformed.
"""
summed_grads_and_vars = []
for (grad, var) in grads_and_vars:
if grad is None:
summed_grads_and_vars.append((grad, var))
else:
with ops.colocate_with(grad):
summed_grads_and_vars.append((tpu_ops.cross_replica_sum(
grad, self._group_assignment), var))
return self._opt.apply_gradients(summed_grads_and_vars, global_step, name)
def get_slot(self, *args, **kwargs):
"""Return a slot named "name" created for "var" by the Optimizer.
This simply wraps the get_slot() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._opt.get_slot(*args, **kwargs)
def get_slot_names(self, *args, **kwargs):
"""Return a list of the names of slots created by the `Optimizer`.
This simply wraps the get_slot_names() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
A list of strings.
"""
return self._opt.get_slot_names(*args, **kwargs)
def variables(self):
"""Forwarding the variables from the underlying optimizer."""
return self._opt.variables()
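# Illustrative sketch, not part of the original file: wrapping a standard
# optimizer so gradients are averaged across TPU shards. `loss` is a
# hypothetical scalar Tensor built inside a tpu_shard_context (e.g. via
# tpu.shard or TPUEstimator).
def _example_cross_shard_optimizer(loss):
  from tensorflow.python.training import gradient_descent
  base_opt = gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  # With MEAN reduction the loss is rescaled by 1/num_shards before the
  # backward pass, so the cross-replica sum of gradients yields an average.
  opt = CrossShardOptimizer(base_opt, reduction=losses.Reduction.MEAN)
  return opt.minimize(loss)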
|
tensorflow-master
|
tensorflow/python/tpu/tpu_optimizer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.tpu.ops import tpu_ops
TPUPartitionedCall = tpu_ops.tpu_partitioned_call # pylint: disable=invalid-name
|
tensorflow-master
|
tensorflow/python/tpu/functional.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Helper library for sharding during TPU compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import tensor_shape
_DEFAULT_NUMBER_OF_SHARDS = 1
_DEFAULT_SHARD_DIMENSION = 0
# TODO(b/36777903) change other parts of tpu.py to use this class.
class ShardingPolicy(object):
"""An object use to hold the sharding policy for a Tensor.
"""
def __init__(self):
self._number_of_shards = None
self._shard_dimension = None
self._frozen = False
def __str__(self):
if self.number_of_shards is None or self.shard_dimension is None:
return "ShardingPolicy(unset)"
else:
return ("ShardingPolicy(%d shards dimension %d)" %
(self.number_of_shards, self.shard_dimension))
def _fill_default_values(self):
if self._number_of_shards is None:
self._number_of_shards = _DEFAULT_NUMBER_OF_SHARDS
if self._shard_dimension is None:
self._shard_dimension = tensor_shape.as_dimension(
_DEFAULT_SHARD_DIMENSION)
def freeze(self):
"""Prevents further modification to the sharding policy.
Any values that have not been set when freeze is called are set to
defaults. If the ShardingPolicy is already frozen, this is a NoOp.
"""
if not self._frozen:
self._fill_default_values()
self._frozen = True
@property
def number_of_shards(self):
"""Returns the number of shards in the policy or None if unspecified."""
return self._number_of_shards
def set_number_of_shards(self, number_of_shards):
"""Sets the number of shards for the current policy.
If the policy has been frozen then number_of_shards must match the
existing setting.
Args:
number_of_shards: The number of shards to use in the policy.
Raises:
ValueError: If the policy has been frozen and number_of_shards
differs from the frozen value; or number_of_shards <= 0.
"""
if self._frozen:
if self._number_of_shards != number_of_shards:
raise ValueError(
"Can't set sharding policy to use %d shards since it has been "
"frozen to use %d." % (number_of_shards, self._number_of_shards))
else:
if number_of_shards > 0:
self._number_of_shards = number_of_shards
else:
raise ValueError(
"Can't set sharding policy to use %s shards; value must be >0" %
str(number_of_shards))
@property
def shard_dimension(self):
"""Returns the shard dimension of the policy or None if unspecified."""
return self._shard_dimension
def set_shard_dimension(self, shard_dimension):
"""Sets the shard dimension for the current policy.
If the policy has been frozen then shard_dimension must match the
existing setting.
Args:
shard_dimension: The shard dimension to use in the policy.
Raises:
ValueError: If the policy has been frozen and shard_dimension
differs from the frozen value, or shard_dimension can't be
interpreted as a Dimension.
"""
if self._frozen:
if self._shard_dimension != shard_dimension:
raise ValueError(
"Can't set shard dimension to %d since it has been frozen to "
"use %d." % (shard_dimension, self._shard_dimension))
else:
self._shard_dimension = tensor_shape.as_dimension(shard_dimension)
def merge(self, other):
"""Merges the policy of another policy into the current policy.
Args:
other: The policy to merge into this one.
Raises:
ValueError: If this policy has been frozen and the merge conflicts with
the frozen policy.
"""
if other.number_of_shards is not None:
self.set_number_of_shards(other.number_of_shards)
if other.shard_dimension is not None:
self.set_shard_dimension(other.shard_dimension)
def get_sharded_shape(self, shape, shard_index=None):
"""Returns the shape of a shard of a full Tensor.
When given the shape of a 'full-size' Tensor, returns the shape of
the sub-Tensor after it has been sharded. Freezes the policy if it
has not yet been frozen.
Args:
shape: The shape of the full-size Tensor to be sharded.
shard_index: The index of the shard whose shape should be returned.
shard_index can be None for sharding policies that use the same
shape for every shard.
Returns:
The shape of the sharded version of the Tensor.
Raises:
ValueError: If shard_index is None when shards are of different
shapes; or shard_index is not None and
!(0<=shard_index<number_of_shards); or shape does not have at
least self.shard_dimension+1 dimensions; or the value of
shape's shard dimension is not a multiple of
self.number_of_shards
"""
if self._shard_dimension is None or self._number_of_shards is None:
# Don't raise an error if the config is unset.
return None
if shard_index is not None:
if shard_index < 0 or shard_index >= self.number_of_shards:
raise ValueError("shard_index %d, but must be in [0,%d)." %
(shard_index, self._number_of_shards))
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError("shape must be a specified shape not Unknown")
if ndims <= self._shard_dimension:
raise ValueError("shape %s does not contain shard_dimension %d" %
(shape.as_list(), self._shard_dimension))
dims = shape.as_list()
if dims[self._shard_dimension] is None:
raise ValueError("shape %s must have a fixed size for dimension %d "
"that is known at graph construction time." %
(shape.as_list(), self._shard_dimension))
if (dims[self._shard_dimension] % self._number_of_shards) != 0:
raise ValueError("shape %s cannot be sharded %d ways along dimension %d" %
(shape.as_list(), self._number_of_shards,
self._shard_dimension))
    # Use integer division so the sharded dimension stays an int on Python 3.
    dims[self._shard_dimension] //= self._number_of_shards
return tensor_shape.as_shape(dims)
def _unshard_shape(self, shape):
"""Return the unsharded shape that would generate a given sharded shape.
Args:
shape: the sharded shape to unshard
Returns:
The unsharded shape.
Raises:
ValueError: if shape is unknown or does not contain
self.shard_dimension
TypeError: if shape is not convertible to a TensorShape
"""
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError("shape must be a specified shape not Unknown")
if ndims <= self._shard_dimension:
raise ValueError("shape %s does not contain shard_dimension %d" %
(shape.as_list(), self._shard_dimension))
dims = shape.as_list()
dims[self._shard_dimension] *= self._number_of_shards
return tensor_shape.as_shape(dims)
def get_unsharded_shape(self, shapes):
"""Returns the shape of an unsharded Tensor given a list of shards.
When given a list of shapes of shards, returns the shape of the
unsharded Tensor that would generate the shards. Sets defaults for the
policy if number_of_shards or shard_dimension is None.
Args:
shapes: The shapes of the Tensor shards to be combined.
Returns:
The shape of the unsharded version of the Tensor.
Raises:
ValueError: if shapes is not a list of length
self.number_of_shards; or any element of shapes is not a valid
shape consistent with the sharding policy; or the list of
shapes is not a valid sharding of a full shape.
TypeError: if an element of shapes is not convertible to a
TensorShape
"""
self._fill_default_values()
if len(shapes) != self.number_of_shards:
raise ValueError(
"shapes is %s but must be a list of length number_of_shards=%d" % (
str(shapes), self.number_of_shards))
unsharded_shapes = [self._unshard_shape(s) for s in shapes]
for i in xrange(self.number_of_shards - 1):
if not unsharded_shapes[i].is_compatible_with(
unsharded_shapes[self.number_of_shards - 1]):
raise ValueError(
"sharded shapes %s are not consistent shards of a full shape "
"sharded %d ways along dimension %d" % (
str(shapes), self.number_of_shards, self.shard_dimension))
return unsharded_shapes[0]
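  # Illustrative sketch (not from the original file): round-tripping shapes
  # through a sharding policy. This assumes the set_number_of_shards() and
  # set_shard_dimension() setters that the full ShardingPolicy class defines
  # outside this excerpt; the values shown are what the methods above compute.
  #
  #   policy = ShardingPolicy()
  #   policy.set_number_of_shards(2)
  #   policy.set_shard_dimension(0)
  #   policy.get_sharded_shape([8, 10])               # TensorShape([4, 10])
  #   policy.get_unsharded_shape([[4, 10], [4, 10]])  # TensorShape([8, 10])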
|
tensorflow-master
|
tensorflow/python/tpu/tpu_sharding.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf.tpu import dynamic_padding_pb2 as dynamic_padding
from tensorflow.python.compat import compat as api_compat
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
ops.NotDifferentiable("TPUReplicatedInput")
# Operations that indicate some error in the user's graph, e.g. a placeholder
# that's introduced outside of the infeed.
_BLACKLISTED_OPS = set([
"Placeholder",
])
# XLA doesn't currently support reading of intermediate tensors, thus some ops
# are not supported.
_UNSUPPORTED_OPS = set([
"AudioSummary",
"AudioSummaryV2",
"HistogramSummary",
"ImageSummary",
"MergeSummary",
"Print",
"ScalarSummary",
"TensorSummary",
"TensorSummaryV2",
])
# Ops which can be safely pruned from XLA compile if they have no consumers.
# These ops should also have no inputs.
_UNCONNECTED_OPS_TO_PRUNE = set(["Placeholder", "VarHandleOp"])
_MAX_WARNING_LINES = 5
_TPU_REPLICATE_ATTR = "_tpu_replicate"
_TPU_COMPILATION_STATUS_ATTR = "_tpu_compilation_status"
_OUTSIDE_COMPILATION_ATTR = "_xla_outside_compilation"
def _tpu_system_device_name(job):
"""Returns the device name for the TPU_SYSTEM device of `job`."""
if job is None:
return "/device:TPU_SYSTEM:0"
else:
return "/job:%s/device:TPU_SYSTEM:0" % job
@tf_export(v1=["tpu.initialize_system"])
def initialize_system(embedding_config=None, job=None):
"""Initializes a distributed TPU system for use with TensorFlow.
Args:
embedding_config: If not None, a `TPUEmbeddingConfiguration` proto
describing the desired configuration of the hardware embedding lookup
tables. If embedding_config is None, no hardware embeddings can be used.
job: The job (the XXX in TensorFlow device specification /job:XXX) that
contains the TPU devices that will be initialized. If job=None it is
assumed there is only one job in the TensorFlow flock, and an error will
be returned if this assumption does not hold.
Returns:
A serialized `TopologyProto` that describes the TPU system. Note:
the topology must be evaluated using `Session.run` before it can be used.
"""
config_string = ("" if embedding_config is None else
embedding_config.SerializeToString())
with ops.device(_tpu_system_device_name(job)):
return tpu_ops.configure_distributed_tpu(embedding_config=config_string)
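# Illustrative sketch (not from the original file): in TF 1.x the returned
# topology op is typically run once at session startup. The session usage
# below is a hypothetical example, not part of this module.
#
#   init_op = initialize_system()
#   with tf.compat.v1.Session() as sess:
#     serialized_topology = sess.run(init_op)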
@tf_export(v1=["tpu.shutdown_system"])
def shutdown_system(job=None):
"""Shuts down a running a distributed TPU system.
Args:
job: The job (the XXX in TensorFlow device specification /job:XXX) that
contains the TPU devices that will be shutdown. If job=None it is
assumed there is only one job in the TensorFlow flock, and an error will
be returned if this assumption does not hold.
"""
with ops.device(_tpu_system_device_name(job)):
shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu()
return shutdown_distributed_tpu
@tf_export(v1=["tpu.core"])
def core(num):
"""Returns the device name for a core in a replicated TPU computation.
Args:
num: the virtual core number within each replica to which operators should
be assigned.
Returns:
A device name, suitable for passing to `tf.device()`.
"""
return "device:TPU_REPLICATED_CORE:{}".format(num)
class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside a TPU computation.
The primary role of `TPUReplicateContext` is to mark operators inside a
tpu.replicate() computation with the attribute "_tpu_replicate=XYZ", where XYZ
is a unique name.
We use a `ControlFlowContext` to perform the annotation since it integrates
  with TensorFlow constructs like ResourceVariables. For example, if a
`ResourceVariable` is constructed inside a tpu.replicate() block, the
`ResourceVariable` implementation can use
`with ops.control_dependencies(None)` to build the variable's definition
outside the replicated computation.
"""
def __init__(self, name, num_replicas, pivot):
"""Builds a new TPUReplicateContext.
Args:
name: a unique name for the context, used to populate the `_tpu_replicate`
attribute.
num_replicas: an integer that gives the number of replicas for the
computation.
pivot: a pivot node. Nodes in the TPUReplicateContext that do not have any
inputs will have a control dependency on the pivot node. This ensures
that nodes are correctly included in any enclosing control flow
contexts.
"""
super(TPUReplicateContext, self).__init__()
self._num_replicas = num_replicas
self._outer_device_function_stack = None
self._oc_dev_fn_stack = None
self._outside_compilation_cluster = None
self._outside_compilation_counter = 0
self._in_gradient_colocation = None
self._gradient_colocation_stack = []
self._host_compute_core = []
self._name = name
self._name_as_bytes = compat.as_bytes(name)
self._unsupported_ops = []
self._pivot = pivot
self._replicated_vars = {}
def get_replicated_var_handle(self, name, vars_):
"""Returns a variable handle for replicated TPU variable 'var'.
This is a method used by an experimental replicated variable implementation
and is not intended as a public API.
Args:
name: The common name of the variable.
vars_: The replicated TPU variables.
Returns:
The handle of the TPU replicated input node.
"""
handle = self._replicated_vars.get(name)
if handle is not None:
return handle
# Builds a TPUReplicatedInput node for the variable, if one does not already
# exist. The TPUReplicatedInput node must belong to the enclosing
# control-flow scope of the TPUReplicateContext.
# TODO(phawkins): consider changing the contract of the TPU encapsulation
# so the TPUReplicatedInput nodes go inside the TPUReplicateContext scope
# instead.
# pylint: disable=protected-access
graph = ops.get_default_graph()
saved_context = graph._get_control_flow_context()
graph._set_control_flow_context(self.outer_context)
handle = tpu_ops.tpu_replicated_input(
[v.handle for v in vars_], name=name + "/handle")
graph._set_control_flow_context(saved_context)
# pylint: enable=protected-access
self._replicated_vars[name] = handle
return handle
def report_unsupported_operations(self):
if self._unsupported_ops:
op_str = "\n".join([" %s (%s)" % (op.type, op.name)
for op in self._unsupported_ops[:_MAX_WARNING_LINES]])
logging.warning("%d unsupported operations found: \n%s",
len(self._unsupported_ops), op_str)
if len(self._unsupported_ops) > _MAX_WARNING_LINES:
logging.warning("... and %d more" %
(len(self._unsupported_ops) - _MAX_WARNING_LINES))
def EnterGradientColocation(self, op, gradient_uid):
if op is not None:
self._gradient_colocation_stack.append(op)
if not self._outside_compilation_cluster:
try:
outside_attr = op.get_attr(_OUTSIDE_COMPILATION_ATTR)
if self._in_gradient_colocation:
raise NotImplementedError(
"Cannot nest gradient colocation operations outside compilation"
)
if gradient_uid == "__unsupported__":
raise NotImplementedError(
"No gradient_uid calling gradient within outside_compilation")
# When we take the gradient of an op X in an outside_compilation
# cluster C in a forward computation we would like to put the ops
# corresponding to the gradient of X into a new outside_compilation
# cluster C'. However, if we take the gradient of X twice, the second
# one should get yet another new outside_compilation cluster C''.
#
# The mechanism we adopt is to use a 'root_cluster' which is the
# cluster that X was in before we took gradients, and a 'gradient_uid'
# which is different for every invocation of gradients, and put the
# gradient of X in cluster 'root_cluster.gradient_uid'.
#
# When taking a gradient of a gradient, some ops will be colocated
# with Op in the forward pass (e.g., cluster root_cluster) and some in
# the backward pass (e.g., cluster root_cluster.initial_gradient_uid).
# We need all of the grad-of-grad ops to be in the same cluster to
# avoid cyclic dependencies between clusters. We adopt a heuristic
# that puts any op clustered with root_cluster.<xxx> in
# root_cluster.gradient_uid, even if xxx was initial_gradient_uid.
self._in_gradient_colocation = op
parts = outside_attr.split(".")
cluster = parts[0] + "." + gradient_uid
self._EnterOutsideCompilationScope(cluster=cluster)
except ValueError:
# The attr was not present: do nothing.
pass
def ExitGradientColocation(self, op, gradient_uid):
if op is not None:
if not self._gradient_colocation_stack:
raise errors.InternalError(
op.node_def, op,
"Badly nested gradient colocation: empty stack when popping Op " +
op.name)
last_op = self._gradient_colocation_stack.pop()
if op is last_op:
if op is self._in_gradient_colocation:
self._in_gradient_colocation = None
self._ExitOutsideCompilationScope()
else:
        raise errors.InternalError(
            op.node_def, op, "Badly nested gradient colocation, expected " +
            last_op.name + ", got " + op.name)
def _EnterOutsideCompilationScope(self, cluster=None):
class FakeOp(object):
"""A helper class to determine the current device.
Supports only the type and device set/get methods needed to run the
graph's _apply_device_function method.
"""
def __init__(self):
self._device = ""
@property
def type(self):
return "FakeOp"
@property
def device(self):
return self._device
def _set_device(self, device):
if isinstance(device, pydev.DeviceSpec):
self._device = device.to_string()
else:
self._device = device
def _set_device_from_string(self, device_str):
self._device = device_str
if self._outside_compilation_cluster:
raise NotImplementedError("Cannot nest outside_compilation clusters")
if cluster:
self._outside_compilation_cluster = cluster
else:
self._outside_compilation_cluster = str(self._outside_compilation_counter)
self._outside_compilation_counter += 1
graph = ops.get_default_graph()
fake_op = FakeOp()
graph._apply_device_functions(fake_op) # pylint: disable=protected-access
device = pydev.DeviceSpec.from_string(fake_op.device)
if (device.device_type == "TPU_REPLICATED_CORE" and
device.device_index is not None):
self._host_compute_core.append(self._outside_compilation_cluster + ":" +
str(device.device_index))
self._oc_dev_fn_stack = graph._device_function_stack # pylint: disable=protected-access
graph._device_function_stack = self._outer_device_function_stack # pylint: disable=protected-access
def _ExitOutsideCompilationScope(self):
if not self._outside_compilation_cluster:
raise NotImplementedError(
"Attempted to exit outside_compilation scope when not in scope")
self._outside_compilation_cluster = None
graph = ops.get_default_graph()
graph._device_function_stack = self._oc_dev_fn_stack # pylint: disable=protected-access
def Enter(self):
if not self._outer_device_function_stack:
# Capture the device function stack at the time of first entry
# since that is the stack that will be used outside_compilation.
graph = ops.get_default_graph()
# pylint: disable=protected-access
self._outer_device_function_stack = graph._device_function_stack.copy()
# pylint: enable=protected-access
super(TPUReplicateContext, self).Enter()
def HostComputeCore(self):
return self._host_compute_core
def _RemoveExternalControlEdges(self, op):
"""Remove any external control dependency on this op."""
internal_control_inputs = []
external_control_inputs = []
for x in op.control_inputs:
# pylint: disable=protected-access
is_internal_op = False
ctxt = x._get_control_flow_context()
while ctxt is not None:
if ctxt == self:
is_internal_op = True
break
ctxt = ctxt._outer_context
if is_internal_op:
internal_control_inputs.append(x)
else:
external_control_inputs.append(x)
# pylint: enable=protected-access
# pylint: disable=protected-access
op._remove_all_control_inputs()
op._add_control_inputs(internal_control_inputs)
# pylint: enable=protected-access
return internal_control_inputs, external_control_inputs
def AddOp(self, op):
# pylint: disable=protected-access
if op.type in _BLACKLISTED_OPS:
logging.error("Operation of type %s (%s) is not supported on the TPU. "
"Execution will fail if this op is used in the graph. " %
(op.type, op.name))
if op.type in _UNSUPPORTED_OPS:
self._unsupported_ops.append(op)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
"Non-resource Variables are not supported inside TPU computations "
"(operator name: %s)" % op.name)
if _TPU_REPLICATE_ATTR in op.node_def.attr:
raise ValueError("TPU computations cannot be nested")
op._set_attr(_TPU_REPLICATE_ATTR,
attr_value_pb2.AttrValue(s=self._name_as_bytes))
if self._outside_compilation_cluster:
op._set_attr(
_OUTSIDE_COMPILATION_ATTR,
attr_value_pb2.AttrValue(
s=compat.as_bytes(self._outside_compilation_cluster)))
if self._num_replicas > 1 or not self._outside_compilation_cluster:
# Prevent feeding or fetching anything that is being compiled,
# and any replicated outside_compilation Op.
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
# Remove any control edges from outer control flow contexts. These may cause
# mismatched frame errors.
(internal_control_inputs,
external_control_inputs) = self._RemoveExternalControlEdges(op)
if not op.inputs:
# Add a control edge from the control pivot to this op.
if not internal_control_inputs:
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot())
# pylint: enable=protected-access
else:
for index in xrange(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
op._update_input(index, real_x) # pylint: disable=protected-access
if external_control_inputs:
# Use an identity to pull control inputs as data inputs. Note that we
# ignore ops which don't have outputs. TODO(phawkins): fix that.
with ops.control_dependencies(None):
self.Enter()
external_control_inputs = [
array_ops.identity(x.outputs[0]).op
for x in external_control_inputs
if x.outputs
]
self.Exit()
# pylint: disable=protected-access
op._add_control_inputs(external_control_inputs)
# pylint: enable=protected-access
# Mark op's outputs as seen by this context and any outer contexts.
output_names = [x.name for x in op.outputs]
context = self
while context is not None:
# pylint: disable=protected-access
context._values.update(output_names)
context = context._outer_context
# pylint: enable=protected-access
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
if val.name in self._values:
# Use the real value if it comes from outer context.
result = self._external_values.get(val.name)
return val if result is None else result
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddInnerOp(self, op):
self.AddOp(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
# Define the gradient loop state associated with the TPUReplicateContext to
# be None as the TPUReplicateContext does not get nested nor does the
# grad_state outside the TPUReplicateContext affect the graph inside so the
# grad_state should be as if this is the top-level gradient state.
return None
@property
def back_prop(self):
"""Forwards to the enclosing while context, if any."""
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def GetControlPivot(self):
return self._pivot
@tf_export(v1=["tpu.outside_compilation"])
def outside_compilation(computation, *args, **kwargs):
"""Builds part of a computation outside any current TPU replicate scope.
Args:
computation: A Python function that builds the computation to
place on the host.
*args: the positional arguments for the computation.
**kwargs: the keyword arguments for the computation.
Returns:
The Tensors returned by computation.
"""
args = [] if args is None else args
graph = ops.get_default_graph()
# If we are in a TPUReplicateContext, signal that we are now
# outside_compilation
initial_context = graph._get_control_flow_context() # pylint: disable=protected-access
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._EnterOutsideCompilationScope() # pylint: disable=protected-access
context = context.outer_context
retval = computation(*args, **kwargs)
# If we are in a TPUReplicateContext, signal that we are no longer
# outside_compilation
final_context = graph._get_control_flow_context() # pylint: disable=protected-access
if initial_context is not final_context:
raise NotImplementedError(
"Control-flow context cannot be different at start and end of an "
"outside_compilation scope")
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._ExitOutsideCompilationScope() # pylint: disable=protected-access
context = context.outer_context
return retval
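# Illustrative sketch (not from the original file): moving a host-only op out
# of a TPU computation. The function and tensor names are hypothetical.
#
#   def host_fn(t):
#     # Any op unsupported on TPU (e.g. a summary or print) can live here.
#     return t * 2.0
#
#   def tpu_fn(x):
#     y = x + 1.0
#     y = outside_compilation(host_fn, y)  # runs on the host CPU
#     return y + 1.0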
@tf_export(v1=["tpu.replicate"])
def replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None,
maximum_shapes=None):
"""Builds a graph operator that runs a replicated TPU computation.
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs. Each input can be a nested structure
containing values that are convertible to tensors. Note that passing an
      N-dimensional list of compatible values will result in an N-dimensional
      list of scalar tensors rather than a single rank-N tensor. If you need
      different behavior, convert parts of inputs to tensors with
      `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
maximum_shapes: A nested structure of tf.TensorShape representing the shape
to which the respective component of each input element in each replica
should be padded. Any unknown dimensions (e.g.
tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
object) will be padded to the maximum size of that dimension over all
replicas. Note that if the input dimension is already static, we won't do
padding on it and we require the maximum_shapes to have the same value or
None on that dimension. The structure of `maximum_shapes` needs to be the
same as `inputs[0]`.
Returns:
    A list of outputs, indexed by `[replica_num]`; each output can be a nested
    structure the same as what computation() returns, with a few exceptions.
    Exceptions include:
1) None output: a NoOp would be returned which control-depends on
computation.
2) Single value output: A tuple containing the value would be returned.
3) Operation-only outputs: a NoOp would be returned which
control-depends on computation.
TODO(b/121383831): Investigate into removing these special cases.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
ValueError: If the static `inputs` dimensions don't match with the values
given in `maximum_shapes`.
ValueError: If the structure of inputs per replica does not match
the structure of `maximum_shapes`.
"""
return split_compile_and_replicate(
computation,
inputs,
infeed_queue,
device_assignment,
name,
maximum_shapes=maximum_shapes)[1]
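# Illustrative sketch (not from the original file): replicating a two-input
# computation across two replicas; the per-replica input tensors are
# hypothetical.
#
#   def computation(a, b):
#     return a + b
#
#   outputs = replicate(computation,
#                       inputs=[[a0, b0],   # replica 0
#                               [a1, b1]])  # replica 1
#   # outputs[r] holds the outputs of replica r.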
def _pad_all_input(inputs, padded_shapes):
"""Pad all input tensors given padded_shapes.
The real shape tensors will be concatenated with the padded original inputs.
Args:
inputs: The original inputs.
padded_shapes: A list of padded shapes for each input.
Returns:
The padded inputs and a PaddingMap list which maps the padded input
dimension to the real shape argument index.
"""
input_shape_tensors = []
for core_idx, inputs_per_core in enumerate(inputs):
for idx, input_tensor in enumerate(inputs_per_core):
if core_idx == 0:
input_shape_tensors.append([])
input_shape_tensors[idx].append(array_ops.shape(input_tensor))
maximum_shapes = []
for shapes_per_input in input_shape_tensors:
maximum_shapes.append(
math_ops.reduce_max(array_ops.stack(shapes_per_input), axis=0))
padded_inputs = []
real_shapes = []
padding_maps = []
for core_idx, inputs_per_core in enumerate(inputs):
padded_inputs.append([])
real_shapes.append([])
real_shape_idx = len(inputs_per_core) - 1
for idx, input_tensor in enumerate(inputs_per_core):
input_shape_tensor = input_shape_tensors[idx][core_idx]
input_shape = input_tensor.get_shape()
padded_shape = padded_shapes[idx]
# The static shape of inputs should be compatible with the given padded
# shapes.
input_shape.assert_is_compatible_with(padded_shape)
if input_shape.is_fully_defined():
# Do nothing if the shape of the whole tensor is already static.
padded_inputs[core_idx].append(input_tensor)
else:
# Only pad the non static shape dimension.
for i, s in enumerate(input_shape.dims):
if s.value is None:
if core_idx == 0:
real_shape_idx += 1
padding_map = dynamic_padding.PaddingMap()
padding_map.arg_index = idx
padding_map.shape_index = i
padding_map.padding_arg_index = real_shape_idx
padding_maps.append(padding_map)
real_shapes[core_idx].append(
math_ops.cast(input_shape_tensor[i], dtypes.uint32))
paddings = []
for i, s in enumerate(padded_shape.dims):
if input_shape.dims[i].value:
# Don't pad if input shape is already static.
padding = [0, 0]
else:
if s.value:
# Pad to the given maximum value.
padding = [0, s.value - input_shape_tensor[i]]
else:
# If maximum value is not given, then pad to the maximum dimension
# among all the cores.
padding = [0, maximum_shapes[idx][i] - input_shape_tensor[i]]
paddings.append(padding)
padded_input = array_ops.pad(input_tensor, paddings)
padded_inputs[core_idx].append(padded_input)
num_replicas = len(padded_inputs)
for i in range(num_replicas):
padded_inputs[i].extend(real_shapes[i])
return padded_inputs, padding_maps
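# Illustrative sketch (not from the original file): padding only applies to
# dimensions whose static size is unknown. The placeholders below are
# hypothetical.
#
#   x0 = array_ops.placeholder(dtypes.float32, shape=[None, 5])
#   x1 = array_ops.placeholder(dtypes.float32, shape=[None, 5])
#   padded, maps = _pad_all_input([[x0], [x1]],
#                                 [tensor_shape.TensorShape([None, 5])])
#   # Each replica's x is padded along dimension 0 to the runtime maximum,
#   # and a uint32 real-size scalar is appended to each replica's input list
#   # so the computation can recover the true size.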
def split_compile_and_replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None,
use_tpu=True,
maximum_shapes=None):
"""Builds graph operators that runs compilation and replicated computation.
This is a lower level interface than replicate that returns a separate compile
and execute output tensor. In the generated graph the compile op feeds into
the execute op and no additional compilation is incurred when running the
compile op before the execute op. The compile op returns additional
information about the compilation but does not return the compiled program.
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs. Each input can be a nested structure
containing values that are convertible to tensors. Note that passing an
      N-dimensional list of compatible values will result in an N-dimensional
      list of scalar tensors rather than a single rank-N tensor. If you need
      different behavior, convert parts of inputs to tensors with
      `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU
      backends. Currently, this supports only a default placement (the
      computation is placed on a GPU if one is available, and on a CPU if not).
maximum_shapes: A nested structure of tf.TensorShape representing the shape
to which the respective component of each input element in each replica
should be padded. Any unknown dimensions (e.g.
tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
object) will be padded to the maximum size of that dimension over all
replicas. Note that if the input dimension is already static, we won't do
padding on it and we require the maximum_shapes to have the same value or
None on that dimension. The structure of `maximum_shapes` needs to be the
same as `inputs[0]`.
Returns:
A list of lists with the first list corresponding to the compile op and the
second a list of output tensors, indexed by `[replica_num][output_num]`.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
ValueError: If the static `inputs` dimensions don't match with the values
given in `maximum_shapes`.
ValueError: If the structure of inputs per replica does not match
the structure of `maximum_shapes`.
"""
del name
inputs = [[]] if inputs is None else inputs
metadata_kwargs = {}
if device_assignment is not None:
# Turn the Numpy array into a flattened list so we can pass it as an
# operator attribute.
metadata_kwargs = {
"topology":
device_assignment.topology.serialized(),
"device_assignment":
device_assignment.core_assignment.flatten().tolist()
}
# TODO(phawkins): remove this case after the forward compatibility window
# expires on 2018-10-5.
if api_compat.forward_compatible(2018, 10, 5):
metadata_kwargs["num_cores_per_replica"] = (
device_assignment.num_cores_per_replica)
else:
metadata_kwargs["computation_shape"] = [
device_assignment.num_cores_per_replica
]
if ((not isinstance(inputs, list)) or
any(not isinstance(inp, (list, tuple)) for inp in inputs)):
raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")
num_replicas = len(inputs)
# No replicas? Nothing to do.
if num_replicas == 0:
return []
# Checks all replicas have the same structure.
for i in xrange(1, num_replicas):
nest.assert_same_structure(inputs[0], inputs[i])
# Flatten inputs.
flat_inputs = [
nest.flatten(per_replica_input) for per_replica_input in inputs
]
# Converts inputs to Tensors.
flat_inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in flat_inputs]
# Verifies that all replicas have matching numbers and types of inputs
flat_input_types = [x.dtype for x in flat_inputs[0]]
input_arity = len(inputs[0])
flat_input_arity = len(flat_input_types)
for i in range(num_replicas):
if len(inputs[i]) != input_arity:
raise ValueError("Replicas must have the same number of inputs. "
"Replica 0 had {} inputs, replica {} had {} "
"inputs.".format(input_arity, i, len(inputs[i])))
types = [x.dtype for x in flat_inputs[i]]
if types != flat_input_types:
raise ValueError("Replicas must have matching input types. Replica 0 had "
"input types {}, replica {} had input types {}".format(
flat_input_types, i, types))
arg_error = xla.check_function_argument_count(
computation, input_arity, infeed_queue)
if arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s, but the computation needs %s" % (
input_arity, str([i.name for i in inputs[0]]), arg_error))
else:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s and %d additional inputs from infeed,"
" but the computation needs %s" % (input_arity, str(
[i.name
for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
arg_error))
if maximum_shapes:
if infeed_queue:
raise ValueError(
"Dynamic input shapes are not supported with infeed queues")
# Make sure maximum_shapes has the same structure as inputs.
nest.assert_same_structure(inputs[0], maximum_shapes, check_types=False)
# Flatten padded shapes.
flat_maximum_shapes = nest.flatten(maximum_shapes)
flat_maximum_shapes = [
tensor_shape.TensorShape(s) for s in flat_maximum_shapes
]
flat_inputs, padding_maps = _pad_all_input(flat_inputs, flat_maximum_shapes)
serialized_padding_maps = []
for padding_map in padding_maps:
serialized_padding_maps.append(padding_map.SerializeToString())
metadata_kwargs["padding_map"] = serialized_padding_maps
metadata_kwargs["step_marker_location"] = getattr(
computation, "step_marker_location", "STEP_MARK_AT_ENTRY")
graph = ops.get_default_graph()
# Fan-in: Builds a TPUReplicatedInput node for each input.
flat_replicated_inputs = []
for i in range(0, len(flat_inputs[0])):
replicas = [flat_inputs[replica][i] for replica in xrange(num_replicas)]
flat_replicated_inputs.append(
tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))
if isinstance(graph, func_graph.FuncGraph):
    # When we are in a TensorFlow 2.0 function, 'graph' will be a FuncGraph
    # object. If both the outside graph and this function have a TPU cluster,
    # they will have the same cluster name, which will cause problems (because
    # we lower functional ops in TensorFlow 2.0). Append the function name to
    # 'cluster_name' to avoid cluster-name collisions.
cluster_name = graph.unique_name("cluster_" + graph.name)
else:
cluster_name = graph.unique_name("cluster")
pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
context = TPUReplicateContext(
name=cluster_name, num_replicas=num_replicas, pivot=pivot)
try:
context.Enter()
metadata = tpu_ops.tpu_replicate_metadata(
num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)
with tpu_function.tpu_shard_context(
num_replicas), ops.control_dependencies([metadata]):
# Add identity ops so even unused inputs are "consumed" by the
# computation. This is to avoid orphaned TPUReplicatedInput nodes.
# TODO(phawkins): consider instead pruning unused TPUReplicatedInput
# and eliding trivial TPUReplicatedInput/TPUReplicatedOutput pairs.
flat_replicated_inputs = [
array_ops.identity(x, name="replicated_input_{}".format(i))
for i, x in enumerate(flat_replicated_inputs)
]
for i in flat_replicated_inputs:
# pylint: disable=protected-access
        # Add an attribute to the identity nodes so that they can be removed
        # in the encapsulate TPU computation pass if unused. However, we don't
        # remove inputs when dynamic padding is enabled.
# TODO(rxsang): Use other ways except argument index in padding_map so
# outside compilation can work with dynamic padding correctly.
if maximum_shapes is None:
i.op._set_attr("_tpu_input_identity",
attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
# Unflatten the computation inputs to match original input structure.
computation_inputs = nest.pack_sequence_as(
structure=inputs[0],
flat_sequence=flat_replicated_inputs[:flat_input_arity])
# If there is an infeed queue, adds the dequeued values to the
# computation's inputs.
if infeed_queue is not None:
infeed_queue.set_number_of_shards(num_replicas)
for t in infeed_queue.generate_dequeue_op():
computation_inputs.append(t)
# Only resource variables work inside a TPU computation, so turn on
# resource variables for the computation.
# TODO(phawkins): consider removing this code. It will
# be less confusing to clients if they knowingly choose to use resource
# variables.
# Partitioned variables is not supported (b/112311320).
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
saved_custom_getter = vscope.custom_getter
def custom_getter(getter, name, *args, **kwargs):
"""Variables on TPU have a few restrictions."""
partitioner = kwargs["partitioner"]
if partitioner is not None:
kwargs["partitioner"] = None
logging.warning(
"Partitioned variables are not supported on TPU. Got "
"`partitioner` that is {} for variable {}. "
"Setting `partitioner` to `None`."
.format(partitioner, name))
if saved_custom_getter is None:
return getter(name, *args, **kwargs)
else:
return saved_custom_getter(getter, name, *args, **kwargs)
vscope.set_use_resource(True)
vscope.set_custom_getter(custom_getter)
outputs = computation(*computation_inputs)
vscope.set_use_resource(saved_use_resource)
vscope.set_custom_getter(saved_custom_getter)
outputs_is_flat = xla.is_flat(outputs)
if outputs_is_flat:
output_tensors, control_deps = _postprocess_flat_outputs(outputs)
else:
output_tensors, control_deps = _postprocess_non_flat_outputs(outputs)
# tensor_tracer imports tpu.py. Local import to tensor_tracer to avoid
# import-cycle
# pylint: disable=g-import-not-at-top
from tensorflow.python.tpu import tensor_tracer
# pylint: enable=g-import-not-at-top
if tensor_tracer.TensorTracer.is_enabled():
tt = tensor_tracer.TensorTracer()
output_tensors = tt.trace_tpu(ops.get_default_graph(),
output_tensors, control_deps,
num_replicas)
context.ExitResult(output_tensors)
finally:
context.report_unsupported_operations()
context.Exit()
host_compute_core = context.HostComputeCore()
if host_compute_core:
attr_value = attr_value_pb2.AttrValue()
attr_value.list.s.extend([compat.as_bytes(x) for x in host_compute_core])
metadata._set_attr("host_compute_core", attr_value) # pylint: disable=protected-access
with ops.control_dependencies([metadata]):
if use_tpu:
compile_status = tpu_ops.tpu_compilation_result()
op = compile_status.op
attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))
op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value) # pylint: disable=protected-access
else:
compile_status = control_flow_ops.no_op(name="compilation_status")
if not output_tensors:
# Returns a list of NoOps dependent on the replication Op, indexed by
# [replica_num].
return [
compile_status,
[
control_flow_ops.group(control_deps, name="shard_%d" % i)
for i in range(num_replicas)
]
]
# Fan-out: Builds a TPUReplicatedOutput node for each output.
replicated_outputs = [[] for i in xrange(num_replicas)]
for i, t in enumerate(output_tensors):
# Fan-out: Builds a TPUReplicatedOutput node for each output.
ys = tpu_ops.tpu_replicated_output(
t, num_replicas, name="output{}".format(i))
# Wraps the outputs in identity operators so the names of any possible
# `fetch` nodes are preserved by the replication rewrite.
with ops.control_dependencies(control_deps):
for replica in xrange(num_replicas):
replicated_outputs[replica].append(
array_ops.identity(
ys[replica], name="output_%d_shard_%d" % (i, replica)))
if not outputs_is_flat:
replicated_outputs = [
nest.pack_sequence_as(outputs, replica_outs)
for replica_outs in replicated_outputs
]
return [compile_status, replicated_outputs]
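# Illustrative sketch (not from the original file): the lower-level interface
# exposes the compile op separately, so compilation errors can be surfaced
# before execution. The input tensors are hypothetical.
#
#   compile_op, per_replica_outputs = split_compile_and_replicate(
#       computation, inputs=[[x0], [x1]])
#   # sess.run(compile_op) checks compilation; running it first incurs no
#   # extra compilation when per_replica_outputs is executed afterwards.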
def _postprocess_flat_outputs(outputs):
"""Validates non-flat outputs, add backs device assignments and other attrs.
Args:
outputs: Output from `computation` inside `tpu.rewrite`.
Returns:
Tensors and Operations extracted from outputs.
"""
  # The following code segment preserves legacy behavior. Previously we only
  # supported flat outputs, so for consistency it was convenient to convert
  # even a single element into a tuple. Now that we support arbitrary output
  # structures, this is no longer necessary.
# TODO(b/121383831): Migrate all legacy use cases and delete this special
# case.
# If the computation returns `None`, make it an empty tuple.
if outputs is None:
outputs = tuple()
# If the computation only returned one value, makes it a tuple.
if not isinstance(outputs, collections.Sequence):
outputs = (outputs,)
# Append `no_op` here so that fetching any return value of this function
# will trigger TPUExecute node.
outputs += (control_flow_ops.no_op(),)
try:
with ops.device(core(0)):
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
"convertible to Tensors. Got '%s'" % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU functions must return zero-or more Tensor values followed by "
"zero or more Operations.")
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
new_output_tensors = []
for t in output_tensors:
with ops.device(t.device if t.device else core(0)):
o = array_ops.identity(t)
# pylint: disable=protected-access
o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
new_output_tensors.append(o)
return new_output_tensors, output_operations
def _postprocess_non_flat_outputs(outputs):
"""Validates non-flat outputs, add backs device assignments and other attrs.
Args:
outputs: Output from `computation` inside `tpu.rewrite`.
Returns:
    Tensors extracted from outputs, and an empty list because Operations are
    not allowed in non-flat outputs.
"""
# Flatten output items.
flat_outputs = nest.flatten(outputs)
# Convert all non-Operation outputs to Tensors.
for i, o in enumerate(flat_outputs):
if isinstance(o, ops.Operation):
raise ValueError(
"tpu.rewrite does not support Operation as return value in non-flat "
"output structure. You can set returned Operations as control "
"dependencies of returned Tensors so Operations are triggered when "
'Tensors are evaluated. Operation found: "%s"' % o.name)
try:
o = ops.convert_to_tensor(o)
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
'convertible to Tensors. Got error: "%s"' % str(e))
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
with ops.device(core(0)):
o = array_ops.identity(o)
# pylint: disable=protected-access
o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
flat_outputs[i] = array_ops.identity(o)
# All flat_outputs are Tensors, and no Operations.
return flat_outputs, []
def split_compile_and_shard(computation,
inputs=None,
num_shards=1,
input_shard_axes=None,
outputs_from_all_shards=True,
output_shard_axes=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Shards `computation` for parallel execution.
`inputs` must be a list of Tensors or None (equivalent to an empty list), each
of which has a corresponding split axis (from `input_shard_axes`). Each input
is split into `num_shards` pieces along the corresponding axis, and
computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
    ... = split_compile_and_shard(computation, ...)
If `outputs_from_all_shards` is true, the outputs from all shards of
  `computation` are concatenated back together along their `output_shard_axes`.
Otherwise, each output is taken from an arbitrary shard.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty list). Each
      input tensor has a corresponding shard axis, given by `input_shard_axes`,
which must have size divisible by `num_shards`.
num_shards: The number of shards.
input_shard_axes: A list of dimensions along which to shard `inputs`, or
`None`. `None` means "shard all inputs along dimension 0". If not `None`,
there must be one dimension per input.
outputs_from_all_shards: Boolean or list of boolean. For each output, if
`True`, outputs from all shards are concatenated along the corresponding
`output_shard_axes` entry. Otherwise, each output is taken
from an arbitrary shard. If the argument is a boolean, the argument's
value is used for each output.
output_shard_axes: A list of dimensions along which to concatenate the
outputs of `computation`, or `None`. `None` means "concatenate all outputs
along dimension 0". If not `None`, there must be one dimension per output.
Ignored if `outputs_from_all_shards` is False.
infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
of `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A tuple of (compile op, [output tensors]).
Raises:
ValueError: If num_shards <= 0
ValueError: If len(input_shard_axes) != len(inputs)
ValueError: If len(output_shard_axes) != len(outputs from `computation`)
"""
# TODO(phawkins): consider adding support for broadcasting Tensors passed as
# inputs.
if num_shards <= 0:
raise ValueError("num_shards must be a positive integer.")
inputs = [] if inputs is None else inputs
if not isinstance(inputs, list):
raise TypeError("tpu.shard()'s inputs must be a list of Tensors or None.")
# Converts inputs to Tensors.
inputs = [ops.convert_to_tensor(x) for x in inputs]
if input_shard_axes is None:
input_shard_axes = [0] * len(inputs)
if len(inputs) != len(input_shard_axes):
raise ValueError("Length of input_shard_axes must be equal to the number "
"of inputs.")
if inputs:
# Splits the `inputs` along the corresponding `input_shard_axes`, giving
# lists with layout [input][shard]
split_inputs = [
array_ops.split(x, num_shards, axis=axis)
for (axis, x) in zip(input_shard_axes, inputs)]
# Transposes the input lists to have layout [shard][input]
transposed_inputs = [list(i) for i in zip(*split_inputs)]
else:
transposed_inputs = [[]] * num_shards
compile_op, outputs = split_compile_and_replicate(
computation,
transposed_inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
# There must be at least one shard since num_shards > 0.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
if isinstance(outputs[0], ops.Operation):
# pylint: enable=indexing-exception
# There were no outputs from the computation and replicate returned a list
# of NoOps with control dependencies on the computation. Return the first
# one so it can be used as a control dependency or fetch node.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return compile_op, [outputs[0]]
# pylint: enable=indexing-exception
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
num_outputs = len(outputs[0])
# pylint: enable=indexing-exception
if output_shard_axes is None:
output_shard_axes = [0] * num_outputs
if num_outputs != len(output_shard_axes):
raise ValueError("Length of output_shard_axes must be equal to the number "
"of outputs.")
if isinstance(outputs_from_all_shards, bool):
outputs_from_all_shards = [outputs_from_all_shards] * num_outputs
if num_outputs != len(outputs_from_all_shards):
raise ValueError("Length of outputs_from_all_shards must be equal to the "
"number of outputs.")
results = []
for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
zip(*outputs)):
if all_shards:
# Concatenate all of the outputs together (use stack for scalars).
shape = x[0].shape
is_scalar = shape is not None and (shape.ndims == 0)
results.append((array_ops.stack(list(x)) if is_scalar
else array_ops.concat(list(x), axis=axis)))
else:
# TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
results.append(x[0])
return compile_op, results
@tf_export(v1=["tpu.shard"])
def shard(computation,
inputs=None,
num_shards=1,
input_shard_axes=None,
outputs_from_all_shards=True,
output_shard_axes=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Shards `computation` for parallel execution.
`inputs` must be a list of Tensors or None (equivalent to an empty list), each
of which has a corresponding split axis (from `input_shard_axes`). Each input
is split into `num_shards` pieces along the corresponding axis, and
computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
TODO(phawkins): consider adding support for broadcasting Tensors passed
as inputs.
If `outputs_from_all_shards` is true, the outputs from all shards of
  `computation` are concatenated back together along their `output_shard_axes`.
Otherwise, each output is taken from an arbitrary shard.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty list). Each
      input tensor has a corresponding shard axis, given by `input_shard_axes`,
which must have size divisible by `num_shards`.
num_shards: The number of shards.
input_shard_axes: A list of dimensions along which to shard `inputs`, or
`None`. `None` means "shard all inputs along dimension 0". If not `None`,
there must be one dimension per input.
outputs_from_all_shards: Boolean or list of boolean. For each output, if
`True`, outputs from all shards are concatenated along the corresponding
`output_shard_axes` entry. Otherwise, each output is taken
from an arbitrary shard. If the argument is a boolean, the argument's
value is used for each output.
output_shard_axes: A list of dimensions along which to concatenate the
outputs of `computation`, or `None`. `None` means "concatenate all outputs
along dimension 0". If not `None`, there must be one dimension per output.
Ignored if `outputs_from_all_shards` is False.
infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
of `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A list of output tensors.
Raises:
ValueError: If num_shards <= 0
ValueError: If len(input_shard_axes) != len(inputs)
ValueError: If len(output_shard_axes) != len(outputs from `computation`)
"""
return split_compile_and_shard(
computation,
inputs=inputs,
num_shards=num_shards,
input_shard_axes=input_shard_axes,
outputs_from_all_shards=outputs_from_all_shards,
output_shard_axes=output_shard_axes,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)[1]
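# Illustrative sketch (not from the original file): sharding a batch 4 ways
# along dimension 0; `batch` is a hypothetical tensor whose leading dimension
# is divisible by num_shards.
#
#   def computation(x):
#     return math_ops.reduce_sum(x, axis=1, keepdims=True)
#
#   [summed] = shard(computation, inputs=[batch], num_shards=4)
#   # summed concatenates the four shards' outputs along dimension 0.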
@tf_export(v1=["tpu.batch_parallel"])
def batch_parallel(computation,
inputs=None,
num_shards=1,
infeed_queue=None,
device_assignment=None,
name=None):
"""Shards `computation` along the batch dimension for parallel execution.
Convenience wrapper around shard().
`inputs` must be a list of Tensors or None (equivalent to an empty list).
Each input is split into `num_shards` pieces along the 0-th dimension, and
computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
    ... = batch_parallel(computation, ...)
The outputs from all shards are concatenated back together along their 0-th
dimension.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty list). The
0-th dimension of each Tensor must have size divisible by `num_shards`.
num_shards: The number of shards.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A list of output tensors.
Raises:
ValueError: If `num_shards <= 0`
"""
return shard(
computation,
inputs,
num_shards=num_shards,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
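# Illustrative sketch (not from the original file): batch_parallel is just
# shard() with every shard axis fixed at the batch (0-th) dimension; model_fn
# and features are hypothetical.
#
#   [logits] = batch_parallel(model_fn, inputs=[features], num_shards=8)
#   # equivalent to:
#   [logits] = shard(model_fn, inputs=[features], num_shards=8,
#                    input_shard_axes=[0], output_shard_axes=[0])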
@tf_export(v1=["tpu.rewrite"])
def rewrite(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Rewrites `computation` for execution on a TPU system.
Args:
computation: A Python function that builds a computation to apply to the
input. If the function takes n inputs, 'inputs' should be a list of n
tensors.
`computation` may return a list of operations and tensors. Tensors must
come before operations in the returned list. The return value of
`rewrite` is a list of tensors corresponding to the tensors from the
output of `computation`.
All `Operation`s constructed during `computation` will be executed when
evaluating any of the returned output tensors, not just the ones returned.
inputs: A list of input tensors or `None` (equivalent to an empty list).
Each input can be a nested structure containing values that are
      convertible to tensors. Note that passing an N-dimensional list of
      compatible values will result in an N-dimensional list of scalar tensors
      rather than a single rank-N tensor. If you need different behavior,
      convert parts of inputs to tensors with `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: if not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. May be omitted for a single-core computation, in which
case the core attached to task 0, TPU device 0 is used.
name: (Deprecated) Does nothing.
Returns:
Same data structure as if computation(*inputs) is called directly with some
exceptions for correctness. Exceptions include:
1) None output: a NoOp would be returned which control-depends on
computation.
2) Single value output: A tuple containing the value would be returned.
3) Operation-only outputs: a NoOp would be returned which
control-depends on computation.
TODO(b/121383831): Investigate into removing these special cases.
"""
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return replicate(
computation,
None if inputs is None else [inputs],
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)[0]
# pylint: enable=indexing-exception
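# Illustrative sketch (not from the original file): rewrite() is the
# single-replica entry point; it wraps `inputs` in a one-element replica list
# and returns that replica's outputs. `t` is a hypothetical tensor.
#
#   def computation(x):
#     return x * 2.0
#
#   outputs = rewrite(computation, inputs=[t])
#   doubled = outputs[0]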
# Operations that indicate some error in the user's inference graph.
_BLACKLISTED_INFERENCE_OPS = set([
"ReadVariableOp",
"AssignVariableOp",
"AssignAddVariableOp",
"AssignSubVariableOp",
"VarHandleOp",
"Variable",
"VariableV2",
])
def under_tpu_inference_context():
"""Check if it is currently under `tpu.rewrite_for_inference()`."""
graph = ops.get_default_graph()
context = graph._get_control_flow_context() # pylint: disable=protected-access
while context:
if isinstance(context, _TPUInferenceContext):
return True
context = context.outer_context
return False
class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside a TPU inference computation.
  The primary role of `_TPUInferenceContext` is to sanity-check operators inside
a tpu.rewrite_for_inference() computation.
"""
def __init__(self, name):
super(_TPUInferenceContext, self).__init__()
self._name = name
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
# pylint: disable=protected-access
if op.type in _BLACKLISTED_INFERENCE_OPS:
raise NotImplementedError(
"Operation of type %s (%s) is not supported on the TPU for inference."
" Execution will fail if this op is used in the graph. Make sure your"
" variables are using variable_scope." % (op.type, op.name))
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
result = val
if self._outer_context:
result = self._outer_context.AddValue(val)
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
@property
def grad_state(self):
return None
def validate_inference_rewrite_for_variables(graph):
"""Validates whether rewrite_for_inference() 'worked' for variables.
The rewrite_for_inference() method is supposed to append GuaranteeConstOps
after ReadVariableOps, but this mechanism works only if you are using
tf.compat.v1.get_variable() to create and access variables in your tpu
computation. This validation method can be called immediately after calling
  tpu.rewrite_for_inference() to check whether GuaranteeConst ops were added
to the graph.
Typical usages:
tpu.validate_inference_rewrite_for_variables(
tf.compat.v1.get_default_graph())
tpu.validate_inference_rewrite_for_variables(sess.graph)
Args:
graph: The graph which needs to be validated.
Raises:
RuntimeError: if validation failed.
"""
if not any(x.type == "GuaranteeConst" for x in graph.get_operations()):
raise RuntimeError(
"No GuaranteeConst ops found in the graph after running "
"tpu.rewrite_for_inference(...). Please check that you are using "
"tf.get_variable() to create and access variables in your tpu "
"computation.")
def rewrite_for_inference(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Rewrites `computation` for inference on a TPU system.
Other than 'rewriting' the computation to run on a TPU, if using variables
in your computation, it moves the ReadVariableOps outside the TPU
computation, and adds GuaranteeConst ops just after the ReadVariableOps.
This mechanism works only if you are using tf.compat.v1.get_variable() to
  create and access variables in your tpu computation. You can validate
  whether this worked by calling the validate_inference_rewrite_for_variables()
  method immediately after this method, to check whether GuaranteeConst ops
  were added to the graph.
Args:
computation: A Python function that builds a computation to apply to the
input. If the function takes n inputs, 'inputs' should be a list of n
tensors. If the function returns m outputs, rewrite will return a list of
m tensors.
inputs: A list of input tensors or `None` (equivalent to an empty list).
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: if not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. May be omitted for a single-core computation, in which
case the core attached to task 0, TPU device 0 is used.
name: The name of the operator.
Returns:
A list of output tensors.
"""
def guarantee_const_getter(getter, name, *args, **kwargs):
with ops.control_dependencies(None):
return array_ops.guarantee_const(
getter(name, *args, **kwargs), name=name + "/GuaranteeConst")
def wrapped_computation(*args, **kwargs):
"""Execute computation under `_TPUInferenceContext`."""
context = _TPUInferenceContext(
name=ops.get_default_graph().unique_name("rewrite_for_inference"))
try:
context.Enter()
vscope = variable_scope.get_variable_scope()
prev_custom_getter = vscope.custom_getter
prev_caching_device = vscope.caching_device
vscope.set_custom_getter(guarantee_const_getter)
vscope.set_caching_device(lambda op: op.device)
result = computation(*args, **kwargs)
vscope.set_custom_getter(prev_custom_getter)
vscope.set_caching_device(prev_caching_device)
finally:
context.Exit()
return result
# pylint: disable=undefined-variable
return rewrite(
wrapped_computation,
inputs=inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
# pylint: enable=undefined-variable
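# [Editor's sketch, not part of the original file] A minimal use of
# rewrite_for_inference() plus the validation helper above, assuming the model
# creates variables via tf.compat.v1.get_variable; `images`, the shapes, and
# the TPU session setup are hypothetical.
#
#   import tensorflow.compat.v1 as tf
#   from tensorflow.python.tpu import tpu
#
#   def model_fn(images):
#     w = tf.get_variable("w", shape=[784, 10])
#     return tf.matmul(images, w)
#
#   images = tf.placeholder(tf.float32, shape=[None, 784])
#   [logits] = tpu.rewrite_for_inference(model_fn, inputs=[images])
#   tpu.validate_inference_rewrite_for_variables(tf.get_default_graph())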
def prune_unconnected_ops_from_xla(prune_graph):
"""Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE.
Args:
prune_graph: A tensorflow graph from which we wish to prune unconnected ops
as listed in _UNCONNECTED_OPS_TO_PRUNE. In general, these ops should have
no inputs and no consumers. These can often be left behind due to graph
construction rewiring (for instance TF-Hub). While they never execute,
they will cause XLA compile to fail so we strip them from XLA compile by
removing the tpu_replicate attribute.
"""
# Scan over the top level graph and all function graphs.
for graph in [prune_graph] + list(prune_graph._functions.values()): # pylint: disable=protected-access
for op in graph.get_operations():
if op.type not in _UNCONNECTED_OPS_TO_PRUNE:
continue
outputs_consumed = False
for output in op.outputs:
if output.consumers():
outputs_consumed = True
break
if not outputs_consumed:
logging.info(
"Pruning OP %s of type %s from XLA Compile due to "
"it being disconnected.", op.name, op.type)
op._clear_attr(_TPU_REPLICATE_ATTR) # pylint: disable=protected-access
|
tensorflow-master
|
tensorflow/python/tpu/tpu.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Operations for handling session logging and shutdown notifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from google.protobuf import text_format
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
_WATCHDOG = None
class CoordinatorResetError(errors.AbortedError):
"""Raised when the monitored session should reset."""
def __init__(self):
errors.AbortedError.__init__(
self, None, None, 'Resetting session loop due to worker shutdown.')
def _clone_session(session, graph=None):
return session_lib.Session(
target=session.sess_str,
config=session._config, # pylint: disable=protected-access
graph=graph if graph else session.graph)
class WorkerHeartbeatManager(object):
"""Manages the status/heartbeat monitor for a set of workers."""
def __init__(self, session, devices, heartbeat_ops, request_placeholder):
"""Construct a new WorkerHeartbeatManager.
(Prefer using `WorkerHeartbeatManager.from_devices` when possible.)
Args:
session: `tf.compat.v1.Session`, session to use for heartbeat operations.
devices: `list[string]` Set of devices to connect to.
heartbeat_ops: `list[tf.Operation]` Heartbeat operations.
request_placeholder: `tf.Placeholder[String]` Placeholder used to specify
the WorkerHeartbeatRequest protocol buffer.
"""
self._session = session
self._devices = devices
self._ops = heartbeat_ops
self._request_placeholder = request_placeholder
@staticmethod
def from_devices(session, devices):
"""Construct a heartbeat manager for the given devices."""
if not devices:
logging.error('Trying to create heartbeat manager with no devices?')
logging.info('Creating heartbeat manager for %s', devices)
request_placeholder = array_ops.placeholder(
name='worker_heartbeat_request', dtype=dtypes.string)
heartbeat_ops = []
for device in devices:
with ops.device(device):
heartbeat_ops.append(tpu_ops.worker_heartbeat(request_placeholder))
return WorkerHeartbeatManager(session, devices, heartbeat_ops,
request_placeholder)
def num_workers(self):
return len(self._devices)
def configure(self, message):
"""Configure heartbeat manager for all devices.
Args:
message: `event_pb2.WorkerHeartbeatRequest`
Returns: `None`
"""
logging.info('Configuring worker heartbeat: %s',
text_format.MessageToString(message))
self._session.run(self._ops,
{self._request_placeholder: message.SerializeToString()})
def ping(self, request=None, timeout_in_ms=5000):
"""Ping all workers, returning the parsed status results."""
if request is None:
request = event_pb2.WorkerHeartbeatRequest()
options = config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
results = self._session.run(
self._ops,
feed_dict={self._request_placeholder: request.SerializeToString()},
options=options)
parsed_results = [
event_pb2.WorkerHeartbeatResponse.FromString(res_pb)
for res_pb in results
]
logging.debug('Ping results: %s', parsed_results)
return parsed_results
def lame_workers(self):
"""Ping all workers, returning manager containing lame workers (or None)."""
ping_results = self.ping()
lame_workers = []
for ping_response, device, op in zip(ping_results, self._devices,
self._ops):
if ping_response.health_status != event_pb2.OK:
lame_workers.append((device, op))
if not lame_workers:
return None
bad_devices, bad_ops = zip(*lame_workers)
return WorkerHeartbeatManager(self._session, bad_devices, bad_ops,
self._request_placeholder)
def __repr__(self):
return 'HeartbeatManager(%s)' % ','.join(self._devices)
# Default timeout is set to allow other shutdown triggered operations (log
# flushing etc) to finish before terminating the worker.
def shutdown(self, wait_time_in_ms=60000):
"""Shutdown all workers after `shutdown_timeout_secs`."""
logging.info('Shutting down %s.', self)
req = event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(timeout_ms=wait_time_in_ms),
shutdown_mode=event_pb2.SHUTDOWN_AFTER_TIMEOUT)
self.configure(req)
# Wait for workers to shutdown.
sleep_sec = 10.0 + wait_time_in_ms / 1000
logging.info('Waiting %.2f seconds for worker shutdown.', sleep_sec)
time.sleep(sleep_sec)
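# [Editor's sketch, not part of the original file] Probing worker health with
# the manager above, assuming `sess` is a tf.compat.v1.Session connected to a
# TPU cluster (all_worker_devices is defined just below).
#
#   manager = WorkerHeartbeatManager.from_devices(
#       sess, all_worker_devices(sess))
#   for response in manager.ping(timeout_in_ms=10000):
#     print(response.health_status)
#   lame = manager.lame_workers()  # None when every worker reports OK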
def all_worker_devices(session):
"""Return a list of devices for each worker in the system."""
devices = session.list_devices()
devices_that_support_heartbeats = []
for device in devices:
name = device.name
# Pick devices that have a TPU but target the attached CPU
if ':TPU:0' in name and 'coordinator' not in name:
devices_that_support_heartbeats.append(name.replace('TPU', 'CPU'))
return devices_that_support_heartbeats
class WatchdogManager(threading.Thread):
"""Configures worker watchdog timer and handles periodic pings.
Usage:
# Ping workers every minute, shutting down workers if they haven't received
# a ping after 1 hour.
watchdog_manager = WatchdogManager(
ping_interval=60, shutdown_timeout=3600
)
# Use as a context manager, resetting watchdog on context exit:
with watchdog_manager:
session.run(...)
# Or set up globally; the watchdog will remain active until program exit.
watchdog_manager.configure_and_run()
"""
def __init__(self,
session,
devices=None,
ping_interval=60,
shutdown_timeout=3600):
"""Initialize a watchdog manager.
Args:
session: Session connected to worker devices. A cloned session and graph
will be created for managing worker pings.
devices: Set of devices to monitor. If none, all workers will be
monitored.
ping_interval: Time, in seconds, between watchdog pings.
shutdown_timeout: Time, in seconds, before watchdog timeout.
"""
threading.Thread.__init__(self)
self.ping_interval = ping_interval
self.shutdown_timeout = shutdown_timeout
self.daemon = True
self._config = session._config # pylint: disable=protected-access
self._target = session.sess_str
self._running = False
self._devices = devices
self._graph = None
self._session = None
self._worker_manager = None
def _reset_manager(self):
"""Reset the graph, session and worker manager."""
self._graph = ops.Graph()
self._session = session_lib.Session(
target=self._target,
graph=self._graph,
config=self._config,
)
if self._devices is None:
self._devices = all_worker_devices(self._session)
with self._graph.as_default():
self._worker_manager = WorkerHeartbeatManager.from_devices(
self._session, self._devices)
self._worker_manager.configure(
event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(
timeout_ms=self.shutdown_timeout * 1000,),
shutdown_mode=event_pb2.WAIT_FOR_COORDINATOR))
def configure_and_run(self):
logging.info(
'Enabling watchdog timer with %d second timeout '
'and %d second ping interval.', self.shutdown_timeout,
self.ping_interval)
self._reset_manager()
self._running = True
self.start()
def stop(self):
logging.info('Stopping worker watchdog.')
self._worker_manager.configure(
event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(timeout_ms=-1,),
shutdown_mode=event_pb2.NOT_CONFIGURED))
self._running = False
self.join()
def __enter__(self):
self.configure_and_run()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def run(self):
# Don't fetch logs or adjust timing: just ping the watchdog.
#
# If we hit an exception, reset our session as it is likely broken.
while self._running:
try:
self._worker_manager.ping(request=None)
time.sleep(self.ping_interval)
except errors.OpError as e:
# Catch any TF errors that occur so we don't stop sending heartbeats
logging.debug('Caught error while sending heartbeat: %s', e)
self._reset_manager()
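# [Editor's sketch, not part of the original file] Running the watchdog around
# a training loop, assuming `sess` is a session connected to the workers and
# `train_op` and `num_steps` exist; on context exit the worker timeouts are
# cleared.
#
#   watchdog = WatchdogManager(sess, ping_interval=60, shutdown_timeout=3600)
#   with watchdog:
#     for _ in range(num_steps):
#       sess.run(train_op)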
def start_worker_watchdog(session,
devices=None,
ping_interval=60,
shutdown_timeout=3600):
"""Start global worker watchdog to shutdown workers on coordinator exit."""
global _WATCHDOG
if _WATCHDOG is None:
# Ensure we can send a few pings before we timeout!
ping_interval = min(shutdown_timeout / 10., ping_interval)
_WATCHDOG = WatchdogManager(session, devices, ping_interval,
shutdown_timeout)
_WATCHDOG.configure_and_run()
class GracefulShutdownHook(session_run_hook.SessionRunHook):
"""Session hook that watches for shutdown events.
If a shutdown is indicated, `saver.save(checkpoint_prefix)` is executed, and a
SystemShutdown exception is raised to terminate the main session. If `saver`
is None the `SAVERS` collection will be read to find a saver.
`on_shutdown_hooks` is an optional list of functions that should be called
after checkpointing. The function is called with (`run_context`,
`all_workers`, `lame_workers`).
If `heartbeat_group` is not specified, it will default to all CPU workers
in the system.
"""
def __init__(self, checkpoint_prefix, saver=None, on_shutdown_hooks=None):
self._saver = saver
self._checkpoint_prefix = checkpoint_prefix
self._on_shutdown_hooks = on_shutdown_hooks if on_shutdown_hooks else []
# Worker heartbeats are managed independently of the main training graph.
self._graph = ops.Graph()
self._workers = None
self._session = None
self._heartbeat_supported = False
def after_create_session(self, training_session, coord): # pylint: disable=unused-argument
# N.B. We have to pull the global step here to avoid it being unavailable
# at checkpoint time; the graph has been frozen at that point.
if training_util.get_global_step() is None and self.saver() is not None:
raise ValueError(
'Saver defined but no global step. Run `get_or_create_global_step()`'
' in your model definition to allow checkpointing.')
with self._graph.as_default():
logging.info('Installing graceful shutdown hook.')
self._session = _clone_session(training_session, self._graph)
self._workers = WorkerHeartbeatManager.from_devices(
self._session, all_worker_devices(self._session))
self._heartbeat_supported = self._workers.num_workers() > 0
if self._heartbeat_supported:
try:
self._workers.configure(
event_pb2.WorkerHeartbeatRequest(
shutdown_mode=event_pb2.WAIT_FOR_COORDINATOR))
except errors.InvalidArgumentError:
logging.warn(
'TPU device does not support heartbeats. Failure '
'handling will be disabled.')
self._heartbeat_supported = False
else:
logging.warn(
'No workers support heartbeats. Failure handling will be disabled.')
def saver(self):
if self._saver:
return self._saver
savers = ops.get_collection(ops.GraphKeys.SAVERS)
if not savers:
return None
if not isinstance(savers, list):
return savers
if len(savers) > 1:
logging.error(
'Multiple savers in the SAVERS collection. On-demand checkpointing '
'will be disabled. Pass an explicit `saver` to the constructor to '
'override this behavior.')
return None
return savers[0]
def after_run(self, run_context, run_values):
del run_values
if not self._heartbeat_supported:
return
lame_workers = self._workers.lame_workers()
if lame_workers:
logging.info('ShutdownHook: lame workers found: %s', lame_workers)
if self.saver():
logging.info('ShutdownHook: saving checkpoint to %s',
self._checkpoint_prefix)
self.saver().save(
run_context.session,
self._checkpoint_prefix,
global_step=training_util.get_global_step(),
write_state=True,
)
else:
logging.info('ShutdownHook: no Saver defined.')
for fn in self._on_shutdown_hooks:
fn(run_context, self._workers, lame_workers)
class ResetComputation(object):
"""Hook to reset a TPUEstimator computation loop.
This hook shuts down all workers and resets the monitored session loop by
throwing a CoordinatorResetError.
"""
def __init__(self):
pass
def __call__(self, run_context, all_workers, lame_workers):
del run_context, lame_workers
all_workers.shutdown()
logging.info('Resetting coordinator.')
raise CoordinatorResetError()
class ShutdownLameWorkers(object):
"""Shutdown lamed workers.
Processing will continue normally (typically by waiting for the down
workers to be restarted).
"""
def __init__(self):
pass
def __call__(self, run_context, all_workers, lame_workers):
lame_workers.shutdown()
class ShutdownAllWorkers(object):
"""Shutdown all workers.
Processing will continue normally (typically by waiting for the down
workers to be restarted).
"""
def __init__(self):
pass
def __call__(self, run_context, all_workers, lame_workers):
all_workers.shutdown()
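# [Editor's sketch, not part of the original file] Wiring the shutdown hook
# into a MonitoredTrainingSession, assuming `train_op` exists; the checkpoint
# path is hypothetical. ResetComputation() shuts down all workers and raises
# CoordinatorResetError so an outer loop can rebuild the session.
#
#   import tensorflow.compat.v1 as tf
#
#   hook = GracefulShutdownHook(
#       checkpoint_prefix='/tmp/model/model.ckpt',
#       on_shutdown_hooks=[ResetComputation()])
#   with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
#     while not sess.should_stop():
#       sess.run(train_op)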
|
tensorflow-master
|
tensorflow/python/tpu/session_support.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Defines the `Topology` class, that describes a TPU fabric topology."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf.tpu import topology_pb2
def _tpu_device_name(job, task, device):
"""Returns the device name for the TPU `device` on `task` of `job`."""
if job is None:
return "/task:%d/device:TPU:%d" % (task, device)
else:
return "/job:%s/task:%d/device:TPU:%d" % (job, task, device)
def _tpu_host_device_name(job, task):
"""Returns the device name for the CPU device on `task` of `job`."""
if job is None:
return "/task:%d/device:CPU:0" % task
else:
return "/job:%s/task:%d/device:CPU:0" % (job, task)
class Topology(object):
"""Describes a set of TPU devices.
Represents both the shape of the physical mesh, and the mapping between
TensorFlow TPU devices to physical mesh coordinates.
"""
def __init__(self, serialized=None, mesh_shape=None, device_coordinates=None):
"""Builds a Topology object.
If `serialized` is not `None`, the topology is parsed from `serialized` and
the other arguments are ignored. Otherwise, the topology is computed from
`mesh_shape` and `device_coordinates`.
Args:
serialized: A serialized `TopologyProto`, or `None`. If not `None`, the
serialized proto is parsed to discover the topology.
mesh_shape: A sequence of 3 positive integers, or `None`. If not `None`,
the shape of the TPU topology, in number of cores. Ignored if
`serialized` is not `None`.
device_coordinates: A rank 3 numpy array that describes the mapping from
TensorFlow TPU devices to TPU fabric coordinates, or `None`. Ignored
if `serialized` is not `None`.
Raises:
ValueError: If `serialized` does not describe a well-formed topology.
ValueError: If `serialized` is `None` and `mesh_shape` is not a sequence
of 3 positive integers.
ValueError: If `serialized` is `None` and `device_coordinates` is not a
rank 3 numpy int32 array that describes a valid coordinate mapping.
"""
self._serialized = serialized
if serialized:
self._parse_topology(serialized)
else:
self._mesh_shape = np.asarray(mesh_shape, dtype=np.int32)
self._device_coordinates = np.asarray(device_coordinates, np.int32)
if len(self._mesh_shape) != 3 or any(self._mesh_shape < 1):
raise ValueError("`mesh_shape` must be a sequence of 3 positive "
"entries; got {}".format(self._mesh_shape))
if (len(self._device_coordinates.shape) != 3 or
self._device_coordinates.shape[2] != len(self._mesh_shape)):
raise ValueError("`device_coordinates` must be a rank 3 int32 array "
"with minor dimension equal to the mesh shape rank")
self._topology_tasks, self._topology_devices = self._invert_topology()
def _parse_topology(self, serialized):
"""Parses a serialized `TopologyProto` into `self`."""
proto = topology_pb2.TopologyProto()
proto.ParseFromString(serialized)
self._mesh_shape = np.array(proto.mesh_shape, dtype=np.int32)
if len(self._mesh_shape) != 3 or any(self._mesh_shape < 1):
raise ValueError("`mesh_shape` must be a vector of size 3 with positive "
"entries; got {}".format(self._mesh_shape))
if proto.num_tasks < 0:
raise ValueError("`num_tasks` must be >= 0; got {}".format(
proto.num_tasks))
if proto.num_tpu_devices_per_task < 0:
raise ValueError("`num_tpu_devices_per_task` must be >= 0; got {}".format(
proto.num_tpu_devices_per_task))
expected_coordinates_size = (
proto.num_tasks * proto.num_tpu_devices_per_task * len(
proto.mesh_shape))
if len(proto.device_coordinates) != expected_coordinates_size:
raise ValueError("`device_coordinates` must have shape num_tasks ({}) * "
"num_tpu_devices_per_task ({}) * len(mesh_shape) ({}); "
"got shape {}".format(proto.num_tasks,
proto.num_tpu_devices_per_task,
proto.mesh_shape,
len(proto.device_coordinates)))
coords = np.array(proto.device_coordinates, dtype=np.int32)
if any(coords < 0):
raise ValueError("`device_coordinates` must be >= 0")
coords = coords.reshape((proto.num_tasks, proto.num_tpu_devices_per_task,
len(proto.mesh_shape)))
self._device_coordinates = coords
def _invert_topology(self):
"""Inverts a [task,device,axis] topology to [x,y,z] -> task/device maps."""
tasks = np.full(list(self.mesh_shape), -1, dtype=np.int32)
devices = np.full(list(self.mesh_shape), -1, dtype=np.int32)
for task in xrange(self.device_coordinates.shape[0]):
for device in xrange(self.device_coordinates.shape[1]):
x, y, z = self.device_coordinates[task, device, :]
tasks[x, y, z] = task
devices[x, y, z] = device
return tasks, devices
@property
def mesh_shape(self):
"""A rank 1 int32 array describing the shape of the TPU topology."""
return self._mesh_shape
@property
def mesh_rank(self):
"""Returns the number of dimensions in the mesh."""
return len(self._mesh_shape)
@property
def device_coordinates(self):
"""Describes the mapping from TPU devices to topology coordinates.
Returns:
A rank 3 int32 array with shape `[tasks, devices, axis]`.
`tasks` is the number of tasks in the TPU cluster, `devices` is the number
of TPU devices per task, and `axis` is the number of axes in the TPU
cluster topology. Each entry gives the `axis`-th coordinate in the
topology of a task/device pair. TPU topologies are 3-dimensional, with
dimensions `(x, y, core number)`.
"""
return self._device_coordinates
def task_ordinal_at_coordinates(self, device_coordinates):
"""Returns the TensorFlow task number attached to `device_coordinates`.
Args:
device_coordinates: An integer sequence describing a device's physical
coordinates in the TPU fabric.
Returns:
Returns the TensorFlow task number that contains the TPU device with those
physical coordinates.
"""
return self._topology_tasks[tuple(device_coordinates)]
def tpu_device_ordinal_at_coordinates(self, device_coordinates):
"""Returns the TensorFlow device number at `device_coordinates`.
Args:
device_coordinates: An integer sequence describing a device's physical
coordinates in the TPU fabric.
Returns:
Returns the TensorFlow device number within the task that is attached
to the device with those physical coordinates.
"""
return self._topology_devices[tuple(device_coordinates)]
def cpu_device_name_at_coordinates(self, device_coordinates, job=None):
"""Returns the CPU device attached to a logical core."""
return _tpu_host_device_name(
job, self._topology_tasks[tuple(device_coordinates)])
def tpu_device_name_at_coordinates(self, device_coordinates, job=None):
"""Returns the name of the TPU device assigned to a logical core."""
return _tpu_device_name(job,
self._topology_tasks[tuple(device_coordinates)],
self._topology_devices[tuple(device_coordinates)])
@property
def num_tasks(self):
"""Returns the number of TensorFlow tasks in the TPU slice."""
return self._device_coordinates.shape[0]
@property
def num_tpus_per_task(self):
"""Returns the number of TPU devices per task in the TPU slice."""
return self._device_coordinates.shape[1]
def serialized(self):
"""Returns the serialized form of the topology."""
if self._serialized is None:
proto = topology_pb2.TopologyProto()
proto.mesh_shape[:] = list(self._mesh_shape)
proto.num_tasks = self._device_coordinates.shape[0]
proto.num_tpu_devices_per_task = self._device_coordinates.shape[1]
proto.device_coordinates.extend(list(self._device_coordinates.flatten()))
self._serialized = proto.SerializeToString()
return self._serialized
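# [Editor's sketch, not part of the original file] Building a toy single-task
# 2x2x2 topology by hand and querying it. Real topologies normally come from
# the serialized proto returned by tpu.initialize_system().
#
#   import numpy as np
#
#   coords = np.array([[(x, y, z) for x in range(2)
#                       for y in range(2) for z in range(2)]], dtype=np.int32)
#   topology = Topology(mesh_shape=[2, 2, 2], device_coordinates=coords)
#   topology.task_ordinal_at_coordinates([1, 0, 1])        # -> 0
#   topology.tpu_device_ordinal_at_coordinates([1, 0, 1])  # -> 5
#   topology.tpu_device_name_at_coordinates([1, 0, 1], job='tpu_worker')
#   # -> '/job:tpu_worker/task:0/device:TPU:5'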
|
tensorflow-master
|
tensorflow/python/tpu/topology.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow_estimator.python.estimator.tpu.tpu_context import *
# pylint: enable=wildcard-import,unused-import
|
tensorflow-master
|
tensorflow/python/tpu/tpu_context.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Hook for asynchronous checkpointing.
This hook dispatches checkpoint writing operations in a separate thread to
allow execution to continue on the main thread.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class AsyncCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
Raises:
ValueError: If neither `save_steps` nor `save_secs` is set.
ValueError: If both `saver` and `scaffold` are set.
"""
logging.info("Create AsyncCheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._save_thread = None
self._write_graph_thread = None
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
self._summary_writer = None
self._global_step_tensor = None
self._last_checkpoint_step = None
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
# We write the graph and saver_def at the first call of after_create_session.
# We cannot do this in begin, since we let other hooks change the graph and
# add variables in begin; the graph is finalized after all begin calls.
def _write_graph_fn(self):
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
self._write_graph_thread = threading.Thread(target=_write_graph_fn,
args=[self])
self._write_graph_thread.start()
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
logging.info("Triggering checkpoint. %s", global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
if self._save_thread:
logging.info("Waiting for any pending checkpoints to finish.")
self._save_thread.join()
if self._write_graph_thread:
logging.info("Waiting for any pending write_graph to finish.")
self._write_graph_thread.join()
last_step = session.run(self._global_step_tensor)
if self._last_checkpoint_step != last_step:
self._save(session, last_step, asynchronous=False)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step, asynchronous=True):
"""Saves the latest checkpoint, returns should_stop."""
def _save_fn():
"""Run the saver process."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
start_time = time.time()
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
for l in self._listeners:
l.after_save(session, step)
end_time = time.time()
logging.info("Checkpoint actual writing time: (%.3f sec)",
end_time - start_time)
logging.info("Checkpoint finished for %d into %s.", step, self._save_path)
if not asynchronous:
self._last_checkpoint_step = step
_save_fn()
return
if self._save_thread is not None:
self._save_thread.join(timeout=0.1)
if self._save_thread.is_alive():
logging.info("Saver thread still in progress, skipping checkpoint.")
return
self._last_checkpoint_step = step
self._save_thread = threading.Thread(target=_save_fn)
self._save_thread.start()
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
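# [Editor's sketch, not part of the original file] Asynchronous checkpointing
# every 1000 steps, assuming a global step exists and `train_op` is defined;
# the checkpoint directory is hypothetical. The save runs on a background
# thread while training continues on the main thread.
#
#   import tensorflow.compat.v1 as tf
#
#   tf.train.get_or_create_global_step()
#   hook = AsyncCheckpointSaverHook('/tmp/model', save_steps=1000)
#   with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
#     while not sess.should_stop():
#       sess.run(train_op)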
|
tensorflow-master
|
tensorflow/python/tpu/async_checkpoint.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Helper context for running models with bfloat16."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
def _get_custom_getter():
"""Returns a custom getter that this class's methods must be called under.
All methods of this class must be called under a variable scope that was
passed this custom getter. Example:
```python
network = ConvNetBuilder(...)
with tf.compat.v1.variable_scope('cg',
custom_getter=network.get_custom_getter()):
network.conv(...)
# Call more methods of network here
```
Currently, this custom getter only does anything if self.use_tf_layers is
True. In that case, it causes variables to be stored as dtype
self.variable_type, then cast to the requested dtype, instead of directly
storing the variable as the requested dtype.
"""
def inner_custom_getter(getter, *args, **kwargs):
"""Custom getter that forces variables to have type self.variable_type."""
cast_to_bfloat16 = False
requested_dtype = kwargs['dtype']
if requested_dtype == dtypes.bfloat16:
# Only change the variable dtype if doing so does not decrease variable
# precision.
kwargs['dtype'] = dtypes.float32
cast_to_bfloat16 = True
var = getter(*args, **kwargs)
# This if statement is needed to guard the cast, because batch norm
# assigns directly to the return value of this custom getter. The cast
# makes the return value not a variable so it cannot be assigned. Batch
# norm variables are always in fp32 so this if statement is never
# triggered for them.
if cast_to_bfloat16:
var = math_ops.cast(var, dtypes.bfloat16)
return var
return inner_custom_getter
@tf_export(v1=['tpu.bfloat16_scope'])
@tf_contextlib.contextmanager
def bfloat16_scope():
"""Scope class for bfloat16 variables so that the model uses custom getter.
This enables variables to be read as bfloat16 type when using get_variable.
"""
with variable_scope.variable_scope(
'', custom_getter=_get_custom_getter()) as varscope:
yield varscope
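# [Editor's sketch, not part of the original file] Under the scope, a variable
# requested as bfloat16 is stored as float32 and cast on read; `tf` is
# tensorflow.compat.v1.
#
#   with tf.tpu.bfloat16_scope():
#     w = tf.get_variable('w', shape=[10, 10], dtype=tf.bfloat16)
#     # `w` reads as bfloat16 here; the underlying variable is float32.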
|
tensorflow-master
|
tensorflow/python/tpu/bfloat16.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU system metadata and associated tooling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu
_PINGING_MASTER_TIMEOUT_IN_MS = 5 * 60 * 1000  # 5 min
_RETRY_TIMES = 12 * 24 # 1 day
_INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS = 300 * 1000 # 5 mins
_TPU_DEVICE_REG = re.compile(r'.*task:(\d+)/.*device:TPU:(\d+)$')
_DEVICE_TYPE_REGEX = re.compile('.*device:([^:]+).*')
_DEFAULT_JOB_NAME = 'tpu_worker'
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'
_LOCAL_MASTERS = ('', 'local')
# _TPUSystemMetadata is used by TPUEstimator to hold TPU configuration,
# including num_cores and num_hosts.
_TPUSystemMetadata = collections.namedtuple('_TPUSystemMetadata', [
'num_cores',
'num_hosts',
'num_of_cores_per_host',
'topology',
'devices',
])
def _query_tpu_system_metadata(master_address, cluster_def=None,
query_topology=False):
"""Automatically detects the TPU system metadata in the system."""
tpu_core_count = 0
devices = []
device_dict = collections.defaultdict(list)
if context.executing_eagerly():
device_names = context.list_devices()
devices = []
# We want the output type to match in both eager and session mode
for name in device_names:
device_match = _DEVICE_TYPE_REGEX.match(name)
device_type = 'CPU'
if device_match:
device_type = device_match.group(1)
devices.append(session_lib._DeviceAttributes(name, device_type, 0, 0)) # pylint: disable=protected-access
else:
# TODO(b/120564445): Replace with standard library for retries.
retry_count = 1
while True:
logging.info('Querying Tensorflow master (%s) for TPU system metadata.',
master_address)
try:
with ops.Graph().as_default():
with session_lib.Session(
master_address,
config=get_session_config_with_timeout(
_PINGING_MASTER_TIMEOUT_IN_MS,
cluster_def)) as sess:
devices = sess.list_devices()
break
except errors.DeadlineExceededError:
msg = ('Failed to connect to the Tensorflow master. The TPU worker may '
'not be ready (still scheduling) or the Tensorflow master '
'address is incorrect: got (%s).' %
(master_address))
# TODO(xiejw): For local or grpc master we might not need retry logic
# here.
if retry_count <= _RETRY_TIMES:
logging.warning('%s', msg)
logging.warning('Retrying (%d/%d).', retry_count, _RETRY_TIMES)
retry_count += 1
else:
raise ValueError(msg)
for device in devices:
match = _TPU_DEVICE_REG.match(device.name)
if match:
host_id = match.group(1)
core_id = match.group(2)
device_dict[host_id].append(core_id)
tpu_core_count += 1
num_of_cores_per_host = 0
if tpu_core_count:
num_cores_per_host_set = set(
[len(core_ids) for core_ids in device_dict.values()])
if len(num_cores_per_host_set) != 1:
raise RuntimeError(
'The number of TPU cores on each host is not the same. This should '
'not happen. devices: {}'.format(devices))
num_of_cores_per_host = num_cores_per_host_set.pop()
topology = None
if query_topology:
if not tpu_core_count:
raise RuntimeError(
'Cannot find any TPU cores in the system (master address {}). '
'This usually means the master address is incorrect or the '
'TPU worker has some problems. Available devices: {}'.format(
master_address, devices))
topology = _obtain_topology(master_address, cluster_def)
# We sort the metadata devices so that downstream users get a sorted list
# for creating mirrored variables correctly.
def _sort_key(device):
spec = tf_device.DeviceSpec.from_string(device.name)
return (spec.job, spec.replica, spec.task, spec.device_type,
spec.device_index)
devices = tuple(sorted(devices, key=_sort_key))
metadata = _TPUSystemMetadata(
num_cores=tpu_core_count,
num_hosts=len(device_dict),
num_of_cores_per_host=num_of_cores_per_host,
topology=topology,
devices=devices)
if tpu_core_count:
logging.info('Found TPU system:')
logging.info('*** Num TPU Cores: %d', metadata.num_cores)
logging.info('*** Num TPU Workers: %d', metadata.num_hosts)
logging.info('*** Num TPU Cores Per Worker: %d',
metadata.num_of_cores_per_host)
for device in metadata.devices:
logging.info('*** Available Device: %s', device)
else:
logging.info('Failed to find TPU: %s', metadata)
return metadata
def _obtain_topology(master_address, cluster_def):
"""Obtains TPU fabric topology."""
try:
logging.info('Initializing TPU system (master: %s) to fetch topology '
'for model parallelism. This might take a while.',
master_address)
with ops.Graph().as_default():
session_config = get_session_config_with_timeout(
_INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS, cluster_def)
with session_lib.Session(
master_address, config=session_config) as sess:
topology = sess.run(tpu.initialize_system())
return topology
except errors.DeadlineExceededError:
raise ValueError(
'Failed to initialize TPU system with master (%s). '
'Please double check that the TPU system is functional.' % (
master_address))
def get_session_config_with_timeout(timeout_in_secs, cluster_def):
"""Returns a session given a timeout and a cluster configuration."""
config = config_pb2.ConfigProto(
operation_timeout_in_ms=timeout_in_secs, cluster_def=cluster_def)
return config
def master_job(master, cluster_def):
"""Returns the canonnical job name to use to place TPU computations on.
Args:
master: A `string` representing the TensorFlow master to use.
cluster_def: A ClusterDef object describing the TPU cluster.
Returns:
A string containing the job name, or None if no job should be specified.
Raises:
ValueError: If the user needs to specify a tpu_job_name, because we are
unable to infer the job name automatically, or if the user-specified job
names are inappropriate.
"""
# If the user specifies the tpu_job_name, use that.
if master in _LOCAL_MASTERS:
return None
if (not cluster_def or not cluster_def.job):
return _DEFAULT_JOB_NAME
job_names = set([job.name for job in cluster_def.job])
if _DEFAULT_JOB_NAME in job_names:
# b/37868888 tracks allowing ClusterSpec propagation to reuse job names.
raise ValueError('Currently, tpu_worker is not an allowed job name.')
if len(job_names) == 1:
return cluster_def.job[0].name
if len(job_names) == 2:
if _DEFAULT_COORDINATOR_JOB_NAME in job_names:
job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME)
return job_names.pop()
# TODO(b/67716447): Include more sophisticated heuristics.
raise ValueError(
'Could not infer TPU job name. Please specify a tpu_job_name as part '
'of your TPUConfig.')
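# [Editor's sketch, not part of the original file] How the inference above
# resolves a few representative configurations:
#
#   master_job('', cluster_def=None)          # -> None (local master)
#   master_job('grpc://10.0.0.1:8470', None)  # -> 'tpu_worker' (default)
#   # A two-job cluster {'coordinator', 'train'} drops the coordinator job
#   # and returns 'train'; a two-job cluster without 'coordinator', or one
#   # with three or more jobs, raises ValueError asking for an explicit
#   # tpu_job_name.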
|
tensorflow-master
|
tensorflow/python/tpu/tpu_system_metadata.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Helper library for handling infeed between hosts and TPUs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_sharding
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util import nest
def partition_or_replicate_on_host(tensor, dims):
"""Partitions or replicates the input tensor.
The ops inside this function are placed on the host side.
Args:
tensor: The input tensor which will be partitioned or replicated.
dims: A list of integers describing how to partition the input tensor.
Returns:
An iterator of `Tensor`s or a list of partitioned tensors.
"""
if dims is None:
return itertools.repeat(tensor)
dims = np.array(dims)
output = [tensor]
shape_list = np.array(tensor.shape.as_list())
quotients, remainders = np.divmod(shape_list, dims)
for axis, (quotient, remainder, dim, original_size) in enumerate(
zip(quotients, remainders, dims, shape_list)):
if dim <= 1:
continue
if remainder > 0:
# For each dimension that cannot be evenly partitioned, XLA assumes
# tensors are partitioned greedily: full slots of size ceil(size / dim)
# come first and the remainder fills the last slot. E.g. for a 2D tensor
# with shape (5, 14) and dims (2, 4), since 5 % 2 = 1 and 14 % 4 = 2,
# [5, 14] => [[(3, 4), (3, 4), (3, 4), (3, 2)],
#             [(2, 4), (2, 4), (2, 4), (2, 2)]]
ceil_ratio = quotient + 1
num_full_slots, left_over = np.divmod(original_size, ceil_ratio)
num_or_size_splits = [ceil_ratio] * num_full_slots + [left_over]
if len(num_or_size_splits) < dim:
num_or_size_splits += [0] * (dim - len(num_or_size_splits))
new_output = []
for x in output:
new_output.append(
array_ops.split(
x, num_or_size_splits=num_or_size_splits, axis=axis))
output = new_output
else:
output = [array_ops.split(x, int(dim), axis=axis) for x in output]
output = nest.flatten(output)
return output
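# [Editor's sketch, not part of the original file] The uneven case from the
# comment above, worked end to end: a (5, 14) tensor with dims (2, 4) splits
# into rows of height [3, 2] and columns of width [4, 4, 4, 2]; the order
# shown assumes row-major flattening of the resulting blocks.
#
#   import numpy as np
#   import tensorflow.compat.v1 as tf
#
#   t = tf.constant(np.zeros((5, 14), np.float32))
#   parts = partition_or_replicate_on_host(t, dims=[2, 4])
#   [p.shape.as_list() for p in parts]
#   # -> [[3, 4], [3, 4], [3, 4], [3, 2], [2, 4], [2, 4], [2, 4], [2, 2]]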
def _tag_sharding_attribute_for_dequeued_tensor(tensor, dims):
"""Tags appropriate XLA sharding attribute to the dequeued tensor.
The sharding attribute of the dequeued tensor will be a tuple.
Args:
tensor: The dequeued tensor on TPU.
dims: A list of integers describing how the tensor is partitioned.
Returns:
The same tensor with the xla_sharding attribute.
"""
if dims is None:
return xla_sharding.replicate(tensor, assign_tuple_sharding=True)
elif np.prod(dims) == 1:
return xla_sharding.assign_device(tensor, 0, assign_tuple_sharding=True)
else:
tile_assignment = np.arange(np.prod(dims)).reshape(dims)
return xla_sharding.tile(
tensor=tensor,
tile_assignment=tile_assignment,
assign_tuple_sharding=True)
def tag_sharding_attribute_for_dequeued_tensors(dequeues, dims):
"""Tags appropriate XLA sharding attribute to the dequeued tensors.
Args:
dequeues: A list of dequeued tensors on TPU.
dims: A list of integers describing how the tensors are partitioned.
Returns:
The same dequeues with appropriate xla_sharding attribute.
"""
nest.assert_shallow_structure(dequeues, dims)
return nest.map_structure_up_to(
dequeues, _tag_sharding_attribute_for_dequeued_tensor, dequeues, dims)
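# [Editor's sketch, not part of the original file] For dims = [2, 4], the tile
# assignment built in _tag_sharding_attribute_for_dequeued_tensor is just the
# device ids laid out in that grid:
#
#   import numpy as np
#   np.arange(np.prod([2, 4])).reshape([2, 4])
#   # -> array([[0, 1, 2, 3],
#   #           [4, 5, 6, 7]])
#
# i.e. device 0 owns the (0, 0) block and device 7 the (1, 3) block.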
class InfeedQueue(object):
"""A helper object to build a device infeed queue.
The InfeedQueue builds the host-side and device-side Ops to enqueue and
dequeue elements, respectively, and ensures that their types and
shapes match.
"""
def __init__(self,
number_of_tuple_elements=None,
tuple_types=None,
tuple_shapes=None,
shard_dimensions=None,
name=None):
"""Creates a new InfeedQueue with the given configuration.
The configuration need not be fully specified at creation since it
can be modified subsequently by methods that set the values
explicitly or infer them from the shapes of inputs.
Args:
number_of_tuple_elements: the number of Tensors fed atomically through the
queue, must be present unless it can be inferred from other arguments.
tuple_types: if not None, a list of types of the elements of the queue.
tuple_shapes: if not None, a list of shapes of the elements of the queue.
shard_dimensions: if not None, a list of dimensions on which the
elements of the queue should be sharded during automatic
parallelization.
name: the name of the queue.
Raises:
ValueError: if number_of_tuple_elements <= 0; or
number_of_tuple_elements, tuple_types, tuple_shapes, and
shard_dimensions are all None; or the length of tuple_types,
tuple_shapes, or shard_dimensions is not equal to
number_of_tuple_elements; or any element of shard_dimensions
can't be converted to a Dimension.
TypeError: if any element of tuple_types or tuple_shapes can't
be converted to a dtype or TensorShape, respectively.
"""
self._frozen = False
self._generated_enqueue_ops = False
self._generated_dequeue_op = False
self._name = "InfeedQueue" if name is None else name
if number_of_tuple_elements is None:
if tuple_types is not None:
number_of_tuple_elements = len(tuple_types)
elif tuple_shapes is not None:
number_of_tuple_elements = len(tuple_shapes)
elif shard_dimensions is not None:
number_of_tuple_elements = len(shard_dimensions)
else:
raise ValueError(
"number of tuple elements cannot be inferred from InfeedQueue "
"constructor")
if number_of_tuple_elements <= 0:
raise ValueError("number_of_tuple_elements %d must be > 0" %
number_of_tuple_elements)
# Make an empty sharding policy for each tuple element.
self._sharding_policies = [
tpu_sharding.ShardingPolicy()
for _ in xrange(number_of_tuple_elements)
]
if tuple_types is not None:
self.set_tuple_types(tuple_types)
else:
self._tuple_types = None
if tuple_shapes is not None:
self.set_tuple_shapes(tuple_shapes)
else:
self._tuple_shapes = None
if shard_dimensions is not None:
self.set_shard_dimensions(shard_dimensions)
self._validate()
def _validate(self):
"""Checks that the configuration is self-consistent.
Raises:
ValueError: if the shapes and sharding policies don't match.
"""
if self.tuple_shapes is not None:
for (policy, shape) in zip(self._sharding_policies, self._tuple_shapes):
# Raise an error if the policy is incompatible with the shape.
_ = policy.get_sharded_shape(shape)
@property
def number_of_tuple_elements(self):
"""Returns the number of InfeedQueue tuple elements."""
return len(self._sharding_policies)
@property
def tuple_types(self):
"""Returns the types of the InfeedQueue tuple elements."""
return self._tuple_types
def set_tuple_types(self, tuple_types):
"""Sets the type of each element of the queue.
tuple_types must be a list of length
self.number_of_tuple_elements, and each element must be
convertible to a dtype.
Args:
tuple_types: the types of each queue element.
Raises:
ValueError: if tuple_types is not of length
self.number_of_tuple_elements.
TypeError: if an element of tuple_types cannot be converted to a
dtype.
"""
if len(tuple_types) != self.number_of_tuple_elements:
raise ValueError("tuple_types is %s, but must be a list of length %d" %
(str(tuple_types), self.number_of_tuple_elements))
if self._frozen:
for (frozen, updated) in zip(self._tuple_types, tuple_types):
if frozen != updated:
raise ValueError(
"Trying to update InfeedQueue with frozen configuration with an "
"incompatible type. Frozen types are %s, updated types are %s" % (
str(self._tuple_types), str(tuple_types)))
else:
try:
self._tuple_types = [dtypes.as_dtype(t) for t in tuple_types]
except (TypeError) as e:
raise TypeError(
"tuple_types is %s, but must be a list of elements each "
"convertible to dtype: got error %s" % (str(tuple_types), str(e)))
@property
def tuple_shapes(self):
"""Returns the shapes of the InfeedQueue tuple elements."""
return self._tuple_shapes
def set_tuple_shapes(self, tuple_shapes):
"""Sets the shape of each element of the queue.
tuple_shapes must be a list of length
self.number_of_tuple_elements, and each element must be
convertible to a TensorShape.
Args:
tuple_shapes: the shapes of each queue element.
Raises:
ValueError: if tuple_shapes is not of length
self.number_of_tuple_elements.
TypeError: if an element of tuple_shapes cannot be converted to
a TensorShape.
"""
if len(tuple_shapes) != self.number_of_tuple_elements:
raise ValueError("tuple_shapes is %s, but must be a list of length %d" %
(str(tuple_shapes), self.number_of_tuple_elements))
try:
tuple_shapes = [tensor_shape.as_shape(shape) for shape in tuple_shapes]
except (ValueError, TypeError) as e:
raise TypeError(
"tuple_shapes is %s, but must be a list of elements each "
"convertible to TensorShape: got error %s" % (str(tuple_shapes),
str(e)))
if self._frozen:
for (frozen, updated) in zip(self._tuple_shapes, tuple_shapes):
if frozen != updated:
raise ValueError(
"Trying to update InfeedQueue with frozen configuration with an "
"incompatible shape. Frozen shapes are %s, updated shapes are %s"
% (str(self._tuple_shapes), str(tuple_shapes)))
else:
self._tuple_shapes = tuple_shapes
self._validate()
@property
def sharding_policies(self):
"""Returns the sharding policies of the InfeedQueue tuple elements."""
return self._sharding_policies
@property
def shard_dimensions(self):
"""Gets the shard dimension of each tuple element.
Returns:
A list of length number_of_tuple_elements, where each list entry
is the shard dimension of that tuple element or None if the
shard dimension has not been set.
"""
# The number of shards is always the same for all the policies.
return [policy.shard_dimension for policy in self._sharding_policies]
def set_shard_dimensions(self, shard_dimensions):
"""Sets the shard_dimension of each element of the queue.
shard_dimensions must be a list of length
self.number_of_tuple_elements, and each element must be
convertible to a Dimension compatible with self.tuple_shapes.
Args:
shard_dimensions: the dimensions of each queue element.
Raises:
ValueError: if shard_dimensions is not of length
self.number_of_tuple_elements; or an element of
shard_dimensions cannot be converted to a Dimension; or an
element of shard_dimensions is a Dimension that is out of
range for the corresponding tuple element shape.
"""
if len(shard_dimensions) != self.number_of_tuple_elements:
raise ValueError("shard_dimensions is %s, but must be a list of length %d"
% (str(shard_dimensions),
self.number_of_tuple_elements))
for (policy, dimension) in zip(self._sharding_policies, shard_dimensions):
policy.set_shard_dimension(dimension)
self._validate()
@property
def number_of_shards(self):
"""Gets the number of shards to use for the InfeedQueue.
Returns:
Number of shards or None if the number of shards has not been set.
"""
# The number of shards is always the same for all the policies.
return self._sharding_policies[0].number_of_shards
def set_number_of_shards(self, number_of_shards):
"""Sets the number of shards to use for the InfeedQueue.
Args:
number_of_shards: number of ways to shard the InfeedQueue.
Raises:
ValueError: if number_of_shards is not > 0; or the policies have
been frozen and number_of_shards was already set to something
else.
"""
for policy in self._sharding_policies:
policy.set_number_of_shards(number_of_shards)
self._validate()
def set_configuration_from_input_tensors(self, input_tensors):
"""Sets the shapes and types of the queue tuple elements.
input_tensors is a list of Tensors whose types and shapes are used
to set the queue configuration.
Args:
input_tensors: list of Tensors of the same types and shapes as
the desired queue Tuple.
Raises:
ValueError: if input_tensors is not a list of length
self.number_of_tuple_elements
"""
if len(input_tensors) != self.number_of_tuple_elements:
raise ValueError("input_tensors is %s, but should be a list of %d Tensors"
% (str(input_tensors), self.number_of_tuple_elements))
self.set_tuple_shapes([t.shape for t in input_tensors])
self.set_tuple_types([t.dtype for t in input_tensors])
def set_configuration_from_sharded_input_tensors(self, input_tensors):
"""Sets the shapes and types of the queue tuple elements.
input_tensors is a list of lists of Tensors whose types and shapes are used
to set the queue configuration. The length of the outer list is the number
of shards required, and each inner list is the tuple of Tensors to use to
determine the types and shapes of the corresponding shard. This method
depends on the shard dimension, and calling it freezes the shard policy.
Args:
input_tensors: list of lists of Tensors. The outer list length corresponds
to the desired number of shards, and each inner list is the size
and shape of the desired configuration of the corresponding shard.
Raises:
ValueError: if any inner list is not a list of length
self.number_of_tuple_elements; or the inner lists do not combine to
form a consistent unsharded shape.
TypeError: if the types of the Tensors in the inner lists do not match.
"""
if not self._frozen:
# Unset the tuple shapes in case the configuration becomes
# transiently inconsistent.
self._tuple_shapes = None
number_of_shards = len(input_tensors)
self.set_number_of_shards(number_of_shards)
for t in input_tensors:
if len(t) != self.number_of_tuple_elements:
raise ValueError(
"input_tensors is %s but must be a list of lists, where each inner"
" list has length number_of_tuple_elements=%d" % (
str(input_tensors), self.number_of_tuple_elements))
# Transpose the inputs to make a list of shard shapes for each tuple
# element.
sharded_shapes = [[t[i].shape for t in input_tensors]
for i in xrange(self.number_of_tuple_elements)]
# For each tuple, get the unsharded shape using that tuple's policy.
unsharded_shapes = [
policy.get_unsharded_shape(s)
for (policy, s) in zip(self._sharding_policies, sharded_shapes)
]
self.set_tuple_shapes(unsharded_shapes)
for i in xrange(1, self.number_of_shards):
for (t1, t2) in zip(input_tensors[0], input_tensors[i]):
if t1.dtype != t2.dtype:
raise TypeError(
"types of the tuple elements of input_tensors %s are not "
"consistent" % str(input_tensors))
self.set_tuple_types([t.dtype for t in input_tensors[0]])
def freeze(self):
"""Freezes the InfeedQueue so it can no longer be modified.
The configuration is implicitly frozen before any host-side or
device-side Ops are generated. The configuration cannot be frozen
until the types and shapes of the tuple elements have been set.
Raises:
ValueError: if the types or shapes of the tuple elements have not been
set.
"""
self._frozen = True
if self._tuple_types is None:
raise ValueError(
"Can't freeze an InfeedQueue without setting all tuple types.")
if self._tuple_shapes is None:
raise ValueError(
"Can't freeze an InfeedQueue without setting all tuple shapes.")
for shape in self._tuple_shapes:
if shape.dims is None:
raise ValueError(
"Can't freeze an InfeedQueue without setting all tuple shapes.")
for policy in self._sharding_policies:
policy.freeze()
self._validate()
def generate_dequeue_op(self, tpu_device=0):
"""Generates the device-side Op to dequeue a tuple from the queue.
Implicitly freezes the queue configuration if it is not already
frozen, which will raise errors if the shapes and types have not
been fully specified.
Args:
tpu_device: The TPU device ordinal where the infeed instruction should be
placed. If None, no explicit placement will be performed, and it is up
to the user to call this API from within a proper TPU device scope.
The XLA code will fail if the TPU dequeue instruction is not bound to
any device.
Returns:
A list of Outputs corresponding to a shard of infeed dequeued
into XLA, suitable for use within a replicated block.
Raises:
ValueError: if the types or shapes of the tuple elements have not been
set; or if a dequeue op has already been generated.
"""
self.freeze()
if self._generated_dequeue_op:
raise ValueError("Can't generate two dequeue Ops from the same queue")
self._generated_dequeue_op = True
full_name = "%s/dequeue" % self._name
sharded_shapes = [
policy.get_sharded_shape(shape)
for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies)
]
if tpu_device is not None:
with ops.device(tpu.core(tpu_device)):
return tpu_ops.infeed_dequeue_tuple(
dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
else:
return tpu_ops.infeed_dequeue_tuple(
dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
def _generate_enqueue_op(self,
inputs,
name_prefix,
index,
device=None,
tpu_ordinal=-1):
"""Generate a host-side Op to enqueue a tuple to the queue.
If device is None the inputs are all required to have the same
device specification, and the enqueue Op is colocated with
inputs[0]. Otherwise the enqueue Op is placed on 'device'.
Args:
inputs: a list of Tensors with the types and shapes of the tuple elements.
name_prefix: the base name for the Op.
index: the shard index, used to uniquify the Op name.
device: device to place the Op on, or None if it should be
colocated with the inputs.
tpu_ordinal: ordinal of the TPU device on the host to use for
infeed if device is a CPU device. Should be set to -1 if device
is a TPU device.
Returns:
An Op corresponding to a shard of infeed enqueued at the host,
suitable for use within a replicated block.
Raises:
ValueError: if device is None and inputs do not all have the
same device specification.
"""
full_name = "%s/%d" % (name_prefix, index)
shapes = [t.shape for t in inputs]
if device is None:
devices = [t.device for t in inputs]
for i in xrange(1, self.number_of_tuple_elements):
if devices[0] != devices[i]:
raise ValueError(
"input devices for shard %d are %s, but should all be the same" %
(index, str(devices)))
with ops.colocate_with(inputs[0]):
return tpu_ops.infeed_enqueue_tuple(
inputs=inputs,
shapes=shapes,
name=full_name,
device_ordinal=tpu_ordinal)
else:
with ops.device(device):
return tpu_ops.infeed_enqueue_tuple(
inputs=inputs,
shapes=shapes,
name=full_name,
device_ordinal=tpu_ordinal)
def generate_enqueue_ops(self,
sharded_inputs,
tpu_ordinal_function=None,
placement_function=None):
"""Generates the host-side Ops to enqueue the shards of a tuple.
sharded_inputs is a list, one for each shard, of lists of
Tensors. sharded_inputs[0] is the tuple of Tensors to use to feed
    shard 0 of the queue. Returns the host-side Ops that must be run to
enqueue the sharded tuple. The Op for shard i is colocated with the inputs
for shard i.
Implicitly freezes the queue configuration if it is not already
frozen. If the configuration has already been frozen, and is not
compatible with the types and shapes of sharded_inputs, an error
will be raised.
Args:
sharded_inputs: a list of lists of Tensors. The length of the outer list
determines the number of shards. Each inner list indicates the types
and shapes of the tuples in the corresponding shard.
tpu_ordinal_function: if not None, a function that takes the
shard index as input and returns the ordinal of the TPU device
the shard's infeed should be placed on. tpu_ordinal_function must be
set if the inputs are placed on CPU devices.
      placement_function: if not None, a function that takes the shard index as
        input and returns the host device where the enqueue op should be
        placed.
Returns:
A list of host-side Ops, one for each shard, that when executed together
will enqueue a full-size element of infeed.
Raises:
ValueError: if the queue configuration has previously been frozen and the
shapes of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the shapes of the elements of sharded_inputs
don't form a consistent unsharded tuple; or if the elements of a tuple
have different device constraints.
TypeError: if the queue configuration has previously been frozen and the
types of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the types of the elements of sharded_inputs
don't form a consistent unsharded tuple.
"""
self.set_configuration_from_sharded_input_tensors(sharded_inputs)
self.freeze()
if self._generated_enqueue_ops:
raise ValueError("Can't generate two enqueue Ops from the same queue")
self._generated_enqueue_ops = True
if tpu_ordinal_function is None:
tpu_ordinal_function = lambda index: -1
name_prefix = "%s/enqueue" % self._name
return [
self._generate_enqueue_op(
shard,
name_prefix,
index,
tpu_ordinal=tpu_ordinal_function(index),
device=placement_function(index) if placement_function else None)
for (shard, index) in zip(sharded_inputs, xrange(self.number_of_shards))
]
# TODO(misard) Generalize this to the case of systems that don't
# have 8 devices per host, and figure out what to do with
# model-parallelism.
  def _default_placement_function(self, index):
    # Use integer division so the task index stays integral under true
    # division semantics.
    return "/task:%d/device:CPU:0" % (index // 8)
def _default_ordinal_function(self, index):
return index % 8
# TODO(b/36470756) remove this from tutorials once we have a better story
# for automatic placement of input pipelines.
def split_inputs_and_generate_enqueue_ops(self,
inputs,
device_assignment=None,
placement_function=None,
tpu_ordinal_function=None):
"""POORLY-PERFORMING ON MULTI-HOST SYSTEMS.
Generates the host-side Ops to enqueue a tuple.
This method performs poorly because it takes an entire input on a single
host, splits it, and distributes it to all of the cores. It is present only
to simplify tutorial examples.
inputs is a list of Tensors to use to feed the queue. Each input is split
into self.number_of_shards shards. Returns an Op for each shard to enqueue
the shard. The Op for shard i is placed on device placement_function(i).
Implicitly freezes the queue configuration if it is not already
frozen. If the configuration has already been frozen, and is not
compatible with the types and shapes of inputs, an error
will be raised.
Args:
inputs: a list of Tensors which indicates the types and shapes of the
queue tuple.
device_assignment: if not `None`, a TPU `DeviceAssignment`. If
device_assignment is not `None`, but `placement_function` and
        `tpu_ordinal_function` are None, then `device_assignment` will be used to
place infeeds on the first k TPU shards, where k is the number of shards
in the queue. If all three are `None`, then default placement and
ordinal functions are used.
placement_function: if not None, a function that takes the shard
index as input and returns a device string indicating which
device the shard's infeed should be placed on. If placement_function
and tpu_ordinal_function are None, inputs are sharded round-robin
across the devices in the system.
tpu_ordinal_function: if not None, a function that takes the
shard index as input and returns the ordinal of the TPU device
the shard's infeed should be placed on. If placement_function
and tpu_ordinal_function are None, inputs are sharded round-robin
across the devices in the system.
Returns:
A list of host-side Ops, one for each shard, that when executed together
will enqueue a full-size element of infeed.
Raises:
ValueError: if the queue configuration has previously been frozen and the
shapes of the elements of inputs are not compatible with the frozen
configuration.
TypeError: if the queue configuration has previously been frozen and the
types of the elements of inputs are not compatible with the frozen
configuration.
"""
if device_assignment is None:
if placement_function is None:
placement_function = self._default_placement_function
if tpu_ordinal_function is None:
tpu_ordinal_function = self._default_ordinal_function
else:
def _placement_function_from_map(index):
return device_assignment.host_device(replica=index)
def _ordinal_function_from_map(index):
return device_assignment.tpu_ordinal(replica=index)
if placement_function is None:
placement_function = _placement_function_from_map
if tpu_ordinal_function is None:
tpu_ordinal_function = _ordinal_function_from_map
self.set_configuration_from_input_tensors(inputs)
self.freeze()
if self._generated_enqueue_ops:
raise ValueError("Can't generate two enqueue Ops from the same queue")
self._generated_enqueue_ops = True
split_name_prefix = "%s/split" % self._name
if self.number_of_shards == 1:
transposed_sharded_inputs = [[inp] for inp in inputs]
else:
def split_fn(inp, num_shards, axis, name):
with ops.colocate_with(inp):
return array_ops.split(inp, num_shards, axis=axis, name=name)
transposed_sharded_inputs = [
split_fn(
inp,
self.number_of_shards,
axis=policy.shard_dimension,
name="%s/%d" % (split_name_prefix, index))
for (inp, policy, index) in zip(inputs, self._sharding_policies,
xrange(self.number_of_tuple_elements))
]
sharded_inputs = [[shard[i] for shard in transposed_sharded_inputs]
for i in xrange(self.number_of_shards)]
name_prefix = "%s/enqueue" % self._name
return [
self._generate_enqueue_op(
shard,
name_prefix,
index,
device=placement_function(index),
tpu_ordinal=tpu_ordinal_function(index))
for (shard, index) in zip(sharded_inputs, xrange(self.number_of_shards))
]
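# A minimal usage sketch of the InfeedQueue API above. The shapes, the
# two-element tuple, and the two-way sharding are illustrative assumptions,
# not values required by the API; the sketch reuses this module's imports.
def _example_infeed_queue_usage_sketch():
  """Builds host-side enqueue Ops and a device-side dequeue Op (sketch)."""
  queue = InfeedQueue(number_of_tuple_elements=2, shard_dimensions=[0, 0])
  queue.set_number_of_shards(2)
  images = array_ops.zeros([8, 32, 32, 3])
  labels = array_ops.zeros([8, 1])
  # Host side: split each input two ways along dimension 0 and build one
  # enqueue Op per shard (uses the tutorial-only helper defined above).
  enqueue_ops = queue.split_inputs_and_generate_enqueue_ops([images, labels])
  # Device side: a replicated computation dequeues one shard-sized tuple,
  # here ([4, 32, 32, 3], [4, 1]) per shard.
  dequeued_images, dequeued_labels = queue.generate_dequeue_op()
  return enqueue_ops, dequeued_images, dequeued_labels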
class _PartitionedInfeedQueue(InfeedQueue):
"""A helper object to build a device infeed queue with input partition.
Args:
number_of_tuple_elements: the number of Tensors fed atomically through the
queue, must be present unless it can be inferred from other arguments.
device_assignment: A TPU `DeviceAssignment` which is used to place all the
partitions to different TPU infeed queues.
host_id: The id of the host machine.
input_partition_dims: A nested list/tuple of integers. Each inner
list/tuple describes how to partition the corresponding input tensor.
tuple_types: If not None, a list of types of the elements of the queue.
tuple_shapes: If not None, a list of shapes of the elements of the queue.
name: The name of the queue.
"""
def __init__(self,
number_of_tuple_elements,
device_assignment,
host_id,
input_partition_dims=None,
tuple_types=None,
tuple_shapes=None,
name=None):
super(_PartitionedInfeedQueue, self).__init__(
number_of_tuple_elements=number_of_tuple_elements,
tuple_types=tuple_types,
tuple_shapes=None,
shard_dimensions=None,
name="PartitionedInfeedQueue" if name is None else name)
self._input_partition_dims = input_partition_dims
self._host_id = host_id
self._device_assignment = device_assignment
def generate_dequeue_op(self, tpu_device=0):
"""Generate TPU dequeue ops.
Args:
tpu_device: The TPU device ordinal where the infeed instruction should be
placed.
Returns:
A list of Outputs corresponding to a partition of infeed dequeued
into XLA, suitable for use within a replicated block.
Raises:
ValueError: if the types or shapes of the tuple elements have not been
set; or if a dequeue op has already been generated.
"""
self.freeze()
if self._generated_dequeue_op:
raise ValueError("Can't generate two dequeue Ops from the same queue")
self._generated_dequeue_op = True
full_name = "%s/dequeue" % self._name
sharded_shapes = [
policy.get_sharded_shape(shape)
for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies)
]
with ops.device(tpu.core(tpu_device)):
values = tpu_ops.infeed_dequeue_tuple(
dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
return tag_sharding_attribute_for_dequeued_tensors(
values, self._input_partition_dims)
def generate_enqueue_ops(self, per_host_sharded_inputs):
"""Generates the host-side Ops to enqueue the partitioned inputs.
    per_host_sharded_inputs is a list, one for each replica, of lists of
    Tensors. per_host_sharded_inputs[i] is the tuple of Tensors to use to
    feed replica i.
    per_host_sharded_inputs[i][j] is partitioned by
    self._input_partition_dims[j]. For example, if
    per_host_sharded_inputs[i][j] is a 2-D Tensor:
      [[A, B, C, D],
       [E, F, G, H]]
    and self._input_partition_dims[j] is [2, 4], then
    per_host_sharded_inputs[i][j] is partitioned and flattened into
      [A, B, C, D, E, F, G, H]
    and fed to logical cores [0, 1, 2, 3, 4, 5, 6, 7] respectively.
Args:
per_host_sharded_inputs: a list of lists of Tensors. The length of the
outer list determines the number of shards. Each inner list indicates
the types and shapes of the tuples in the corresponding shard.
Returns:
A list of host-side Ops, one for each shard, that when executed together
will enqueue a full-size element of infeed.
Raises:
ValueError: if the queue configuration has previously been frozen and the
shapes of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the shapes of the elements of sharded_inputs
don't form a consistent unsharded tuple; or if the elements of a tuple
have different device constraints; or if the partition dims are invalid.
TypeError: if the queue configuration has previously been frozen and the
types of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the types of the elements of sharded_inputs
don't form a consistent unsharded tuple.
"""
self.set_configuration_from_sharded_input_tensors(per_host_sharded_inputs)
number_of_replicas_per_host = len(per_host_sharded_inputs)
number_of_tuple_elements = len(per_host_sharded_inputs[0])
assert len(self._input_partition_dims) == number_of_tuple_elements
per_host_enqueue_ops = []
for replica_index in range(number_of_replicas_per_host):
flattened_inputs = per_host_sharded_inputs[replica_index]
inputs_part_dims_flat = nest.flatten_up_to(flattened_inputs,
self._input_partition_dims)
inputs_parted_iters = [
iter(self._check_dims_and_partition_or_replicate_on_host(x, dims))
for x, dims in zip(per_host_sharded_inputs[replica_index],
inputs_part_dims_flat)
]
for logical_core in xrange(self._device_assignment.num_cores_per_replica):
        # Place each partition on a different logical core.
replica_id = self._device_assignment.lookup_replicas(
self._host_id, logical_core)[replica_index]
ordinal = self._device_assignment.tpu_ordinal(
replica=replica_id, logical_core=logical_core)
infeed_inputs = []
for it in inputs_parted_iters:
input_for_device = next(it, None)
if input_for_device is not None:
infeed_inputs.append(input_for_device)
if infeed_inputs:
per_host_enqueue_ops.append(
tpu_ops.infeed_enqueue_tuple(
inputs=infeed_inputs,
shapes=[x.shape for x in infeed_inputs],
name="enqueue/replica_{0}/input_{1}".format(
replica_index, logical_core),
device_ordinal=ordinal))
return per_host_enqueue_ops
def _check_input_partition_dims(self, tensor, dims):
"""Checks that input partition dims are valid for the `Tensor`.
Args:
tensor: Input tensor for partitioning.
      dims: A list of integers describing how to partition the input tensor.
Raises:
      ValueError: If the tensor can't be partitioned by dims, or if
        num_cores_per_replica doesn't match the number of partitions
        (dims.prod()).
"""
# No partitioning specified, so don't perform further checks.
if dims is None:
return
dims = np.array(dims)
if (dims < 1).any():
raise ValueError("All input partition dims must be >= 1.")
# No partitioning, so don't perform further checks.
if dims.prod() == 1:
return
if dims.prod() != self._device_assignment.num_cores_per_replica:
raise ValueError(
"The product of each input parition dim should equal to "
"num_cores_per_replica. (dim = {}, num_cores_per_replica "
"= {})".format(dims, self._device_assignment.num_cores_per_replica))
if dims.shape[0] != tensor.shape.ndims:
raise ValueError(
"Input partition dims must have the same number of dimensions "
"as the `Tensor` to be partitioned. (tensor shape = {}, input "
"partition dims = {}).".format(tensor.shape.as_list(), dims))
tensor.shape.assert_is_fully_defined()
def _check_dims_and_partition_or_replicate_on_host(self, tensor, dims):
"""Checks dims and partitions or replicates the input tensor.
The ops inside this function are placed on the host side.
Args:
tensor: The input tensor which will be partitioned or replicated.
      dims: A list of integers describing how to partition the input tensor.
Returns:
An iterator of `Tensor`s or a list of partitioned tensors.
"""
self._check_input_partition_dims(tensor, dims)
return partition_or_replicate_on_host(tensor, dims)
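# A small numpy illustration of the partition-and-flatten order documented in
# _PartitionedInfeedQueue.generate_enqueue_ops above. It only mirrors the
# docstring example; the real op-level splitting is performed by
# partition_or_replicate_on_host.
def _example_partition_dims_sketch():
  """Partitions a 2x4 array by dims [2, 4] into 8 pieces for cores 0..7."""
  tensor = np.arange(8).reshape(2, 4)  # [[0, 1, 2, 3], [4, 5, 6, 7]]
  dims = [2, 4]
  pieces = [tensor]
  for axis, num_parts in enumerate(dims):
    next_pieces = []
    for piece in pieces:
      next_pieces.extend(np.split(piece, num_parts, axis=axis))
    pieces = next_pieces
  # pieces[i] is fed to logical core i: [[0]], [[1]], ..., [[7]].
  return pieces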
|
tensorflow-master
|
tensorflow/python/tpu/tpu_feed.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tpu_function helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import training_loop
class TPUContextTest(test.TestCase):
@test_util.deprecated_graph_mode_only
def testIsInContext(self):
"""Test that control_flow_util can check that we're in a TPU context."""
z1 = array_ops.identity(1)
pivot = control_flow_ops.no_op()
context = tpu.TPUReplicateContext(b"context", 1, pivot=pivot)
context.Enter()
z2 = array_ops.identity(1)
context.Exit()
self.assertFalse(control_flow_util.IsInXLAContext(z1.op))
self.assertTrue(control_flow_util.IsInXLAContext(z2.op))
class TPULayerRewriteTest(test.TestCase):
@test_util.deprecated_graph_mode_only
def testUsingInfeedQueueWithRegularizer(self):
"""Test that Layer regularizers can reference data created in loops."""
def make_regularizer(scale):
return lambda inputs: scale * math_ops.reduce_sum(math_ops.square(inputs))
def training_step(inputs, scale):
outputs = convolutional.conv2d(
inputs,
filters=16,
kernel_size=(3, 3),
data_format="channels_first",
kernel_regularizer=make_regularizer(scale))
loss = math_ops.reduce_mean(math_ops.square(outputs))
return loss.op
inputs = array_ops.zeros(shape=(128, 32, 32, 16))
scale = array_ops.ones(shape=())
infeed = tpu_feed.InfeedQueue(
tuple_types=[dtypes.float32, dtypes.float32],
tuple_shapes=[inputs.shape, scale.shape])
def loop():
return training_loop.repeat(5, training_step, infeed_queue=infeed)
# This should not throw an error.
tpu.rewrite(loop)
class TPUGraphPruneTest(test.TestCase):
def test_prune_unconnected_ops(self):
with ops.Graph().as_default():
a = array_ops.placeholder(dtype=dtypes.float32, name="a")
b = array_ops.placeholder(dtype=dtypes.float32, name="b")
constant_op.constant(1.0, name="constant")
x = variable_scope.get_variable(
name="x",
dtype=dtypes.float32,
shape=[],
use_resource=True,
initializer=init_ops.constant_initializer(2.0))
y = variable_scope.get_variable(
name="y",
dtype=dtypes.float32,
shape=[],
use_resource=True,
initializer=init_ops.constant_initializer(3.0))
math_ops.add(a, b)
math_ops.add(x, y)
graph_def = ops.get_default_graph().as_graph_def()
for node in graph_def.node:
# Attach a TPU_REPLICATE_ATTR to each node.
node.attr[tpu._TPU_REPLICATE_ATTR].s = b"0"
# Rewire placeholder "a" and variable "y" leaving them unconnected.
for (input_index, node_input) in enumerate(node.input):
if node_input == "b":
node.input[input_index] = "constant"
if node_input == "y":
node.input[input_index] = "x"
with ops.Graph().as_default() as graph:
# Reimport the graph and prune unconnected ops.
importer.import_graph_def(graph_def)
tpu.prune_unconnected_ops_from_xla(ops.get_default_graph())
# Verify that ops "a" and "x" still have TPU_REPLICATE_ATTR.
a = graph.get_operation_by_name("import/a").get_attr(
tpu._TPU_REPLICATE_ATTR)
self.assertEqual(b"0", a)
x = graph.get_operation_by_name("import/x").get_attr(
tpu._TPU_REPLICATE_ATTR)
self.assertEqual(b"0", x)
# Verify that ops "b" and "y" have TPU_REPLICATE_ATTR removed.
with self.assertRaisesRegexp(
ValueError,
"Operation \'import/b\' has no attr named \'_tpu_replicate\'"):
graph.get_operation_by_name("import/b").get_attr(
tpu._TPU_REPLICATE_ATTR)
with self.assertRaisesRegexp(
ValueError,
"Operation \'import/y\' has no attr named \'_tpu_replicate\'"):
graph.get_operation_by_name("import/y").get_attr(
tpu._TPU_REPLICATE_ATTR)
def do_einsum():
a = array_ops.placeholder(dtype=dtypes.float32, name="a", shape=[2, 3, 4])
b = array_ops.placeholder(dtype=dtypes.float32, name="b", shape=[2, 4, 5])
return special_math_ops.einsum("abc,acd->abd", a, b)
def find_einsum(g):
graph_def = g.as_graph_def()
for node in graph_def.node:
if node.op == "XlaEinsum":
return True
return False
class TPUXlaEinsumTest(test.TestCase):
def test_tpu_rewrite_uses_xla_einsum(self):
with ops.Graph().as_default() as g:
tpu.rewrite(do_einsum)
self.assertTrue(find_einsum(g))
def test_default_does_not_use_xla_einsum(self):
with ops.Graph().as_default() as g:
do_einsum()
self.assertFalse(find_einsum(g))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/tpu/tpu_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Tensor Tracer report generation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
_TRACER_LOG_PREFIX = ' [>>>TT>>>]'
_MARKER_SECTION_BEGIN = '!!!!!!! section-begin:'
_MARKER_SECTION_END = '!!!!!!! section-end:'
_SECTION_NAME_CONFIG = 'configuration'
_SECTION_NAME_REASON = 'reason'
_SECTION_NAME_OP_LIST = 'op-list'
_SECTION_NAME_TENSOR_LIST = 'tensor-list'
_SECTION_NAME_CACHE_INDEX_MAP = 'cache-index-map'
_SECTION_NAME_GRAPH = 'graph'
_SECTION_NAME_TENSOR_TRACER_CHECKPOINT = 'tensor_tracer_checkpoint'
_FIELD_NAME_VERSION = 'version:'
_FIELD_NAME_DEVICE = 'device:'
_FIELD_NAME_TRACE_MODE = 'trace-mode:'
_FIELD_NAME_SUBMODE = 'submode:'
_FIELD_NAME_NUM_REPLICAS = 'num-replicas:'
_FIELD_NAME_NUM_REPLICAS_PER_HOST = 'num-replicas-per-host:'
_FIELD_NAME_NUM_HOSTS = 'num-hosts:'
_FIELD_NAME_NUM_OPS = 'number-of-ops:'
_FIELD_NAME_NUM_TENSORS = 'number-of-tensors:'
_FIELD_NAME_NUM_CACHE_INDICES = 'number-of-indices:'
_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED = 'topological-sort-succeed:'
_CURRENT_VERSION = 'use-outside-compilation'
def topological_sort(g):
"""Performs topological sort on the given graph.
Args:
g: the graph.
  Returns:
    A pair where the first element indicates whether a cycle was found
    (True if the graph contains a cycle; False if the sort succeeded),
    and the second element is either the set of nodes involved in or
    downstream of a cycle, or the topologically sorted list of nodes.
"""
def _is_loop_edge(op):
"""Returns true if the op is the end of a while-loop creating a cycle."""
return op.type in ['NextIteration']
def _in_op_degree(op):
"""Returns the number of incoming edges to the given op.
The edge calculation skips the edges that come from 'NextIteration' ops.
    NextIteration creates a cycle in the graph. We break cycles by treating
    this op as a sink and ignoring all of its outgoing edges.
Args:
op: Tf.Operation
Returns:
the number of incoming edges.
"""
    count = 0
    # Use a distinct name for the incoming ops to avoid shadowing `op`.
    for in_op in op.control_inputs + [in_tensor.op for in_tensor in op.inputs]:
      if not _is_loop_edge(in_op):
        count += 1
    return count
sorted_ops = []
op_in_degree = {op: _in_op_degree(op) for op in g.get_operations()}
frontier = [op for (op, degree) in op_in_degree.items() if degree == 0]
frontier.sort(key=lambda op: op.name)
while frontier:
op = frontier.pop()
# Remove the op from graph, and remove its outgoing edges.
sorted_ops.append(op)
if _is_loop_edge(op):
continue
# pylint: disable=protected-access
consumers = list(op._control_outputs)
# pylint: enable=protected-access
for out_tensor in op.outputs:
consumers += [consumer_op for consumer_op in out_tensor.consumers()]
consumers.sort(key=lambda op: op.name)
for consumer in consumers:
      # For each removed edge, decrement the consumer's in-degree.
op_in_degree[consumer] -= 1
if op_in_degree[consumer] == 0:
frontier.append(consumer)
if op_in_degree[consumer] < 0:
raise ValueError('consumer:%s degree mismatch'%consumer.name)
left_ops = set([op for (op, degree) in op_in_degree.items() if degree > 0])
if left_ops:
return (True, left_ops)
else:
assert len(g.get_operations()) == len(sorted_ops)
return (False, sorted_ops)
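# A hedged example of topological_sort on a tiny three-op graph. The imports
# are local because this module does not itself import graph-building ops.
def _example_topological_sort_sketch():
  """Sorts a graph with ops a, b and c = a + b (sketch)."""
  from tensorflow.python.framework import constant_op
  from tensorflow.python.framework import ops
  from tensorflow.python.ops import math_ops
  with ops.Graph().as_default() as g:
    a = constant_op.constant(1.0, name='a')
    b = constant_op.constant(2.0, name='b')
    math_ops.add(a, b, name='c')
  contains_cycle, order = topological_sort(g)
  # No while-loop means no cycle: contains_cycle is False and `order` lists
  # every op with producers before consumers ('a' and 'b' before 'c').
  return contains_cycle, [op.name for op in order]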
class TensorTracerConfig(object):
"""Tensor Tracer config object."""
def __init__(self):
self.version = _CURRENT_VERSION
self.device_type = None
self.num_replicas = None
self.num_replicas_per_host = None
self.num_hosts = None
class TensorTraceOrder(object):
"""Class that is responsible from storing the trace-id of the tensors."""
def __init__(self, graph_order, traced_tensors):
self.graph_order = graph_order
self.traced_tensors = traced_tensors
self._create_tensor_maps()
def _create_tensor_maps(self):
"""Creates tensor to cache id maps."""
self.tensorname_to_cache_idx = {}
self.cache_idx_to_tensor_idx = []
for out_tensor in self.traced_tensors:
tensor_name = out_tensor.name
if tensor_name in self.tensorname_to_cache_idx:
raise ValueError(
'Tensor name %s should not be already in '
'tensorname_to_cache_idx'%tensor_name)
if tensor_name not in self.graph_order.tensor_to_idx:
raise ValueError(
'Tensor name %s is not in the tensor_to_idx'%tensor_name)
tensor_idx = self.graph_order.tensor_to_idx[tensor_name]
cache_idx = len(self.tensorname_to_cache_idx)
self.tensorname_to_cache_idx[tensor_name] = cache_idx
self.cache_idx_to_tensor_idx.append(tensor_idx)
if len(self.tensorname_to_cache_idx) != len(
self.cache_idx_to_tensor_idx):
      raise RuntimeError('len(self.tensorname_to_cache_idx) != '
                         'len(self.cache_idx_to_tensor_idx)')
def sort_tensors_and_ops(graph):
"""Returns a wrapper that has consistent tensor and op orders."""
graph_wrapper = collections.namedtuple('GraphWrapper',
['graph', 'operations', 'op_to_idx',
'tensors', 'tensor_to_idx',
'contains_cycle',
'topological_order_or_cycle'])
contains_cycle, topological_order_or_cycle = topological_sort(graph)
if not contains_cycle:
operations = topological_order_or_cycle
else:
operations = graph.get_operations()
op_to_idx = {op.name: index for index, op
in enumerate(operations)}
tensors = []
for op in operations:
tensors.extend(op.outputs)
tensor_to_idx = {tensor.name: index for index, tensor in
enumerate(tensors)}
return graph_wrapper(graph=graph, operations=operations, op_to_idx=op_to_idx,
tensors=tensors, tensor_to_idx=tensor_to_idx,
contains_cycle=contains_cycle,
topological_order_or_cycle=topological_order_or_cycle)
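# A hedged sketch wiring sort_tensors_and_ops and TensorTraceOrder together
# for a one-op graph; the imports are local for the same reason as above.
def _example_trace_order_sketch():
  """Builds a TensorTraceOrder that traces a single constant (sketch)."""
  from tensorflow.python.framework import constant_op
  from tensorflow.python.framework import ops
  with ops.Graph().as_default() as g:
    traced = constant_op.constant(1.0, name='traced')
  graph_order = sort_tensors_and_ops(g)
  trace_order = TensorTraceOrder(graph_order, [traced])
  # The single traced tensor gets cache index 0: {'traced:0': 0}.
  return trace_order.tensorname_to_cache_idx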
class OpenReportFile(object):
"""Context manager for writing report file."""
def __init__(self, tt_parameters):
if not tt_parameters.report_file_path:
self._report_file = None
return
    self._report_file = gfile.Open(tt_parameters.report_file_path, 'w')
def __enter__(self):
return self._report_file
def __exit__(self, unused_type, unused_value, unused_traceback):
if self._report_file:
self._report_file.close()
class TTReportHandle(object):
"""Utility class responsible from creating a tensor tracer report."""
def __init__(self):
self.instrument_records = {}
self._report_file = None
def instrument(self, name, explanation):
self.instrument_records[name] = explanation
def instrument_op(self, op, explanation):
self.instrument(op.name, explanation)
def instrument_tensor(self, tensor, explanation):
self.instrument(tensor.name, explanation)
def create_report(self, tt_config, tt_parameters,
tensor_trace_order, tensor_trace_points):
"""Creates a report file and writes the trace information."""
with OpenReportFile(tt_parameters) as self._report_file:
self._write_config_section(tt_config, tt_parameters)
self._write_op_list_section(tensor_trace_order.graph_order)
self._write_tensor_list_section(tensor_trace_order.graph_order)
self._write_trace_points(tensor_trace_points)
self._write_cache_index_map_section(tensor_trace_order)
self._write_reason_section()
self._write_graph_section(tensor_trace_order.graph_order)
def _write_trace_points(self, tensor_trace_points):
"""Writes the list of checkpoints."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
_SECTION_NAME_TENSOR_TRACER_CHECKPOINT))
for (tensor, checkpoint_name) in tensor_trace_points:
self._write_report('%s %s\n'%(tensor.name, checkpoint_name))
self._write_report('%s %s\n'%(_MARKER_SECTION_END,
_SECTION_NAME_TENSOR_TRACER_CHECKPOINT))
def _write_report(self, content):
"""Writes the given content to the report."""
line = '%s %s'%(_TRACER_LOG_PREFIX, content)
if self._report_file:
self._report_file.write(line)
else:
logging.info(line)
def _write_config_section(self, tt_config, tt_parameters):
"""Writes the config section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_CONFIG))
self._write_report('%s %s\n'%(_FIELD_NAME_VERSION, tt_config.version))
self._write_report('%s %s\n'%(_FIELD_NAME_DEVICE, tt_config.device_type))
self._write_report('%s %s\n'%(_FIELD_NAME_TRACE_MODE,
tt_parameters.trace_mode))
self._write_report('%s %s\n'%(_FIELD_NAME_SUBMODE,
tt_parameters.submode))
if tt_parameters.included_cores:
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS,
len(tt_parameters.included_cores)))
else:
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS,
tt_config.num_replicas))
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS_PER_HOST,
tt_config.num_replicas_per_host))
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_HOSTS, tt_config.num_hosts))
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_CONFIG))
def _write_reason_section(self):
"""Writes the reason section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_REASON))
for key in sorted(self.instrument_records):
self._write_report('"%s" %s\n'%(key, self.instrument_records[key]))
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_REASON))
def _write_op_list_section(self, graph_order):
"""Writes the Op-list section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_OP_LIST))
self._write_report('%s %d\n'%(_FIELD_NAME_NUM_OPS,
len(graph_order.operations)))
for i in range(0, len(graph_order.operations)):
op = graph_order.operations[i]
line = '%d "%s" %s'%(i, op.name, op.type)
for out_tensor in op.outputs:
if out_tensor.name not in graph_order.tensor_to_idx:
raise ValueError(
'out_tensor %s is not in tensor_to_idx'%out_tensor.name)
line += ' %d'%graph_order.tensor_to_idx[out_tensor.name]
line += '\n'
self._write_report(line)
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_OP_LIST))
def _write_tensor_list_section(self, graph_order):
"""Writes the tensor-list section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
_SECTION_NAME_TENSOR_LIST))
self._write_report('%s %d\n'%(_FIELD_NAME_NUM_TENSORS,
len(graph_order.tensors)))
for i in range(0, len(graph_order.tensors)):
tensor = graph_order.tensors[i]
line = '%d "%s"'%(i, tensor.name)
consumers = tensor.consumers()
consumers.sort(key=lambda op: op.name)
for consumer_op in consumers:
if consumer_op.name not in graph_order.op_to_idx:
raise ValueError(
'consumer_op %s is not in op_to_idx'%consumer_op.name)
line += ' %d'%graph_order.op_to_idx[consumer_op.name]
line += '\n'
self._write_report(line)
self._write_report('%s %s\n'%(_MARKER_SECTION_END,
_SECTION_NAME_TENSOR_LIST))
def _write_cache_index_map_section(self, tensor_trace_order):
"""Writes the mapping from cache index to tensor index to the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
_SECTION_NAME_CACHE_INDEX_MAP))
self._write_report('%s %d\n'%(
_FIELD_NAME_NUM_CACHE_INDICES,
len(tensor_trace_order.cache_idx_to_tensor_idx)))
for cache_idx in range(0, len(tensor_trace_order.cache_idx_to_tensor_idx)):
tensor_idx = tensor_trace_order.cache_idx_to_tensor_idx[cache_idx]
line = '%d %d\n'%(cache_idx, tensor_idx)
self._write_report(line)
self._write_report('%s %s\n'%(_MARKER_SECTION_END,
_SECTION_NAME_CACHE_INDEX_MAP))
def _write_graph_section(self, graph_order):
"""Writes the graph section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_GRAPH))
self._write_report('%s %s\n'%(_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED,
not graph_order.contains_cycle))
    topological_order = list(graph_order.topological_order_or_cycle)
    for i, op in enumerate(topological_order):
      self._write_report('%d "%s"\n' % (i, op.name))
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_GRAPH))
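# A hedged sketch of how a consumer might split a generated report back into
# sections using the marker constants above. This parser is illustrative
# only; it is not part of the Tensor Tracer API.
def _example_parse_report_sections(report_lines):
  """Returns a dict mapping each section name to the lines inside it."""
  sections = {}
  current_name = None
  for line in report_lines:
    # _write_report prefixes every line with _TRACER_LOG_PREFIX.
    content = line.replace(_TRACER_LOG_PREFIX, '', 1).strip()
    if content.startswith(_MARKER_SECTION_BEGIN):
      current_name = content[len(_MARKER_SECTION_BEGIN):].strip()
      sections[current_name] = []
    elif content.startswith(_MARKER_SECTION_END):
      current_name = None
    elif current_name is not None:
      sections[current_name].append(content)
  return sections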
|
tensorflow-master
|
tensorflow/python/tpu/tensor_tracer_report.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Optional helper for gradient handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.ops import tpu_ops
def get_gradients_through_compute_gradients(optimizer, loss, activations):
"""Compute gradients to send to TPU embedding.
Args:
optimizer: a subclass of optimizer.Optimizer, usually CrossShardOptimizer.
Used to call compute_gradients().
loss: a Tensor to call optimizer.compute_gradients() on.
activations: an OrderedDict mapping feature_name to Tensors of activations.
Returns:
An OrderedDict mapping from feature name Strings to Tensors of gradients of
the loss wrt the activations of the features.
"""
activation_list = activations.values()
grads_and_vars = optimizer.compute_gradients(loss, activation_list)
grads = [grad for grad, _ in grads_and_vars]
feature_to_gradient_dict = collections.OrderedDict(
zip(activations.keys(), grads))
return feature_to_gradient_dict
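# A hedged sketch of the calling convention above: a plain
# GradientDescentOptimizer stands in for the usual CrossShardOptimizer, and
# the feature name and shapes are illustrative assumptions. Computing
# gradients wrt plain Tensors relies on graph-mode Optimizer support.
def _example_compute_gradients_sketch():
  """Maps a feature name to d(loss)/d(activation) (sketch)."""
  from tensorflow.python.ops import math_ops
  from tensorflow.python.training import gradient_descent
  activations = collections.OrderedDict(
      [('feature_a', array_ops.zeros([4, 8]))])
  loss = math_ops.reduce_sum(math_ops.square(activations['feature_a']))
  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  # Here the gradient of sum(x^2) wrt x is 2x, a [4, 8] Tensor of zeros.
  return get_gradients_through_compute_gradients(optimizer, loss, activations)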
def create_dummy_table_variables(tpu_embedding):
"""Create dummy embedding table variables.
  The sole purpose of these dummy variables is to trigger gradient
  calculation wrt them so that the gradients wrt the activations can be
  captured and later sent to TPU embedding.
Args:
tpu_embedding: TPUEmbedding, dummy table variables will be created for use
with tpu_embedding.
Returns:
A tuple of dummy variables and their initializer.
Raises:
RuntimeError: if collection to store gradients already exists and is not
empty.
"""
dummy_table_variables = collections.OrderedDict()
for table_id, table in enumerate(tpu_embedding.table_to_features_dict):
dummy_table_variables[table] = (
# Explicitly specifying collections prevents this variable from
# being added to the GLOBAL_VARIABLES collection, so that Saver()
# ignores it.
        # However, TensorFlow optimizers create slot variables for these
        # dummy variables, e.g.
        # tpu_embedding_dummy_table_variable_mlp_user/Adam{_1}, which will
        # be in the GLOBAL_VARIABLES collection.
variable_scope.get_variable(
'tpu_embedding_dummy_table_variable_{}'.format(table),
dtype=dtypes.float32,
shape=[1],
use_resource=True,
trainable=True,
collections=['tpu_embedding_dummy_table_variables']))
g = ops.get_default_graph()
table_gradients = g.get_collection_ref(
'tpu_embedding_gradients_table_{}'.format(table_id))
if table_gradients:
raise RuntimeError(
'tpu_embedding_gradients_table_{} is not empty.'.format(table_id))
table_gradients.extend(
[None] * len(tpu_embedding.table_to_features_dict[table]))
return (dummy_table_variables,
variables.variables_initializer(
dummy_table_variables.values(),
name='tpu_embedding_dummy_table_variables_init'))
def hook_dummy_table_variables_to_activations(tpu_embedding, activations,
dummy_table_variables):
"""Have activations depend on dummy table variables for gradient intercept.
Args:
tpu_embedding: TPUEmbedding, activations and dummy_table_variables are from
tpu_embedding.
activations: An OrderedDict of feature name String to activation tensors.
dummy_table_variables: An OrderedDict of table name String to dummy table
variables.
Returns:
An OrderedDict of feature name String to activation tensors, which can be
used just as the activations input.
"""
new_activations = collections.OrderedDict()
for feature in activations:
table = tpu_embedding.feature_to_config_dict[feature].table_id
new_activations[feature] = tpu_ops.tpu_embedding_activations(
dummy_table_variables[table],
activations[feature],
table_id=list(tpu_embedding.table_to_config_dict).index(table),
lookup_id=tpu_embedding.table_to_features_dict[table].index(feature))
return new_activations
def get_gradients_through_dummy_table_variables(tpu_embedding):
"""Get gradients wrt the activations of each feature.
Args:
tpu_embedding: TPUEmbedding, create dummy table variable to be used with
tpu_embedding.
Returns:
An OrderedDict mapping feature name to gradient.
Raises:
ValueError: if some gradients are not defined.
"""
g = ops.get_default_graph()
feature_to_gradient_dict = collections.OrderedDict()
for table_id, table in enumerate(tpu_embedding.table_to_config_dict):
table_gradients = g.get_collection(
'tpu_embedding_gradients_table_{}'.format(table_id))
if all(gradient is None for gradient in table_gradients):
raise ValueError(
'Table {} with id {} has undefined gradients: this is probably '
'because the model asked TPUEmbedding to compute activations that '
'were not used.'.format(table, table_id))
if any(gradient is None for gradient in table_gradients):
# TODO(bfontain): create a white-list for optimizers which are compatible
# with `tf.stop_gradient`.
logging.warn(
'Table {} with id {} has undefined gradients: this is probably '
'because the model asked TPUEmbedding to compute activations that '
'were not used, or tf.stop_gradient() is applied. Gradients of zeros '
'are sent back to TPUEmbedding instead. Gradients of zeros and no '
          'gradients are equivalent for SGD, AdaGrad, FTRL, momentum, etc., '
          'but might differ for other optimizers due to the implementation '
          'of the TPU embedding optimizers.'
.format(table, table_id))
for feature, gradient in zip(tpu_embedding.table_to_features_dict[table],
table_gradients):
if gradient is not None:
feature_to_gradient_dict[feature] = gradient
else:
dimension = tpu_embedding.table_to_config_dict[table].dimension
batch_size = tpu_embedding.batch_size_per_core
max_sequence_length = (
tpu_embedding.feature_to_config_dict[feature].max_sequence_length)
if max_sequence_length:
feature_to_gradient_dict[feature] = array_ops.zeros(
[batch_size, max_sequence_length, dimension])
else:
feature_to_gradient_dict[feature] = array_ops.zeros(
[batch_size, dimension])
return feature_to_gradient_dict
|
tensorflow-master
|
tensorflow/python/tpu/tpu_embedding_gradient.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import *
# pylint: enable=wildcard-import,unused-import
|
tensorflow-master
|
tensorflow/python/tpu/_tpu_estimator_embedding.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import,redefined-builtin
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import *
# used by tests
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _clone_export_output_with_tensors
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _create_global_step
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _export_output_to_tensors
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _get_scaffold
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _Inputs
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _ITERATIONS_PER_LOOP_VAR
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _TPU_ENQUEUE_OPS
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _TPU_ESTIMATOR
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _TPU_TRAIN_OP
# pylint: enable=wildcard-import,unused-import,redefined-builtin
|
tensorflow-master
|
tensorflow/python/tpu/tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Tests for python.tpu.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.tpu import feature_column_v2 as tpu_fc
def _initialized_session():
sess = session.Session()
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
class EmbeddingColumnTestV2(test.TestCase):
def test_defaults(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column_v2(
categorical_column, dimension=embedding_dimension)
# Can't test default initializer as it's a random function.
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('mean', embedding_column.combiner)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
def test_all_constructor_args(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column_v2(
categorical_column,
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer')
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('my_initializer', embedding_column.initializer())
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
@test_util.deprecated_graph_mode_only
def test_feature_layer_cpu(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 1), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 2))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
expected_lookups_sequence = (
# example 0, ids [2], embedding = [[7, 11], [0, 0]]
((7., 11.), (0., 0.),),
        # example 1, ids [0, 1], embedding = [[1, 2], [3, 5]]
((1., 2.), (3., 5.),),
# example 2, ids [], embedding = [0, 0]
((0., 0.), (0., 0.),),
# example 3, ids [1], embedding = [3, 5]
((3., 5.), (0., 0.),),
)
# Build columns.
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sequence_categorical_column = (
fc_lib.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size))
embedding_column = tpu_fc.embedding_column_v2(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
sequence_embedding_column = tpu_fc.embedding_column_v2(
sequence_categorical_column,
dimension=embedding_dimension,
initializer=_initializer,
max_sequence_length=2)
# Provide sparse input and get dense result.
features = {'aaa': sparse_input, 'bbb': sparse_input}
dense_features = fc_lib.DenseFeatures([embedding_column])
sequence_features = fc_lib.SequenceFeatures([sequence_embedding_column])
embedding_lookup = dense_features(features)
sequence_embedding_lookup = sequence_features(features)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('dense_features/aaa_embedding/embedding_weights:0',
'sequence_features/bbb_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, embedding_lookup.eval())
self.assertAllEqual(expected_lookups_sequence,
sequence_embedding_lookup[0].eval())
class SharedEmbeddingColumnTestV2(test.TestCase):
@test_util.deprecated_graph_mode_only
def test_defaults(self):
vocabulary_size = 3
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_dimension = 2
embedding_column_b, embedding_column_a = tpu_fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual('mean', embedding_column_a.combiner)
self.assertEqual('mean', embedding_column_b.combiner)
self.assertIsNotNone(embedding_column_a.get_initializer())
self.assertIsNotNone(embedding_column_b.get_initializer())
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_a.get_embedding_var_name())
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_b.get_embedding_var_name())
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
@test_util.deprecated_graph_mode_only
def test_all_constructor_args(self):
vocabulary_size = 3
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_dimension = 2
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='var_scope_name')
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual('my_combiner', embedding_column_a.combiner)
self.assertEqual('my_combiner', embedding_column_b.combiner)
self.assertEqual('my_initializer', embedding_column_a.get_initializer()())
self.assertEqual('my_initializer', embedding_column_b.get_initializer()())
self.assertEqual('var_scope_name',
embedding_column_a.get_embedding_var_name())
self.assertEqual('var_scope_name',
embedding_column_b.get_embedding_var_name())
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
@test_util.deprecated_graph_mode_only
def test_feature_layer_cpu(self):
# Inputs.
vocabulary_size = 3
input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(3, 2))
input_features = {'aaa': input_a, 'bbb': input_b}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups_a = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
)
expected_lookups_b = (
# example 0:
((7., 11.), (0., 0.),), # ids [2], embedding = [[7, 11], [0, 0]]
# example 1:
((1., 2.), (3., 5.),), # ids [0, 1], embedding = [[1, 2], [3, 5]]
# example 2:
((0., 0.), (0., 0.),), # ids [], embedding = [[0, 0], [0, 0]]
)
# Build columns.
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer,
max_sequence_lengths=[0, 2])
# Provide sparse input and get dense result.
dense_features = fc_lib.DenseFeatures([embedding_column_a])
sequence_features = fc_lib.SequenceFeatures([embedding_column_b])
embedding_lookup_a = dense_features(input_features)
embedding_lookup_b = sequence_features(input_features)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('aaa_bbb_shared_embedding:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var.eval())
self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval())
self.assertAllEqual(expected_lookups_b,
embedding_lookup_b[0].eval())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/tpu/feature_column_v2_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Tests for python.tpu.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.tpu import feature_column as tpu_fc
def _initialized_session():
sess = session.Session()
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
class EmbeddingColumnTest(test.TestCase):
def test_defaults(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column(
categorical_column, dimension=embedding_dimension)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('mean', embedding_column.combiner)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
def test_all_constructor_args(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer')
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
@test_util.deprecated_graph_mode_only
def test_get_dense_tensor(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = tpu_fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
embedding_lookup = embedding_column._get_dense_tensor(
fc._LazyBuilder({
'aaa': sparse_input
}))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, embedding_lookup.eval())
class SharedEmbeddingColumnTest(test.TestCase):
@test_util.deprecated_graph_mode_only
def test_defaults(self):
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_b, embedding_column_a = tpu_fc.shared_embedding_columns(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual(embedding_dimension, embedding_column_b.dimension)
self.assertEqual('mean', embedding_column_a.combiner)
self.assertEqual('mean', embedding_column_b.combiner)
self.assertIsNotNone(embedding_column_a.initializer)
self.assertIsNotNone(embedding_column_b.initializer)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_a.shared_embedding_collection_name)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_b.shared_embedding_collection_name)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_a._var_scope_name)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_b._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column_a._variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a._parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b._parse_example_spec)
@test_util.deprecated_graph_mode_only
def test_all_constructor_args(self):
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='var_scope_name')
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual(embedding_dimension, embedding_column_b.dimension)
self.assertEqual('my_combiner', embedding_column_a.combiner)
self.assertEqual('my_combiner', embedding_column_b.combiner)
self.assertEqual('my_initializer', embedding_column_a.initializer())
self.assertEqual('my_initializer', embedding_column_b.initializer())
self.assertEqual('var_scope_name',
embedding_column_a.shared_embedding_collection_name)
self.assertEqual('var_scope_name',
embedding_column_b.shared_embedding_collection_name)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual('var_scope_name', embedding_column_a._var_scope_name)
self.assertEqual('var_scope_name', embedding_column_b._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column_a._variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a._parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b._parse_example_spec)
@test_util.deprecated_graph_mode_only
def test_get_dense_tensor(self):
# Inputs.
vocabulary_size = 3
# -1 values are ignored.
    input_a = np.array([
        [2, -1, -1],  # example 0, ids [2]
        [0, 1, -1]  # example 1, ids [0, 1]
    ])
    input_b = np.array([
        [0, -1, -1],  # example 0, ids [0]
        [-1, -1, -1]  # example 1, ids []
    ])
input_features = {'aaa': input_a, 'bbb': input_b}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups_a = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
)
expected_lookups_b = (
# example 0:
(1., 2.), # ids [0], embedding = [1, 2]
# example 1:
(0., 0.), # ids [], embedding = [0, 0]
)
# Build columns.
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
embedding_lookup_a = embedding_column_a._get_dense_tensor(
fc._LazyBuilder(input_features))
embedding_lookup_b = embedding_column_b._get_dense_tensor(
fc._LazyBuilder(input_features))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var.eval())
self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval())
self.assertAllEqual(expected_lookups_b, embedding_lookup_b.eval())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/tpu/feature_column_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental TPU library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.tpu import tpu_strategy_util
# pylint: enable=unused-import
|
tensorflow-master
|
tensorflow/python/tpu/experimental/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Operations for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops import gen_tpu_ops
from tensorflow.python.ops.gen_tpu_ops import *
# pylint: enable=wildcard-import,unused-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu_function
from tensorflow.python.util.tf_export import tf_export
def _create_default_group_assignment():
num_shards = tpu_function.get_tpu_context().number_of_shards
if num_shards is None:
logging.warning(
"cross_replica_sum should be used within a tpu_shard_context, but "
"got unset number_of_shards. Assuming 1.")
num_shards = 1
group_assignment = [list(range(num_shards))]
return group_assignment
def all_to_all(x,
concat_dimension,
split_dimension,
split_count,
group_assignment=None,
name=None):
"""Exchange data across TPU replicas.
Args:
x: The local tensor.
concat_dimension: The dimension number to concatenate.
split_dimension: The dimension number to split.
    split_count: The number of splits; this number must equal the sub-group
      size (group_assignment.get_shape()[1]).
group_assignment: Optional 2d int32 lists with shape [num_groups,
num_replicas_per_group]. `group_assignment[i]` represents the replica
ids in the ith subgroup.
name: Optional op name.
Returns:
    A `Tensor` containing data concatenated from the different replicas.
"""
if group_assignment is None:
group_assignment = _create_default_group_assignment()
return gen_tpu_ops.all_to_all(
x,
group_assignment,
concat_dimension=concat_dimension,
split_dimension=split_dimension,
split_count=split_count,
name=name)
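# A minimal usage sketch (not part of this module's API; the replica count,
# shapes, and group assignment below are assumptions for illustration). With
# two replicas each holding a [2, 4] tensor, splitting along dimension 0 and
# concatenating along dimension 1 yields a [1, 8] tensor per replica.
def _all_to_all_example(x):
  """Illustrative sketch: exchange halves of `x` between two replicas."""
  return all_to_all(
      x,
      concat_dimension=1,
      split_dimension=0,
      split_count=2,
      group_assignment=[[0, 1]])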
@ops.RegisterGradient("AllToAll")
def _all_to_all_grad(op, grad):
  # The gradient of an all-to-all is also an all-to-all, but with the
  # split_dimension and concat_dimension swapped.
  # The gradient with respect to group_assignment is None.
return [
gen_tpu_ops.all_to_all(
grad,
op.inputs[1],
concat_dimension=op.get_attr("split_dimension"),
split_dimension=op.get_attr("concat_dimension"),
split_count=op.get_attr("split_count")), None
]
@tf_export(v1=["tpu.cross_replica_sum"])
def cross_replica_sum(x, group_assignment=None, name=None):
"""Sum the input tensor across replicas according to group_assignment.
Args:
x: The local tensor to the sum.
group_assignment: Optional 2d int32 lists with shape [num_groups,
num_replicas_per_group]. `group_assignment[i]` represents the replica
ids in the ith subgroup.
name: Optional op name.
Returns:
A `Tensor` which is summed across replicas.
"""
if group_assignment is None:
group_assignment = _create_default_group_assignment()
return gen_tpu_ops.cross_replica_sum(x, group_assignment, name=name)
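# A minimal usage sketch (assumption: running inside a replicated TPU
# computation with four replicas; not part of this module's API). With
# group_assignment=[[0, 1], [2, 3]], replicas 0 and 1 are summed together,
# and replicas 2 and 3 are summed together, independently.
def _cross_replica_sum_example(x):
  """Illustrative sketch: sum `x` within two independent replica groups."""
  return cross_replica_sum(x, group_assignment=[[0, 1], [2, 3]])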
def collective_permute(x, source_target_pairs, name=None):
"""Permute the input tensor across replicas given source_target_pairs.
For each source_target_pair <a, b>, we send replica a's input to replica b.
  Each replica id must appear at most once in the source column, and at most
  once in the target column.
  For any replica id not in the target column, this op returns a zero tensor
  with the same shape and dtype as the input x.
For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing
source_target_pairs=`[[0,1],[1,2],[2,3]]` gets the outputs:
`[0, A, B, C]`.
Args:
x: The local tensor to be permuted.
source_target_pairs: 2d int lists with shape [num_pairs, 2].
source_target_pairs[i][0] represents the source replica id and
source_target_pairs[i][1] represents the target replica id.
name: Optional op name.
Returns:
A `Tensor` which is permuted.
"""
return gen_tpu_ops.collective_permute(x, source_target_pairs, name=name)
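# A minimal sketch of the docstring example above (assumption: four replicas;
# not part of this module's API). The pairs below shift each replica's value
# to the next replica; replica 0 is not a target, so it receives zeros.
def _collective_permute_example(x):
  """Illustrative sketch: rotate values one replica to the right."""
  return collective_permute(x, source_target_pairs=[[0, 1], [1, 2], [2, 3]])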
@ops.RegisterGradient("CollectivePermute")
def _collective_permute_grad(op, grad):
# The gradient of a collective permute operation is also a collective
# permute, but with source/target pairs reversed. The gradient with respect
# to input argument `source_target_pairs` is `None`.
source_target_pairs = op.inputs[1][:, ::-1]
return [gen_tpu_ops.collective_permute(grad, source_target_pairs), None]
@ops.RegisterGradient("CrossReplicaSum")
def _cross_replica_sum_grad(op, grad):
# The gradient of a cross replica sum is also a cross-replica sum.
# The gradient with respect to group_assignment is None.
return [gen_tpu_ops.cross_replica_sum(grad, op.inputs[1]), None]
# This extra type checking exists to give a more helpful error message in
# the common case in which uint8 and int64 values are fed via infeed. Remove
# when both types are supported.
_SUPPORTED_INFEED_DTYPES = set([
dtypes.bool, dtypes.int32, dtypes.int64, dtypes.bfloat16, dtypes.float32,
dtypes.complex64, dtypes.uint32
])
@ops.RegisterGradient("TPUEmbeddingActivations")
def _embedding_activations_grad(activations_op, grad_wrt_activations):
"""Saves the gradient of embedding activations ops in a graph collection."""
g = ops.get_default_graph()
table_id = activations_op.get_attr("table_id")
lookup_id = activations_op.get_attr("lookup_id")
table_gradients = g.get_collection_ref(
"tpu_embedding_gradients_table_%d" % table_id)
if not table_gradients:
raise RuntimeError(
"Gradients for TPUEmbedding have been generated in non-training mode."
"This is not expected. Consider putting your Optimizer.minimize code "
"behind the training mode condition check. For Estimator, you can "
"do \n\n"
" if mode == tf.estimator.ModeKeys.TRAIN:\n"
" train_op = opt.minimize(loss)\n"
"\n")
table_gradients[lookup_id] = array_ops.identity(grad_wrt_activations)
return [
      # RegisterGradient requires that a value be returned for all inputs. Since
# the first argument (tpu_gradient_variable_{table_name}) has shape [1],
# we will return zeros(shape=[1]). The actual gradient w.r.t. the
# embedding activations (grad_wrt_activations) has the same shape as the
# activations returned by embedding_activations.
array_ops.zeros(arg.shape, dtype=dtypes.float32)
for arg in activations_op.inputs
]
def infeed_dequeue(dtype, shape, name=None):
"""A placeholder op for a value that will be fed into the computation.
Args:
dtype: A `tf.DType`. The type of elements in the tensor.
shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
A tensor that will be provided using the infeed mechanism.
Raises:
    TypeError: If `dtype` is not a supported infeed type.
"""
if dtype not in _SUPPORTED_INFEED_DTYPES:
raise TypeError(
"{} is not a supported TPU infeed type. Supported types are: "
"{}".format(dtype, list(_SUPPORTED_INFEED_DTYPES)))
return gen_tpu_ops.infeed_dequeue(dtype, shape, name=name)
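# A minimal usage sketch (assumption: a host-side enqueue op supplies a
# matching float32 [128, 10] tensor; not part of this module's API). The
# dtype/shape arguments must agree with what the host enqueues.
def _infeed_dequeue_example():
  """Illustrative sketch: dequeue one float32 batch from infeed."""
  return infeed_dequeue(dtypes.float32, shape=[128, 10])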
# pylint: disable=redefined-outer-name
def infeed_dequeue_tuple(dtypes, shapes, name=None):
"""A placeholder op for values fed into the TPU simultaneously as a tuple.
Args:
dtypes: A list of `tf.DType`s that has length `>= 1`.
The element types of each element in `outputs`.
shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
The shapes of each tensor in `outputs`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
A list of tensors that will be provided using the infeed mechanism.
Raises:
    TypeError: If a type in `dtypes` is not a supported infeed type.
"""
for dtype in dtypes:
if dtype not in _SUPPORTED_INFEED_DTYPES:
raise TypeError(
"{} is not a supported TPU infeed type. Supported types are: "
"{}".format(dtype, list(_SUPPORTED_INFEED_DTYPES)))
return gen_tpu_ops.infeed_dequeue_tuple(dtypes, shapes, name=name)
# pylint: enable=redefined-outer-name
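# A minimal usage sketch (assumption: the host enqueues a matching
# (int32 [128], float32 [128, 10]) tuple; not part of this module's API).
def _infeed_dequeue_tuple_example():
  """Illustrative sketch: dequeue a (labels, features) tuple from infeed."""
  labels, features = infeed_dequeue_tuple(
      dtypes=[dtypes.int32, dtypes.float32],
      shapes=[[128], [128, 10]])
  return labels, features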
# pylint: disable=protected-access
def send_tpu_embedding_gradients(inputs,
config,
learning_rates=None,
name=None):
"""A placeholder op for feeding per-sample gradients to the embedding layer.
Args:
inputs: A TensorList of gradients with which to update embedding tables.
This argument has the same length and shapes as the return value of
RecvTPUEmbeddingActivations, but contains gradients of the model's
loss with respect to the embedding activations. The embedding tables
are updated from these gradients via the optimizers specified in the
TPU embedding configuration given to tpu.initialize_system.
config: Serialized TPUEmbeddingConfiguration proto.
learning_rates: A TensorList of float32 scalars, one for each dynamic
learning rate tag: see the comments in
//third_party/tensorflow/core/protobuf/tpu/
optimization_parameters.proto.
Multiple tables can share the same dynamic learning rate tag as
specified in the configuration. If the learning rates for all tables
are constant, this list should be empty.
name: A name for the operation (optional).
Returns:
A SendTPUEmbeddingGradients operation.
"""
if learning_rates is None:
learning_rates = []
return gen_tpu_ops.send_tpu_embedding_gradients(
inputs=inputs, learning_rates=learning_rates, config=config, name=name)
send_tpu_embedding_gradients.__doc__ = (
gen_tpu_ops.send_tpu_embedding_gradients.__doc__)
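# A minimal usage sketch (assumptions: `grads` is a list of gradient tensors
# matching the activations returned by RecvTPUEmbeddingActivations, and
# `config_str` is a serialized TPUEmbeddingConfiguration proto; hypothetical
# names, not part of this module's API).
def _send_tpu_embedding_gradients_example(grads, config_str):
  """Illustrative sketch: push embedding gradients with constant rates."""
  return send_tpu_embedding_gradients(inputs=grads, config=config_str)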
# pylint: disable=protected-access
def enqueue_tpu_embedding_integer_batch(batch,
device_ordinal,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
batch: A list of 1D tensors, one for each embedding table, containing the
indices into the tables.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingIntegerBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops.enqueue_tpu_embedding_integer_batch(
batch=batch,
device_ordinal=device_ordinal,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_integer_batch.__doc__ = (
gen_tpu_ops.enqueue_tpu_embedding_integer_batch.__doc__)
# pylint: disable=protected-access
def enqueue_tpu_embedding_sparse_batch(sample_indices,
embedding_indices,
aggregation_weights,
device_ordinal,
combiners=None,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
sample_indices: A list of rank 1 Tensors specifying the training example
and feature to which the corresponding embedding_indices and
aggregation_weights values belong. sample_indices[i] must equal b * nf +
f, where nf is the number of features from the corresponding table, f is
in [0, nf), and b is in [0, batch size). Both int32 and int64 are allowed,
and will be converted to int32 internally.
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables. Both int32 and int64 are allowed and will be converted to int32
internally.
aggregation_weights: A list of rank 1 Tensors containing per sample --
i.e. per (training example, feature) -- aggregation weights. Both float32
and float64 are allowed and will be converted to float32 internally.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
    combiners: A list of string scalars, one for each embedding table,
      specifying how to normalize the embedding activations after weighted
summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is
invalid to have the sum of the weights be 0 for 'mean' or the sum of the
squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default
is to use 'sum' for all tables (optional).
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingSparseBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops.enqueue_tpu_embedding_sparse_batch(
sample_indices=sample_indices,
embedding_indices=embedding_indices,
aggregation_weights=aggregation_weights,
device_ordinal=device_ordinal,
combiners=combiners,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_sparse_batch.__doc__ = (
gen_tpu_ops.enqueue_tpu_embedding_sparse_batch.__doc__)
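# A worked example of the sample_indices layout described above (assumed
# values, for illustration only): with nf = 2 features in a table, the
# (example b, feature f) pair maps to index b * nf + f:
#   (b=0, f=0) -> 0, (b=0, f=1) -> 1,
#   (b=1, f=0) -> 2, (b=1, f=1) -> 3,
#   (b=2, f=0) -> 4, (b=2, f=1) -> 5.
def _sample_index_example(b, f, nf=2):
  """Illustrative sketch of the sample_indices formula: b * nf + f."""
  return b * nf + f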
# pylint: disable=protected-access
def enqueue_tpu_embedding_sparse_tensor_batch(sample_indices,
embedding_indices,
aggregation_weights,
table_ids,
device_ordinal,
max_sequence_lengths=None,
combiners=None,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
sample_indices: A list of rank 2 Tensors specifying the training example
to which the corresponding embedding_indices and aggregation_weights
values belong. It corresponds to sp_ids.indices in
embedding_lookup_sparse(). If the size of its first dimension is 0, we
assume each embedding_indices belongs to a different sample. Both int32
and int64 are allowed and will be converted to int32 internally.
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables. It corresponds to sp_ids.values in embedding_lookup_sparse(). Both
int32 and int64 are allowed and will be converted to int32 internally.
aggregation_weights: A list of rank 1 Tensors containing per training
example aggregation weights. It corresponds to sp_weights.values in
embedding_lookup_sparse(). If the size of its first dimension is 0, we
assume all weights are 1. Both float32 and float64 are allowed and will
be converted to float32 internally.
table_ids: A list of integers specifying the identifier of the embedding
table (offset of TableDescriptor in the TPUEmbeddingConfiguration) to
lookup the corresponding input. The ith input is looked up using
table_ids[i]. The size of the table_ids list must be equal to that of
sample_indices, embedding_indices and aggregation_weights.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
    max_sequence_lengths: A list of integers, the size of which is equal to
      that of sample_indices. If an entry is 0, the corresponding feature is
      considered a non-sequence feature. If greater than 0, the corresponding
      feature is a sequence feature with the given maximum length. If None, we
      assume a list of all zeroes.
    combiners: A list of string scalars, one for each embedding table,
      specifying how to normalize the embedding activations after weighted
summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is
invalid to have the sum of the weights be 0 for 'mean' or the sum of the
squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default
is to use 'sum' for all tables (optional).
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingSparseTensorBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch(
sample_indices=sample_indices,
embedding_indices=embedding_indices,
aggregation_weights=aggregation_weights,
table_ids=table_ids,
device_ordinal=device_ordinal,
max_sequence_lengths=max_sequence_lengths,
combiners=combiners,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_sparse_tensor_batch.__doc__ = (
gen_tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch.__doc__)
|
tensorflow-master
|
tensorflow/python/tpu/ops/tpu_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Operations to select TPU core to run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/python/tpu/ops/tpu_ordinal_selector_op.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
#
# Do not use pylint on generated code.
# pylint: disable=missing-docstring,g-short-docstring-punctuation,g-no-space-after-docstring-summary,invalid-name,line-too-long,unused-argument,g-doc-args
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import grpc
from tensorflow.core.profiler import profiler_analysis_pb2 as third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
class ProfileAnalysisStub(object):
"""//////////////////////////////////////////////////////////////////////////////
  ProfileAnalysis service provides an entry point for profiling TPUs and for
  serving profiled data to TensorBoard through gRPC
//////////////////////////////////////////////////////////////////////////////
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.NewSession = channel.unary_unary(
'/tensorflow.ProfileAnalysis/NewSession',
request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.NewProfileSessionRequest.SerializeToString,
response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.NewProfileSessionResponse.FromString,
)
self.EnumSessions = channel.unary_unary(
'/tensorflow.ProfileAnalysis/EnumSessions',
request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.EnumProfileSessionsAndToolsRequest.SerializeToString,
response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.EnumProfileSessionsAndToolsResponse.FromString,
)
self.GetSessionToolData = channel.unary_unary(
'/tensorflow.ProfileAnalysis/GetSessionToolData',
request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.ProfileSessionDataRequest.SerializeToString,
response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.ProfileSessionDataResponse.FromString,
)
class ProfileAnalysisServicer(object):
"""//////////////////////////////////////////////////////////////////////////////
  ProfileAnalysis service provides an entry point for profiling TPUs and for
  serving profiled data to TensorBoard through gRPC
//////////////////////////////////////////////////////////////////////////////
"""
def NewSession(self, request, context):
"""Starts a profiling session, blocks until it completes.
    The TPUProfileAnalysis service delegates this to the TPUProfiler service.
    It populates the profiled data in the repository, then returns status to
    the caller.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EnumSessions(self, request, context):
"""Enumerate existing sessions and return available profile tools."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSessionToolData(self, request, context):
"""Retrieve specific tool's data for specific session."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ProfileAnalysisServicer_to_server(servicer, server):
rpc_method_handlers = {
'NewSession':
grpc.unary_unary_rpc_method_handler(
servicer.NewSession,
request_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.NewProfileSessionRequest.FromString,
response_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.NewProfileSessionResponse.SerializeToString,
),
'EnumSessions':
grpc.unary_unary_rpc_method_handler(
servicer.EnumSessions,
request_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.EnumProfileSessionsAndToolsRequest.FromString,
response_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.EnumProfileSessionsAndToolsResponse.SerializeToString,
),
'GetSessionToolData':
grpc.unary_unary_rpc_method_handler(
servicer.GetSessionToolData,
request_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.ProfileSessionDataRequest.FromString,
response_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.ProfileSessionDataResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'tensorflow.ProfileAnalysis', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
tensorflow-master
|
tensorflow/python/tpu/profiler/profiler_analysis_pb2_grpc.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Classes for TPU trace events."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow.core.protobuf.trace_events_pb2 import *
from tensorflow.core.profiler.profiler_analysis_pb2 import *
# pylint: enable=wildcard-import,unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['Trace', 'Resource', 'Device', 'TraceEvent']
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/python/tpu/profiler/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`tf.data.Dataset` API for input pipelines.
See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.data import experimental
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.data.ops.dataset_ops import make_initializable_iterator
from tensorflow.python.data.ops.dataset_ops import make_one_shot_iterator
from tensorflow.python.data.ops.iterator_ops import Iterator
from tensorflow.python.data.ops.readers import FixedLengthRecordDataset
from tensorflow.python.data.ops.readers import TextLineDataset
from tensorflow.python.data.ops.readers import TFRecordDataset
# pylint: enable=unused-import
|
tensorflow-master
|
tensorflow/python/data/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for building input pipelines.
This module contains experimental `Dataset` sources and transformations that can
be used in conjunction with the `tf.data.Dataset` API. Note that the
`tf.data.experimental` API is not subject to the same backwards compatibility
guarantees as `tf.data`, but we will provide deprecation advice in advance of
removing existing functionality.
See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
@@Counter
@@CheckpointInputPipelineHook
@@CsvDataset
@@DatasetStructure
@@DistributeOptions
@@MapVectorizationOptions
@@NestedStructure
@@OptimizationOptions
@@Optional
@@OptionalStructure
@@RaggedTensorStructure
@@RandomDataset
@@Reducer
@@SparseTensorStructure
@@SqlDataset
@@StatsAggregator
@@StatsOptions
@@Structure
@@TFRecordWriter
@@TensorArrayStructure
@@TensorStructure
@@ThreadingOptions
@@bucket_by_sequence_length
@@bytes_produced_stats
@@cardinality
@@choose_from_datasets
@@copy_to_device
@@dense_to_sparse_batch
@@enumerate_dataset
@@from_variant
@@get_next_as_optional
@@get_single_element
@@get_structure
@@group_by_reducer
@@group_by_window
@@ignore_errors
@@latency_stats
@@make_batched_features_dataset
@@make_csv_dataset
@@make_saveable_from_iterator
@@map_and_batch
@@map_and_batch_with_legacy_function
@@parallel_interleave
@@parse_example_dataset
@@prefetch_to_device
@@rejection_resample
@@sample_from_datasets
@@scan
@@shuffle_and_repeat
@@take_while
@@to_variant
@@unbatch
@@unique
@@AUTOTUNE
@@INFINITE_CARDINALITY
@@UNKNOWN_CARDINALITY
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch
from tensorflow.python.data.experimental.ops.batching import map_and_batch
from tensorflow.python.data.experimental.ops.batching import map_and_batch_with_legacy_function
from tensorflow.python.data.experimental.ops.batching import unbatch
from tensorflow.python.data.experimental.ops.cardinality import cardinality
from tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY
from tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY
from tensorflow.python.data.experimental.ops.counter import Counter
from tensorflow.python.data.experimental.ops.distribute_options import DistributeOptions
from tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset
from tensorflow.python.data.experimental.ops.error_ops import ignore_errors
from tensorflow.python.data.experimental.ops.get_single_element import get_single_element
from tensorflow.python.data.experimental.ops.grouping import bucket_by_sequence_length
from tensorflow.python.data.experimental.ops.grouping import group_by_reducer
from tensorflow.python.data.experimental.ops.grouping import group_by_window
from tensorflow.python.data.experimental.ops.grouping import Reducer
from tensorflow.python.data.experimental.ops.interleave_ops import choose_from_datasets
from tensorflow.python.data.experimental.ops.interleave_ops import parallel_interleave
from tensorflow.python.data.experimental.ops.interleave_ops import sample_from_datasets
from tensorflow.python.data.experimental.ops.iterator_ops import CheckpointInputPipelineHook
from tensorflow.python.data.experimental.ops.iterator_ops import make_saveable_from_iterator
from tensorflow.python.data.experimental.ops.optimization_options import MapVectorizationOptions
from tensorflow.python.data.experimental.ops.optimization_options import OptimizationOptions
from tensorflow.python.data.experimental.ops.parsing_ops import parse_example_dataset
from tensorflow.python.data.experimental.ops.prefetching_ops import copy_to_device
from tensorflow.python.data.experimental.ops.prefetching_ops import prefetch_to_device
from tensorflow.python.data.experimental.ops.random_ops import RandomDataset
from tensorflow.python.data.experimental.ops.readers import CsvDataset
from tensorflow.python.data.experimental.ops.readers import make_batched_features_dataset
from tensorflow.python.data.experimental.ops.readers import make_csv_dataset
from tensorflow.python.data.experimental.ops.readers import SqlDataset
from tensorflow.python.data.experimental.ops.resampling import rejection_resample
from tensorflow.python.data.experimental.ops.scan_ops import scan
from tensorflow.python.data.experimental.ops.shuffle_ops import shuffle_and_repeat
from tensorflow.python.data.experimental.ops.stats_aggregator import StatsAggregator
from tensorflow.python.data.experimental.ops.stats_ops import bytes_produced_stats
from tensorflow.python.data.experimental.ops.stats_ops import latency_stats
from tensorflow.python.data.experimental.ops.stats_options import StatsOptions
from tensorflow.python.data.experimental.ops.take_while_ops import take_while
from tensorflow.python.data.experimental.ops.threading_options import ThreadingOptions
from tensorflow.python.data.experimental.ops.unique import unique
from tensorflow.python.data.experimental.ops.writers import TFRecordWriter
from tensorflow.python.data.ops.dataset_ops import AUTOTUNE
from tensorflow.python.data.ops.dataset_ops import DatasetStructure
from tensorflow.python.data.ops.dataset_ops import from_variant
from tensorflow.python.data.ops.dataset_ops import get_structure
from tensorflow.python.data.ops.dataset_ops import to_variant
from tensorflow.python.data.ops.iterator_ops import get_next_as_optional
from tensorflow.python.data.ops.optional_ops import Optional
from tensorflow.python.data.ops.optional_ops import OptionalStructure
from tensorflow.python.data.util.structure import NestedStructure
from tensorflow.python.data.util.structure import RaggedTensorStructure
from tensorflow.python.data.util.structure import SparseTensorStructure
from tensorflow.python.data.util.structure import Structure
from tensorflow.python.data.util.structure import TensorArrayStructure
from tensorflow.python.data.util.structure import TensorStructure
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/python/data/experimental/__init__.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.take_while()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import take_while_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class TakeWhileTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.parameters((14, 2), (15, 2), (100, 3))
def testTakeWhileDataset(self, num_elements, window_size):
def _predicate_func(elem):
return array_ops.shape(elem)[0] > (window_size - 1)
take_while = take_while_ops.take_while(_predicate_func)
dataset = dataset_ops.Dataset.range(num_elements).batch(window_size)
dataset = dataset.apply(take_while).flat_map(
dataset_ops.Dataset.from_tensor_slices)
expected_num_elements = int(num_elements / window_size) * window_size
self.assertDatasetProduces(dataset, np.arange(expected_num_elements))
@parameterized.parameters((10, 2, False), (16, 7, False), (100, 99, False),
(100, 101, True), (0, 1, True))
def testTakeWhileDatasetRange(self, num_elements, upper_bound, out_of_bounds):
dataset = dataset_ops.Dataset.range(num_elements).apply(
take_while_ops.take_while(lambda x: x < upper_bound))
if out_of_bounds:
with self.assertRaises(errors.OutOfRangeError):
self.assertDatasetProduces(dataset, np.arange(upper_bound))
else:
self.assertDatasetProduces(dataset, np.arange(upper_bound))
def testTakeWhileDatasetString(self):
def not_equal(string):
return lambda x: math_ops.not_equal(x, constant_op.constant(string))
string = ["this", "is", "the", "test", "for", "strings"]
dataset = dataset_ops.Dataset.from_tensor_slices(string).apply(
take_while_ops.take_while(not_equal("test")))
next_element = self.getNext(dataset)
self.assertEqual(b"this", self.evaluate(next_element()))
self.assertEqual(b"is", self.evaluate(next_element()))
self.assertEqual(b"the", self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.assertEqual(b"test", self.evaluate(next_element()))
@parameterized.parameters((5, 3), (10, 0), (100, 5), (8, 7))
def testTakewhileDatasetShortCircuit(self, size, index):
def _predicate_func(data_elem):
return data_elem
boolean_array = [True] * size
boolean_array[index] = False
dataset = dataset_ops.Dataset.from_tensor_slices(boolean_array).apply(
take_while_ops.take_while(_predicate_func))
next_element = self.getNext(dataset)
for _ in range(index):
self.assertTrue(self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testTakeWhileDatasetWithRepeat(self):
dataset = dataset_ops.Dataset.range(10).apply(
take_while_ops.take_while(lambda x: x < 2)).repeat(5)
self.assertDatasetProduces(dataset, np.tile([0, 1], 5))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/take_while_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.shuffle_and_repeat()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import shuffle_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ShuffleAndRepeatTest(test_base.DatasetTestBase):
def _build_ds(self, seed, count=5, num_elements=20):
return dataset_ops.Dataset.range(num_elements).apply(
shuffle_ops.shuffle_and_repeat(buffer_size=5, count=count, seed=seed))
def _gen_outputs(self, ds_fn, num_outputs, verify_exhausted=True):
get_next = self.getNext(ds_fn())
outputs = []
for _ in range(num_outputs):
outputs.append(self.evaluate(get_next()))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
return outputs
def testCorrectOutput(self):
output = self._gen_outputs(lambda: self._build_ds(10), 100)
self.assertSequenceEqual(
sorted(output), sorted(
np.array([range(20) for _ in range(5)]).flatten()))
for i in range(5):
self.assertSequenceEqual(sorted(output[i * 20:(i + 1) * 20]), range(20))
def testReshuffling(self):
# Check that the output orders of different epochs are indeed different.
output = self._gen_outputs(lambda: self._build_ds(10), 100)
for i in range(4):
epoch1 = output[i * 20:(i + 1) * 20]
epoch2 = output[(i + 1) * 20:(i + 2) * 20]
self.assertNotEqual(epoch1, epoch2)
def testSameOrderForSameSeeds(self):
output1 = self._gen_outputs(lambda: self._build_ds(10), 100)
output2 = self._gen_outputs(lambda: self._build_ds(10), 100)
self.assertEqual(output1, output2)
def testDifferentOrderForDifferentSeeds(self):
output1 = self._gen_outputs(lambda: self._build_ds(10), 100)
output2 = self._gen_outputs(lambda: self._build_ds(20), 100)
self.assertNotEqual(output1, output2)
self.assertEqual(sorted(output1), sorted(output2))
def testCountNone(self):
output1 = self._gen_outputs(
lambda: self._build_ds(10, count=None), 100, verify_exhausted=False)
output2 = self._gen_outputs(
lambda: self._build_ds(20, count=None), 100, verify_exhausted=False)
self.assertNotEqual(output1, output2)
self.assertEqual(sorted(output1), sorted(output2))
def testCountMinusOne(self):
output1 = self._gen_outputs(
lambda: self._build_ds(10, count=-1), 100, verify_exhausted=False)
output2 = self._gen_outputs(
lambda: self._build_ds(20, count=-1), 100, verify_exhausted=False)
self.assertNotEqual(output1, output2)
self.assertEqual(sorted(output1), sorted(output2))
def testInfiniteOutputs(self):
# Asserting the iterator is exhausted after producing 100 items should fail.
with self.assertRaises(AssertionError):
self._gen_outputs(lambda: self._build_ds(10, count=None), 100)
with self.assertRaises(AssertionError):
self._gen_outputs(lambda: self._build_ds(10, count=-1), 100)
def testInfiniteEmpty(self):
with self.assertRaises(errors.OutOfRangeError):
self._gen_outputs(lambda: self._build_ds(10, count=None, num_elements=0),
100)
with self.assertRaises(errors.OutOfRangeError):
self._gen_outputs(lambda: self._build_ds(10, count=-1, num_elements=0),
100)
def testLargeBufferSize(self):
ds = dataset_ops.Dataset.range(20).apply(
shuffle_ops.shuffle_and_repeat(buffer_size=21))
get_next = self.getNext(ds)
self.evaluate(get_next())
def testVeryLargeBufferSize(self):
num_epochs = 1000 * 1000
# Each element being shuffled and repeated has shape (100,). This will OOM
# or timeout if we actually load everything into the buffer.
ds = dataset_ops.Dataset.range(500).batch(100).apply(
shuffle_ops.shuffle_and_repeat(
buffer_size=5 * num_epochs, count=num_epochs))
# Verify two epochs worth of output.
output = self._gen_outputs(lambda: ds, 2 * 5, verify_exhausted=False)
for i in range(2):
sorted_epoch = sorted(
output[i * 5:(i + 1) * 5], key=lambda batch: batch[0])
self.assertAllEqual(sorted_epoch, np.arange(500).reshape([5, 100]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/shuffle_and_repeat_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.group_by_window()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
# NOTE(mrry): These tests are based on the tests in bucket_ops_test.py.
# Currently, they use a constant batch size, though they should be made to use
# a different batch size per key.
@test_util.run_all_in_graph_and_eager_modes
class GroupByWindowTest(test_base.DatasetTestBase):
def _dynamicPad(self, bucket, window, window_size):
# TODO(mrry): To match `tf.contrib.training.bucket()`, implement a
# generic form of padded_batch that pads every component
# dynamically and does not rely on static shape information about
# the arguments.
return dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensors(bucket),
window.padded_batch(
32, (tensor_shape.TensorShape([]), tensor_shape.TensorShape(
[None]), tensor_shape.TensorShape([3])))))
def testSingleBucket(self):
def _map_fn(v):
return (v, array_ops.fill([v], v),
array_ops.fill([3], string_ops.as_string(v)))
input_dataset = dataset_ops.Dataset.from_tensor_slices(
math_ops.range(32)).map(_map_fn)
bucketed_dataset = input_dataset.apply(
grouping.group_by_window(
lambda x, y, z: 0,
lambda k, bucket: self._dynamicPad(k, bucket, 32), 32))
get_next = self.getNext(bucketed_dataset)
which_bucket, bucketed_values = self.evaluate(get_next())
self.assertEqual(0, which_bucket)
expected_scalar_int = np.arange(32, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31)).astype(np.int64)
for i in range(32):
expected_unk_int64[i, :i] = i
expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values[0])
self.assertAllEqual(expected_unk_int64, bucketed_values[1])
self.assertAllEqual(expected_vec3_str, bucketed_values[2])
def testEvenOddBuckets(self):
def _map_fn(v):
return (v, array_ops.fill([v], v),
array_ops.fill([3], string_ops.as_string(v)))
input_dataset = dataset_ops.Dataset.from_tensor_slices(
math_ops.range(64)).map(_map_fn)
bucketed_dataset = input_dataset.apply(
grouping.group_by_window(
lambda x, y, z: math_ops.cast(x % 2, dtypes.int64),
lambda k, bucket: self._dynamicPad(k, bucket, 32), 32))
get_next = self.getNext(bucketed_dataset)
# Get two minibatches (one containing even values, one containing odds)
which_bucket_even, bucketed_values_even = self.evaluate(get_next())
which_bucket_odd, bucketed_values_odd = self.evaluate(get_next())
# Count number of bucket_tensors.
self.assertEqual(3, len(bucketed_values_even))
self.assertEqual(3, len(bucketed_values_odd))
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, which_bucket_even)
self.assertAllEqual(1, which_bucket_odd)
# Test the first bucket outputted, the events starting at 0
expected_scalar_int = np.arange(0, 32 * 2, 2, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i] = 2 * i
expected_vec3_str = np.vstack(
3 * [np.arange(0, 32 * 2, 2).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values_even[0])
self.assertAllEqual(expected_unk_int64, bucketed_values_even[1])
self.assertAllEqual(expected_vec3_str, bucketed_values_even[2])
# Test the second bucket outputted, the odds starting at 1
expected_scalar_int = np.arange(1, 32 * 2 + 1, 2, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i + 1] = 2 * i + 1
expected_vec3_str = np.vstack(
3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values_odd[0])
self.assertAllEqual(expected_unk_int64, bucketed_values_odd[1])
self.assertAllEqual(expected_vec3_str, bucketed_values_odd[2])
def testEvenOddBucketsFilterOutAllOdd(self):
def _map_fn(v):
return {
"x": v,
"y": array_ops.fill([v], v),
"z": array_ops.fill([3], string_ops.as_string(v))
}
def _dynamic_pad_fn(bucket, window, _):
return dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensors(bucket),
window.padded_batch(
32, {
"x": tensor_shape.TensorShape([]),
"y": tensor_shape.TensorShape([None]),
"z": tensor_shape.TensorShape([3])
})))
input_dataset = dataset_ops.Dataset.from_tensor_slices(math_ops.range(
128)).map(_map_fn).filter(lambda d: math_ops.equal(d["x"] % 2, 0))
bucketed_dataset = input_dataset.apply(
grouping.group_by_window(
lambda d: math_ops.cast(d["x"] % 2, dtypes.int64),
lambda k, bucket: _dynamic_pad_fn(k, bucket, 32), 32))
get_next = self.getNext(bucketed_dataset)
# Get two minibatches ([0, 2, ...] and [64, 66, ...])
which_bucket0, bucketed_values_even0 = self.evaluate(get_next())
which_bucket1, bucketed_values_even1 = self.evaluate(get_next())
# Ensure that bucket 1 was completely filtered out
self.assertAllEqual(0, which_bucket0)
self.assertAllEqual(0, which_bucket1)
self.assertAllEqual(
np.arange(0, 64, 2, dtype=np.int64), bucketed_values_even0["x"])
self.assertAllEqual(
np.arange(64, 128, 2, dtype=np.int64), bucketed_values_even1["x"])
def testDynamicWindowSize(self):
components = np.arange(100).astype(np.int64)
# Key fn: even/odd
# Reduce fn: batches of up to 20, so each window is emitted as one batch
# Window size fn: even=5, odd=10
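# When `window_size` is None (as below), `group_by_window` calls
# `window_size_func(key)` instead, so each key can buffer a different number
# of elements before its window is reduced.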
def window_size_func(key):
window_sizes = constant_op.constant([5, 10], dtype=dtypes.int64)
return window_sizes[key]
dataset = dataset_ops.Dataset.from_tensor_slices(components).apply(
grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(20),
None, window_size_func))
get_next = self.getNext(dataset)
with self.assertRaises(errors.OutOfRangeError):
batches = 0
while True:
result = self.evaluate(get_next())
is_even = all(x % 2 == 0 for x in result)
is_odd = all(x % 2 == 1 for x in result)
self.assertTrue(is_even or is_odd)
expected_batch_size = 5 if is_even else 10
self.assertEqual(expected_batch_size, result.shape[0])
batches += 1
self.assertEqual(batches, 15)
def testSimple(self):
components = np.random.randint(100, size=(200,)).astype(np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(
components).map(lambda x: x * x).apply(
grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4),
4))
get_next = self.getNext(dataset)
counts = []
with self.assertRaises(errors.OutOfRangeError):
while True:
result = self.evaluate(get_next())
self.assertTrue(
all(x % 2 == 0 for x in result) or all(x % 2 == 1 for x in result))
counts.append(result.shape[0])
self.assertEqual(len(components), sum(counts))
num_full_batches = len([c for c in counts if c == 4])
self.assertGreaterEqual(num_full_batches, 24)
self.assertTrue(all(c == 4 for c in counts[:num_full_batches]))
def testImmediateOutput(self):
components = np.array(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0], dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(
-1).apply(
grouping.group_by_window(lambda x: x % 3, lambda _, xs: xs.batch(4),
4))
get_next = self.getNext(dataset)
# The input is infinite, so this test demonstrates that:
# 1. We produce output without having to consume the entire input,
# 2. Different buckets can produce output at different rates, and
# 3. For deterministic input, the output is deterministic.
for _ in range(3):
self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))
self.assertAllEqual([1, 1, 1, 1], self.evaluate(get_next()))
self.assertAllEqual([2, 2, 2, 2], self.evaluate(get_next()))
self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))
def testSmallGroups(self):
components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).apply(
grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4), 4))
get_next = self.getNext(dataset)
self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))
self.assertAllEqual([1, 1, 1, 1], self.evaluate(get_next()))
# The small outputs at the end are deterministically produced in key
# order.
self.assertAllEqual([0, 0, 0], self.evaluate(get_next()))
self.assertAllEqual([1], self.evaluate(get_next()))
def testEmpty(self):
dataset = dataset_ops.Dataset.range(4).apply(
grouping.group_by_window(lambda _: 0, lambda _, xs: xs, 0))
get_next = self.getNext(dataset)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Window size must be greater than zero, but got 0."):
print(self.evaluate(get_next()))
def testReduceFuncError(self):
components = np.random.randint(100, size=(200,)).astype(np.int64)
def reduce_func(_, xs):
# Introduce an incorrect padded shape that cannot (currently) be
# detected at graph construction time.
return xs.padded_batch(
4,
padded_shapes=(tensor_shape.TensorShape([]),
constant_op.constant([5], dtype=dtypes.int64) * -1))
dataset = dataset_ops.Dataset.from_tensor_slices(
components).map(lambda x: (x, ops.convert_to_tensor([x * x]))).apply(
grouping.group_by_window(lambda x, _: x % 2, reduce_func, 32))
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
def testConsumeWindowDatasetMoreThanOnce(self):
components = np.random.randint(50, size=(200,)).astype(np.int64)
def reduce_func(key, window):
# Apply two different kinds of padding to the input: tight
# padding, and quantized (to a multiple of 10) padding.
return dataset_ops.Dataset.zip((
window.padded_batch(
4, padded_shapes=tensor_shape.TensorShape([None])),
window.padded_batch(
4, padded_shapes=ops.convert_to_tensor([(key + 1) * 10])),
))
dataset = dataset_ops.Dataset.from_tensor_slices(
components
).map(lambda x: array_ops.fill([math_ops.cast(x, dtypes.int32)], x)).apply(
grouping.group_by_window(
lambda x: math_ops.cast(array_ops.shape(x)[0] // 10, dtypes.int64),
reduce_func, 4))
get_next = self.getNext(dataset)
counts = []
with self.assertRaises(errors.OutOfRangeError):
while True:
tight_result, multiple_of_10_result = self.evaluate(get_next())
self.assertEqual(0, multiple_of_10_result.shape[1] % 10)
self.assertAllEqual(tight_result,
multiple_of_10_result[:, :tight_result.shape[1]])
counts.append(tight_result.shape[0])
self.assertEqual(len(components), sum(counts))
def testShortCircuit(self):
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
grouping.group_by_window(lambda x: x, lambda _, window: window.batch(1),
1))
self.assertDatasetProduces(
dataset, expected_output=[[i] for i in range(10)])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/group_by_window_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.sleep()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.data.experimental.ops import sleep
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
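# `sleep.sleep(n)` is an identity transformation that pauses for `n`
# microseconds before producing each element; the test below uses this to
# check that total iteration time grows accordingly.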
@test_util.run_all_in_graph_and_eager_modes
class SleepTest(test_base.DatasetTestBase):
def testSleep(self):
self.skipTest("b/123597912")
sleep_microseconds = 100
dataset = dataset_ops.Dataset.range(10).apply(
sleep.sleep(sleep_microseconds))
next_element = self.getNext(dataset)
start_time = time.time()
for i in range(10):
self.assertEqual(i, self.evaluate(next_element()))
end_time = time.time()
self.assertGreater(end_time - start_time, (10 * sleep_microseconds) / 1e6)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/sleep_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_RebatchDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.experimental.ops import sleep
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _flat_shapes(dataset):
return nest.flatten(dataset_ops.get_legacy_output_shapes(dataset))
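# `_RebatchDataset(dataset, num_workers=n)` rewrites the batching inside
# `dataset` so that each original batch is split into `n` smaller sub-batches
# (e.g. batches of 32 become batches of 8 for 4 workers); the tests below
# check both the resulting static shapes and the produced values.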
@parameterized.named_parameters(("WithDropRemainder", True),
("WithoutDropRemainder", False))
@test_util.run_all_in_graph_and_eager_modes
class RebatchDatasetTest(test_base.DatasetTestBase):
def testBasic(self, drop_remainder):
dataset = dataset_ops.Dataset.range(1024).batch(
32, drop_remainder=drop_remainder)
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[32 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
self.assertEqual(
[[8 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
expected_output = [[k for k in range(i, i + 8)] for i in range(0, 1024, 8)] # pylint: disable=g-complex-comprehension
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testScalarInputError(self, _):
dataset = dataset_ops.Dataset.range(1024)
with self.assertRaisesRegexp(ValueError, "at least one dimension"):
distribute._RebatchDataset(dataset, num_workers=4)
def testNotDivisible(self, drop_remainder):
dataset = dataset_ops.Dataset.range(1024).batch(
32, drop_remainder=drop_remainder)
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=5)
expected_output = [[k for k in range(i, i + 7)] for i in range(0, 1022, 7)] # pylint: disable=g-complex-comprehension
if not drop_remainder:
expected_output.append([1022, 1023])
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testTupleOutput(self, drop_remainder):
dataset = (
dataset_ops.Dataset.range(1024).map(lambda x: (x, x)).batch(
32, drop_remainder=drop_remainder))
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
expected_output = [([k for k in range(i, i + 8)], # pylint: disable=g-complex-comprehension
[k for k in range(i, i + 8)])
for i in range(0, 1024, 8)]
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testNestedDictionaryOutput(self, drop_remainder):
dataset = dataset_ops.Dataset.range(1024).map(
lambda x: {"a": x, "b": {"c": x}}).batch(
32, drop_remainder=drop_remainder)
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
expected_output = [{"a": [k for k in range(i, i + 8)], # pylint: disable=g-complex-comprehension
"b": {"c": [k for k in range(i, i + 8)]}}
for i in range(0, 1024, 8)]
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testFinalPartialBatchOriginal(self, drop_remainder):
dataset = dataset_ops.Dataset.range(1032).batch(
32, drop_remainder=drop_remainder)
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[32 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
self.assertEqual(
[[8 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
expected_output = [[k for k in range(i, i + 8)] for i in range(0, 1032, 8)] # pylint: disable=g-complex-comprehension
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testFinalPartialBatchAfterRebatch(self, drop_remainder):
dataset = dataset_ops.Dataset.range(34).batch(
32, drop_remainder=drop_remainder)
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[32 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
self.assertEqual(
[[8 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
expected_output = [[k for k in range(i, i + 8)] for i in range(0, 32, 8)] # pylint: disable=g-complex-comprehension
if not drop_remainder:
expected_output += [[32, 33]]
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testMultipleBatches(self, drop_remainder):
dataset = dataset_ops.Dataset.range(128).batch(
4, drop_remainder=drop_remainder)
dataset = dataset.batch(8, drop_remainder=drop_remainder)
self.assertEqual(
[[8, 4]] if drop_remainder else [[None, None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
# Each element is a list of 8 elements where each element is a list of 4.
expected_output = [[[j, j + 1, j + 2, j + 3] # pylint: disable=g-complex-comprehension
for j in range(i, i + 32, 4)] # generates 8 elements
for i in range(0, 128, 32)]
self.assertDatasetProduces(dataset, expected_output)
rebatched_dataset = distribute._RebatchDataset(dataset, 4)
self.assertEqual(
[[2, 4]] if drop_remainder else [[None, None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
# Each element is a list of 2 elements where each element is a list of 4.
expected_output = [[[j, j + 1, j + 2, j + 3] # pylint: disable=g-complex-comprehension
for j in range(i, i + 8, 4)] # generates 2 elements
for i in range(0, 128, 8)]
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testMapAndBatch(self, drop_remainder):
dataset = dataset_ops.Dataset.range(1024).apply(
batching.map_and_batch(
math_ops.square, 32, drop_remainder=drop_remainder))
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[32 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
self.assertEqual(
[[8 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
expected_output = [[k**2 for k in range(i, i + 8)] # pylint: disable=g-complex-comprehension
for i in range(0, 1024, 8)]
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testMapAndBatchWithCapturedInput(self, drop_remainder):
captured_t = variables.Variable(42)
dataset = dataset_ops.Dataset.range(1024).apply(
batching.map_and_batch(
lambda x: captured_t, 32, drop_remainder=drop_remainder))
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual([[32 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
self.assertEqual([[8 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
expected_output = [[42 for _ in range(i, i + 8)] # pylint: disable=g-complex-comprehension
for i in range(0, 1024, 8)]
self.evaluate(variables.global_variables_initializer())
self.assertDatasetProduces(
rebatched_dataset, expected_output, requires_initialization=True)
def testPaddedBatch(self, drop_remainder):
dataset = dataset_ops.Dataset.range(128).batch(4).padded_batch(
8, padded_shapes=[5], drop_remainder=drop_remainder)
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[8, 5]] if drop_remainder else [[None, 5]],
[ts.as_list() for ts in _flat_shapes(dataset)])
# Each element is a list of 8 elements in which each element is a list of 5
# elements, first four are numbers and the last one is a padded zero.
expected_output = [[[j, j + 1, j + 2, j + 3, 0] # pylint: disable=g-complex-comprehension
for j in range(i, i + 32, 4)] # generates 8 elements
for i in range(0, 128, 32)]
self.assertDatasetProduces(dataset, expected_output)
self.assertEqual(
[[2, 5]] if drop_remainder else [[None, 5]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
# Each element is a list of 2 elements in which each element is a list of 5
# elements, first four are numbers and the last one is a padded zero.
expected_output = [[[j, j + 1, j + 2, j + 3, 0] # pylint: disable=g-complex-comprehension
for j in range(i, i + 8, 4)] # generates 2 elements
for i in range(0, 128, 8)]
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testConcatenate(self, drop_remainder):
dataset1 = dataset_ops.Dataset.range(64).batch(
8, drop_remainder=drop_remainder)
dataset2 = dataset_ops.Dataset.range(32).batch(
8, drop_remainder=drop_remainder)
dataset = dataset1.concatenate(dataset2)
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[8 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
self.assertEqual(
[[2 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
expected_output = ([[i, i + 1] for i in range(0, 64, 2)] +
[[i, i + 1] for i in range(0, 32, 2)])
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testConcatenateDifferentShapes(self, drop_remainder):
dataset1 = dataset_ops.Dataset.range(64).batch(
16, drop_remainder=drop_remainder)
dataset2 = dataset_ops.Dataset.range(32).batch(
8, drop_remainder=drop_remainder)
dataset = dataset1.concatenate(dataset2)
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[None]], [ts.as_list() for ts in _flat_shapes(dataset)])
self.assertEqual(
[[None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
expected_output = ([[i, i + 1, i + 2, i + 3] for i in range(0, 64, 4)] +
[[i, i + 1] for i in range(0, 32, 2)])
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testZip(self, drop_remainder):
dataset1 = dataset_ops.Dataset.range(64).batch(
8, drop_remainder=drop_remainder)
dataset2 = dataset_ops.Dataset.range(32).batch(
8, drop_remainder=drop_remainder)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[8], [8]] if drop_remainder else [[None], [None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
self.assertEqual(
[[2], [2]] if drop_remainder else [[None], [None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
expected_output = [([i, i + 1], [i, i + 1]) for i in range(0, 32, 2)]
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testZipDifferentShapes(self, drop_remainder):
dataset1 = dataset_ops.Dataset.range(64).batch(
16, drop_remainder=drop_remainder)
dataset2 = dataset_ops.Dataset.range(32).batch(
8, drop_remainder=drop_remainder)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[16], [8]] if drop_remainder else [[None], [None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
self.assertEqual(
[[4], [2]] if drop_remainder else [[None], [None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
expected_output = [([2 * i, 2 * i + 1, 2 * i + 2, 2 * i + 3], [i, i + 1])
for i in range(0, 32, 2)]
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testUnsupportedTransformError(self, drop_remainder):
dataset = dataset_ops.Dataset.range(1024).batch(
32, drop_remainder=drop_remainder).apply(sleep.sleep(10))
with self.assertRaises(errors.InvalidArgumentError):
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
next_element = self.getNext(rebatched_dataset)
self.evaluate(next_element())
def testFlatMapBatching(self, drop_remainder):
dataset = dataset_ops.Dataset.range(
2).flat_map(lambda _: dataset_ops.Dataset.range(32).batch( # pylint: disable=g-long-lambda
32, drop_remainder=drop_remainder))
self.assertEqual(
[[32 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
# Two elements where each element is range(32)
expected_output = [[k for k in range(32)] for _ in range(2)] # pylint: disable=g-complex-comprehension
self.assertDatasetProduces(dataset, expected_output)
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[8 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
# Eight batches in total: for each of the two flat_map repeats, four
# batches of 8 consecutive values.
expected_output = [[k for k in range(i, i + 8)] # pylint: disable=g-complex-comprehension
for _ in range(2)
for i in range(0, 32, 8)] # generates 4 elements
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testInterleaveBatching(self, drop_remainder):
dataset = dataset_ops.Dataset.range(
2).interleave(lambda _: dataset_ops.Dataset.range(32).batch( # pylint: disable=g-long-lambda
32, drop_remainder=drop_remainder), cycle_length=2)
self.assertEqual(
[[32 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
# Two elements where each element is range(32)
expected_output = [[k for k in range(32)] for _ in range(2)] # pylint: disable=g-complex-comprehension
self.assertDatasetProduces(dataset, expected_output)
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[8 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
# List of 4 distinct batches of 8 (numbering from 0 to 31), each repeated
# twice.
expected_output = [[k for k in range(i, i + 8)] # pylint: disable=g-complex-comprehension
for i in range(0, 32, 8) # generates 4 elements
for _ in range(2)]
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testParallelInterleaveBatching(self, drop_remainder):
dataset = dataset_ops.Dataset.range(
2).interleave(lambda _: dataset_ops.Dataset.range(32).batch( # pylint: disable=g-long-lambda
32, drop_remainder=drop_remainder), cycle_length=2,
num_parallel_calls=2)
self.assertEqual(
[[32 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
# Two elements where each element is range(32)
expected_output = [[k for k in range(32)] for _ in range(2)] # pylint: disable=g-complex-comprehension
self.assertDatasetProduces(dataset, expected_output)
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=4)
self.assertEqual(
[[8 if drop_remainder else None]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
# List of 4 distinct batches of 8 (numbering from 0 to 31), each repeated
# twice in collated fashion, i.e. [0...8], [0...8], etc.
expected_output = [[k for k in range(i, i + 8)] # pylint: disable=g-complex-comprehension
for i in range(0, 32, 8) # generates 4 elements
for _ in range(2)]
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testGroupByWindowStaticBatch(self, drop_remainder):
dataset = dataset_ops.Dataset.from_tensor_slices(
[[array_ops.constant(i, dtype=dtypes.int64)] * 3 for i in range(40)])
reduce_fn = lambda bucket_id, ds: ds.batch( # pylint: disable=g-long-lambda
batch_size=10, drop_remainder=drop_remainder)
dataset = dataset.apply(
grouping.group_by_window(
key_func=lambda x: x[0] % 4, reduce_func=reduce_fn, window_size=10))
rebatched_dataset = distribute._RebatchDataset(dataset, num_workers=2)
self.assertEqual([[5, 3] if drop_remainder else [None, 3]],
[ts.as_list() for ts in _flat_shapes(rebatched_dataset)])
# pylint: disable=g-complex-comprehension
expected_output = [[[j + i * 4 + k * 20] * 3
for i in range(5)]
for j in range(4)
for k in range(2)]
self.assertDatasetProduces(rebatched_dataset, expected_output)
def testGroupByWindowDynamicBatch(self, drop_remainder):
dataset = dataset_ops.Dataset.range(40).map(lambda x: x % 2)
reduce_fn = lambda bucket_id, ds: ds.batch( # pylint: disable=g-long-lambda
batch_size=(bucket_id + 1) * 5, drop_remainder=drop_remainder)
dataset = dataset.apply(
grouping.group_by_window(
key_func=lambda x: x, reduce_func=reduce_fn, window_size=10))
dataset = distribute._RebatchDataset(dataset, num_workers=2)
self.assertEqual([[None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
pairs = [(3, 0), (3, 0), (3, 0)]
if not drop_remainder:
pairs.extend([(1, 0)])
pairs.extend([(5, 1), (5, 1)])
pairs = pairs * 2
expected_output = [[value] * batch_size for batch_size, value in pairs]
self.assertDatasetProduces(dataset, expected_output)
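# `scan_ops.scan(initial_state, scan_func)` threads a state through the
# dataset: `scan_func(state, value)` returns `(new_state, output)`. Here the
# state is pinned at 2, so every batch element is simply doubled before the
# rebatching is applied.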
def testScanAfterBatch(self, drop_remainder):
dataset = dataset_ops.Dataset.range(40).batch(10).apply(
scan_ops.scan(np.int64(2), lambda state, value: (state, value * state)))
dataset = distribute._RebatchDataset(dataset, num_workers=2)
self.assertEqual([[None]],
[ts.as_list() for ts in _flat_shapes(dataset)])
expected_output = [[i * 2 for i in range(j*5, (j+1)*5)] for j in range(8)] # pylint: disable=g-complex-comprehension
self.assertDatasetProduces(dataset, expected_output)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing `tf.data.experimental.SqlDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sqlite3
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.platform import test
class SqlDatasetTestBase(test_base.DatasetTestBase):
"""Base class for setting up and testing SqlDataset."""
def _createSqlDataset(self,
query,
output_types,
driver_name="sqlite",
num_repeats=1):
dataset = readers.SqlDataset(driver_name, self.data_source_name, query,
output_types).repeat(num_repeats)
return dataset
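# A hypothetical usage sketch (not a test in this file): a subclass would
# issue a query against the fixtures created in `setUp` below and declare
# the column dtypes, e.g.
#
#   dataset = self._createSqlDataset(
#       query="SELECT first_name, last_name FROM students",
#       output_types=(dtypes.string, dtypes.string))
#
# and then iterate the dataset to compare rows against the inserted values
# (`dtypes` would have to be imported by the subclass).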
def setUp(self):
self.data_source_name = os.path.join(test.get_temp_dir(), "tftest.sqlite")
conn = sqlite3.connect(self.data_source_name)
c = conn.cursor()
c.execute("DROP TABLE IF EXISTS students")
c.execute("DROP TABLE IF EXISTS people")
c.execute("DROP TABLE IF EXISTS townspeople")
c.execute(
"CREATE TABLE IF NOT EXISTS students (id INTEGER NOT NULL PRIMARY KEY, "
"first_name VARCHAR(100), last_name VARCHAR(100), motto VARCHAR(100), "
"school_id VARCHAR(100), favorite_nonsense_word VARCHAR(100), "
"desk_number INTEGER, income INTEGER, favorite_number INTEGER, "
"favorite_big_number INTEGER, favorite_negative_number INTEGER, "
"favorite_medium_sized_number INTEGER, brownie_points INTEGER, "
"account_balance INTEGER, registration_complete INTEGER)")
c.executemany(
"INSERT INTO students (first_name, last_name, motto, school_id, "
"favorite_nonsense_word, desk_number, income, favorite_number, "
"favorite_big_number, favorite_negative_number, "
"favorite_medium_sized_number, brownie_points, account_balance, "
"registration_complete) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
[("John", "Doe", "Hi!", "123", "n\0nsense", 9, 0, 2147483647,
9223372036854775807, -2, 32767, 0, 0, 1),
("Jane", "Moe", "Hi again!", "1000", "nonsense\0", 127, -20000,
-2147483648, -9223372036854775808, -128, -32768, 255, 65535, 0)])
c.execute(
"CREATE TABLE IF NOT EXISTS people (id INTEGER NOT NULL PRIMARY KEY, "
"first_name VARCHAR(100), last_name VARCHAR(100), state VARCHAR(100))")
c.executemany(
"INSERT INTO PEOPLE (first_name, last_name, state) VALUES (?, ?, ?)",
[("Benjamin", "Franklin", "Pennsylvania"), ("John", "Doe",
"California")])
c.execute(
"CREATE TABLE IF NOT EXISTS townspeople (id INTEGER NOT NULL PRIMARY "
"KEY, first_name VARCHAR(100), last_name VARCHAR(100), victories "
"FLOAT, accolades FLOAT, triumphs FLOAT)")
c.executemany(
"INSERT INTO townspeople (first_name, last_name, victories, "
"accolades, triumphs) VALUES (?, ?, ?, ?, ?)",
[("George", "Washington", 20.00,
1331241.321342132321324589798264627463827647382647382643874,
9007199254740991.0),
("John", "Adams", -19.95,
1331241321342132321324589798264627463827647382647382643874.0,
9007199254740992.0)])
conn.commit()
conn.close()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/sql_dataset_test_base.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.Counter`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import counter
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
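# `counter.Counter(start, step)` is an unbounded dataset of int64 scalars
# `start, start + step, start + 2 * step, ...`; negative steps count down,
# as exercised below.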
@test_util.run_all_in_graph_and_eager_modes
class CounterTest(test_base.DatasetTestBase):
def testCounter(self):
"""Test dataset construction using `count`."""
dataset = counter.Counter(start=3, step=4)
self.assertEqual(
[], dataset_ops.get_legacy_output_shapes(dataset).as_list())
self.assertEqual(dtypes.int64, dataset_ops.get_legacy_output_types(dataset))
get_next = self.getNext(dataset)
negative_dataset = counter.Counter(start=0, step=-1)
negative_get_next = self.getNext(negative_dataset)
self.assertEqual(3, self.evaluate(get_next()))
self.assertEqual(3 + 4, self.evaluate(get_next()))
self.assertEqual(3 + 2 * 4, self.evaluate(get_next()))
self.assertEqual(0, self.evaluate(negative_get_next()))
self.assertEqual(-1, self.evaluate(negative_get_next()))
self.assertEqual(-2, self.evaluate(negative_get_next()))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/counter_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing reader datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import parsing_ops
from tensorflow.python.util import compat
class FixedLengthRecordDatasetTestBase(test_base.DatasetTestBase):
"""Base class for setting up and testing FixedLengthRecordDataset."""
def setUp(self):
super(FixedLengthRecordDatasetTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
def _record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
class MakeBatchedFeaturesDatasetTestBase(test_base.DatasetTestBase):
"""Base class for setting up and testing `make_batched_features_dataset`."""
def setUp(self):
super(MakeBatchedFeaturesDatasetTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
def make_batch_feature(self,
filenames,
num_epochs,
batch_size,
label_key=None,
reader_num_threads=1,
parser_num_threads=1,
shuffle=False,
shuffle_seed=None,
drop_final_batch=False):
self.filenames = filenames
self.num_epochs = num_epochs
self.batch_size = batch_size
return readers.make_batched_features_dataset(
file_pattern=self.filenames,
batch_size=self.batch_size,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
"keywords": parsing_ops.VarLenFeature(dtypes.string),
"label": parsing_ops.FixedLenFeature([], dtypes.string),
},
label_key=label_key,
reader=core_readers.TFRecordDataset,
num_epochs=self.num_epochs,
shuffle=shuffle,
shuffle_seed=shuffle_seed,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads,
drop_final_batch=drop_final_batch)
def _record(self, f, r, l):
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
"file":
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[f])),
"record":
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[r])),
"keywords":
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=self._get_keywords(f, r))),
"label":
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[compat.as_bytes(l)]))
}))
return example.SerializeToString()
def _get_keywords(self, f, r):
num_keywords = 1 + (f + r) % 2
keywords = []
for index in range(num_keywords):
keywords.append(compat.as_bytes("keyword%d" % index))
return keywords
def _sum_keywords(self, num_files):
sum_keywords = 0
for i in range(num_files):
for j in range(self._num_records):
sum_keywords += 1 + (i + j) % 2
return sum_keywords
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j, "fake-label"))
writer.close()
return filenames
def _run_actual_batch(self, outputs, label_key_provided=False):
if label_key_provided:
# outputs would be a tuple of (feature dict, label)
features, label = self.evaluate(outputs())
else:
features = self.evaluate(outputs())
label = features["label"]
file_out = features["file"]
keywords_indices = features["keywords"].indices
keywords_values = features["keywords"].values
keywords_dense_shape = features["keywords"].dense_shape
record = features["record"]
return ([
file_out, keywords_indices, keywords_values, keywords_dense_shape,
record, label
])
def _next_actual_batch(self, label_key_provided=False):
return self._run_actual_batch(self.outputs, label_key_provided)
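# Round-robins over up to `cycle_length` iterators at a time, mirroring the
# visitation order of `tf.data`'s interleave transformation: when one
# iterator is exhausted, the next pending iterator takes over its slot.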
def _interleave(self, iterators, cycle_length):
pending_iterators = iterators
open_iterators = []
num_open = 0
for i in range(cycle_length):
if pending_iterators:
open_iterators.append(pending_iterators.pop(0))
num_open += 1
while num_open:
for i in range(min(cycle_length, len(open_iterators))):
if open_iterators[i] is None:
continue
try:
yield next(open_iterators[i])
except StopIteration:
if pending_iterators:
open_iterators[i] = pending_iterators.pop(0)
else:
open_iterators[i] = None
num_open -= 1
def _next_expected_batch(self,
file_indices,
batch_size,
num_epochs,
cycle_length=1):
def _next_record(file_indices):
for j in file_indices:
for i in range(self._num_records):
yield j, i, compat.as_bytes("fake-label")
def _next_record_interleaved(file_indices, cycle_length):
return self._interleave([_next_record([i]) for i in file_indices],
cycle_length)
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
label_batch = []
for _ in range(num_epochs):
if cycle_length == 1:
next_records = _next_record(file_indices)
else:
next_records = _next_record_interleaved(file_indices, cycle_length)
for record in next_records:
f = record[0]
r = record[1]
label_batch.append(record[2])
file_batch.append(f)
record_batch.append(r)
keywords = self._get_keywords(f, r)
keywords_batch_values.extend(keywords)
keywords_batch_indices.extend(
[[batch_index, i] for i in range(len(keywords))])
batch_index += 1
keywords_batch_max_len = max(keywords_batch_max_len, len(keywords))
if len(file_batch) == batch_size:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[batch_size, keywords_batch_max_len], record_batch, label_batch
]
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
label_batch = []
if file_batch:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[len(file_batch), keywords_batch_max_len], record_batch, label_batch
]
def verify_records(self,
batch_size,
file_index=None,
num_epochs=1,
label_key_provided=False,
interleave_cycle_length=1):
if file_index is not None:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(
file_indices,
batch_size,
num_epochs,
cycle_length=interleave_cycle_length):
actual_batch = self._next_actual_batch(
label_key_provided=label_key_provided)
for i in range(len(expected_batch)):
self.assertAllEqual(expected_batch[i], actual_batch[i])
class TextLineDatasetTestBase(test_base.DatasetTestBase):
"""Base class for setting up and testing TextLineDataset."""
def _lineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _createFiles(self,
num_files,
num_lines,
crlf=False,
compression_type=None):
filenames = []
for i in range(num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(num_lines):
contents.append(self._lineText(i, j))
# Always include a newline after the record unless it is the last line of
# the file; the first file (i == 0) keeps its trailing newline so that
# files with and without a final newline are both exercised.
if j + 1 != num_lines or i == 0:
contents.append(b"\r\n" if crlf else b"\n")
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
class TFRecordDatasetTestBase(test_base.DatasetTestBase):
"""Base class for setting up and testing TFRecordDataset."""
def _interleave(self, iterators, cycle_length):
pending_iterators = iterators
open_iterators = []
num_open = 0
for i in range(cycle_length):
if pending_iterators:
open_iterators.append(pending_iterators.pop(0))
num_open += 1
while num_open:
for i in range(min(cycle_length, len(open_iterators))):
if open_iterators[i] is None:
continue
try:
yield next(open_iterators[i])
except StopIteration:
if pending_iterators:
open_iterators[i] = pending_iterators.pop(0)
else:
open_iterators[i] = None
num_open -= 1
def _next_expected_batch(self, file_indices, batch_size, num_epochs,
cycle_length, drop_final_batch, use_parser_fn):
def _next_record(file_indices):
for j in file_indices:
for i in range(self._num_records):
yield j, i
def _next_record_interleaved(file_indices, cycle_length):
return self._interleave([_next_record([i]) for i in file_indices],
cycle_length)
record_batch = []
batch_index = 0
for _ in range(num_epochs):
if cycle_length == 1:
next_records = _next_record(file_indices)
else:
next_records = _next_record_interleaved(file_indices, cycle_length)
for f, r in next_records:
record = self._record(f, r)
if use_parser_fn:
record = record[1:]
record_batch.append(record)
batch_index += 1
if len(record_batch) == batch_size:
yield record_batch
record_batch = []
batch_index = 0
if record_batch and not drop_final_batch:
yield record_batch
def _verify_records(self, outputs, batch_size, file_index, num_epochs,
interleave_cycle_length, drop_final_batch, use_parser_fn):
if file_index is not None:
if isinstance(file_index, list):
file_indices = file_index
else:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(
file_indices, batch_size, num_epochs, interleave_cycle_length,
drop_final_batch, use_parser_fn):
actual_batch = self.evaluate(outputs())
self.assertAllEqual(expected_batch, actual_batch)
def setUp(self):
super(TFRecordDatasetTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/reader_dataset_ops_test_base.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `MatchingFilesDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.data.experimental.ops import matching_files
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
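# `MatchingFilesDataset(patterns)` yields, as scalar strings, the filenames
# matching the given glob pattern(s); the tests below cover missing and
# empty directories, suffix and infix wildcards, and recursive `**` patterns.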
@test_util.run_all_in_graph_and_eager_modes
class MatchingFilesDatasetTest(test_base.DatasetTestBase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _touchTempFiles(self, filenames):
for filename in filenames:
open(os.path.join(self.tmp_dir, filename), 'a').close()
def testNonExistingDirectory(self):
"""Test the MatchingFiles dataset with a non-existing directory."""
self.tmp_dir = os.path.join(self.tmp_dir, 'nonexistingdir')
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset, expected_error=(errors.NotFoundError, ''))
def testEmptyDirectory(self):
"""Test the MatchingFiles dataset with an empty directory."""
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset, expected_error=(errors.NotFoundError, ''))
def testSimpleDirectory(self):
"""Test the MatchingFiles dataset with a simple directory."""
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames
],
assert_items_equal=True)
def testFileSuffixes(self):
"""Test the MatchingFiles dataset using the suffixes of filename."""
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*.py'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames[1:-1]
],
assert_items_equal=True)
def testFileMiddles(self):
"""Test the MatchingFiles dataset using the middles of filename."""
filenames = ['aa.txt', 'bb.py', 'bbc.pyc', 'cc.pyc']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, 'b*.py*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames[1:3]
],
assert_items_equal=True)
def testNestedDirectories(self):
"""Test the MatchingFiles dataset with nested directories."""
filenames = []
width = 8
depth = 4
for i in range(width):
for j in range(depth):
new_base = os.path.join(self.tmp_dir, str(i),
*[str(dir_name) for dir_name in range(j)])
os.makedirs(new_base)
child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
for f in child_files:
filename = os.path.join(new_base, f)
filenames.append(filename)
open(filename, 'w').close()
patterns = [
os.path.join(self.tmp_dir, os.path.join(*['**' for _ in range(depth)]),
suffix) for suffix in ['*.txt', '*.log']
]
dataset = matching_files.MatchingFilesDataset(patterns)
next_element = self.getNext(dataset)
expected_filenames = [
compat.as_bytes(filename)
for filename in filenames
if filename.endswith('.txt') or filename.endswith('.log')
]
actual_filenames = []
while True:
try:
actual_filenames.append(compat.as_bytes(self.evaluate(next_element())))
except errors.OutOfRangeError:
break
self.assertItemsEqual(expected_filenames, actual_filenames)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/matching_files_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.bucket_by_sequence_length()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
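# `bucket_by_sequence_length` assigns each element to a bucket based on its
# length relative to `bucket_boundaries`, and batches each bucket with the
# matching entry of `bucket_batch_sizes` (which has one more entry than the
# boundaries list). Batches are padded to their longest element unless
# `no_padding` is set; `pad_to_bucket_boundary` instead pads up to the
# bucket's exclusive upper boundary (hence length `boundary - 1` below).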
def _element_length_fn(x, y=None):
del y
return array_ops.shape(x)[0]
def _to_sparse_tensor(record):
return sparse_tensor.SparseTensor(**record)
def _format_record(array, sparse):
if sparse:
return {
"values": array,
"indices": [[i] for i in range(len(array))],
"dense_shape": (len(array),)
}
return array
def _get_record_type(sparse):
if sparse:
return {
"values": dtypes.int64,
"indices": dtypes.int64,
"dense_shape": dtypes.int64
}
return dtypes.int32
def _get_record_shape(sparse):
if sparse:
return {
"values": tensor_shape.TensorShape([None,]),
"indices": tensor_shape.TensorShape([None, 1]),
"dense_shape": tensor_shape.TensorShape([1,])
}
return tensor_shape.TensorShape([None])
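# Worked example of the helpers above: _format_record([5, 6], sparse=True)
# returns
#   {"values": [5, 6], "indices": [[0], [1]], "dense_shape": (2,)}
# which _to_sparse_tensor turns into a rank-1 SparseTensor of length 2; with
# sparse=False the array is passed through unchanged.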
@test_util.run_all_in_graph_and_eager_modes
class BucketBySequenceLengthTest(test_base.DatasetTestBase,
parameterized.TestCase):
@parameterized.named_parameters(
("WithoutPadding", True),
("WithPadding", False),
)
def testBucketDropRemainder(self, param_no_padding):
boundaries = [10, 20, 30]
batch_sizes = [10, 8, 4, 2]
lengths = [8, 13, 25, 35]
n_bucket_elements = [28, 7, 6, 5]
n_expected_batches = 5
# Expected sequence lengths of the individual batches.
expected_lengths = []
# Expected sum of all batches with an equal sequence length.
# <seq-length>: <expected-total-sum>
expected_sums = {}
# Expected batch sizes of batches depending on the sequence length.
# <seq-length>: [batch1_size, ..., batchN_size]
expected_batch_sizes = {}
for length, batch_size, bucket_elements in zip(lengths, batch_sizes,
n_bucket_elements):
# Calculate the expected sum across all batches of a specific sequence
# length.
expected_sums[length] = \
(bucket_elements - bucket_elements % batch_size) * length
# Calculate the expected occurrence of individual batch sizes.
expected_batch_sizes[length] = \
[batch_size] * (bucket_elements // batch_size)
# Calculate the expected occurrence of individual sequence lengths.
expected_lengths.extend([length] * (bucket_elements // batch_size))
def build_dataset(sparse):
def _generator():
# Produce 1 batch for each bucket
elements = []
for bucket_elements, length in zip(n_bucket_elements, lengths):
# Using only full sequences (as opposed to the strategy employed in
# `testBucket`) makes checking the sum a lot easier.
record_len = length
for _ in range(bucket_elements):
elements.append([1] * record_len)
random.shuffle(elements)
for el in elements:
yield (_format_record(el, sparse),)
dataset = dataset_ops.Dataset.from_generator(
_generator, (_get_record_type(sparse),), (_get_record_shape(sparse),))
if sparse:
dataset = dataset.map(lambda x: (_to_sparse_tensor(x),))
return dataset
def _test_bucket_by_padding(no_padding):
dataset = build_dataset(sparse=no_padding)
dataset = dataset.apply(
grouping.bucket_by_sequence_length(
_element_length_fn,
boundaries,
batch_sizes,
no_padding=no_padding,
drop_remainder=True))
get_next = self.getNext(dataset)
batches = []
for _ in range(n_expected_batches):
batch, = self.evaluate(get_next())
batches.append(batch)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
generated_lengths = []
# <seq-length>: <total-sum>
generated_sums = {}
# <seq-length>: [<batch_size>, ...]
generated_batch_sizes = {}
for length, batch_size, bucket_elements in zip(lengths, batch_sizes,
n_bucket_elements):
# Initialize the sum across all batches.
generated_sums[length] = 0
# Initialize the individual batch sizes.
generated_batch_sizes[length] = []
for batch in batches:
shape = batch.dense_shape if no_padding else batch.shape
length = shape[1]
generated_lengths.append(length)
batch_size = shape[0]
generated_batch_sizes[length].append(batch_size)
batch_sum = batch.values.sum() if no_padding else batch.sum()
generated_sums[length] += batch_sum
for l in lengths:
# Make sure the sum of the batch contents is correct for the individual
# sequence lengths.
self.assertEqual(
generated_sums[l], expected_sums[l], "Tensor sums did not match! "
"expected: {}, generated: {}".format(expected_sums, generated_sums))
# Make sure the individual batch sizes are generated as expected.
self.assertEqual(
sorted(generated_batch_sizes[l]), sorted(expected_batch_sizes[l]),
"Batch-sizes did not match! "
"expected: {}, generated: {}".format(
sorted(expected_batch_sizes[l]),
sorted(generated_batch_sizes[l])))
# Make sure the generated sequence lengths appear as often as expected.
self.assertEqual(
sorted(generated_lengths), sorted(expected_lengths),
"The generated sequence lengths did not match! "
"expected: {}, generated: {}".format(
sorted(expected_lengths), sorted(generated_lengths)))
_test_bucket_by_padding(param_no_padding)
@parameterized.named_parameters(
("WithoutPadding", True),
("WithPadding", False),
)
def testBucket(self, param_no_padding):
boundaries = [10, 20, 30]
batch_sizes = [10, 8, 4, 2]
lengths = [8, 13, 25, 35]
def build_dataset(sparse):
def _generator():
# Produce 1 batch for each bucket
elements = []
for batch_size, length in zip(batch_sizes, lengths):
record_len = length - 1
for _ in range(batch_size):
elements.append([1] * record_len)
record_len = length
random.shuffle(elements)
for el in elements:
yield (_format_record(el, sparse),)
dataset = dataset_ops.Dataset.from_generator(
_generator,
(_get_record_type(sparse),),
(_get_record_shape(sparse),))
if sparse:
dataset = dataset.map(lambda x: (_to_sparse_tensor(x),))
return dataset
def _test_bucket_by_padding(no_padding):
dataset = build_dataset(sparse=no_padding)
dataset = dataset.apply(
grouping.bucket_by_sequence_length(
_element_length_fn,
boundaries,
batch_sizes,
no_padding=no_padding))
get_next = self.getNext(dataset)
batches = []
for _ in range(4):
batch, = self.evaluate(get_next())
batches.append(batch)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
batch_sizes_val = []
lengths_val = []
for batch in batches:
shape = batch.dense_shape if no_padding else batch.shape
batch_size = shape[0]
length = shape[1]
batch_sizes_val.append(batch_size)
lengths_val.append(length)
if not context.executing_eagerly():
sum_check = batch.values.sum() if no_padding else batch.sum()
self.assertEqual(sum_check, batch_size * length - 1)
self.assertEqual(sum(batch_sizes_val), sum(batch_sizes))
self.assertEqual(sorted(batch_sizes), sorted(batch_sizes_val))
self.assertEqual(sorted(lengths), sorted(lengths_val))
_test_bucket_by_padding(param_no_padding)
def testPadToBoundary(self):
boundaries = [10, 20, 30]
batch_sizes = [10, 8, 4, 2]
lengths = [8, 13, 25]
def element_gen():
# Produce 1 batch for each bucket
elements = []
for batch_size, length in zip(batch_sizes[:-1], lengths):
for _ in range(batch_size):
elements.append([1] * length)
random.shuffle(elements)
for el in elements:
yield (el,)
for _ in range(batch_sizes[-1]):
el = [1] * (boundaries[-1] + 5)
yield (el,)
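    # The trailing batch_sizes[-1] elements are longer than the largest
    # boundary, so with pad_to_bucket_boundary=True they cannot be padded to
    # any bucket; iterating over them raises an error mentioning
    # "bucket_boundaries" (asserted below).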
element_len = lambda el: array_ops.shape(el)[0]
dataset = dataset_ops.Dataset.from_generator(
element_gen, (dtypes.int64,), ([None],)).apply(
grouping.bucket_by_sequence_length(
element_len, boundaries, batch_sizes,
pad_to_bucket_boundary=True))
get_next = self.getNext(dataset)
batches = []
for _ in range(3):
batch, = self.evaluate(get_next())
batches.append(batch)
with self.assertRaisesOpError("bucket_boundaries"):
self.evaluate(get_next())
batch_sizes_val = []
lengths_val = []
for batch in batches:
batch_size = batch.shape[0]
length = batch.shape[1]
batch_sizes_val.append(batch_size)
lengths_val.append(length)
batch_sizes = batch_sizes[:-1]
self.assertEqual(sum(batch_sizes_val), sum(batch_sizes))
self.assertEqual(sorted(batch_sizes), sorted(batch_sizes_val))
self.assertEqual([boundary - 1 for boundary in sorted(boundaries)],
sorted(lengths_val))
def testPadToBoundaryNoExtraneousPadding(self):
boundaries = [3, 7, 11]
batch_sizes = [2, 2, 2, 2]
lengths = range(1, 11)
def element_gen():
for length in lengths:
yield ([1] * length,)
element_len = lambda element: array_ops.shape(element)[0]
dataset = dataset_ops.Dataset.from_generator(
element_gen, (dtypes.int64,), ([None],)).apply(
grouping.bucket_by_sequence_length(
element_len, boundaries, batch_sizes,
pad_to_bucket_boundary=True))
get_next = self.getNext(dataset)
batches = []
for _ in range(5):
batch, = self.evaluate(get_next())
batches.append(batch)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
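    # With pad_to_bucket_boundary=True, each batch is padded to one less than
    # its own bucket's boundary (boundaries are exclusive), not to the largest
    # boundary: buckets bounded by [3, 7, 11] yield widths 2, 6, and 10.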
self.assertAllEqual(batches[0], [[1, 0],
[1, 1]])
self.assertAllEqual(batches[1], [[1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0]])
self.assertAllEqual(batches[2], [[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1]])
self.assertAllEqual(batches[3], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
self.assertAllEqual(batches[4], [[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
@parameterized.named_parameters(
("WithoutPadding", True),
("WithPadding", False),
)
def testTupleElements(self, param_no_padding):
def build_dataset(sparse):
def _generator():
text = [[1, 2, 3], [3, 4, 5, 6, 7], [1, 2], [8, 9, 0, 2, 3]]
label = [1, 2, 1, 2]
for x, y in zip(text, label):
yield (_format_record(x, sparse), y)
dataset = dataset_ops.Dataset.from_generator(
generator=_generator,
output_types=(_get_record_type(sparse), dtypes.int32),
output_shapes=(_get_record_shape(sparse),
tensor_shape.TensorShape([])))
if sparse:
dataset = dataset.map(lambda x, y: (_to_sparse_tensor(x), y))
return dataset
def _test_tuple_elements_by_padding(no_padding):
dataset = build_dataset(sparse=no_padding)
dataset = dataset.apply(grouping.bucket_by_sequence_length(
element_length_func=_element_length_fn,
bucket_batch_sizes=[2, 2, 2],
bucket_boundaries=[0, 8],
no_padding=no_padding))
shapes = dataset_ops.get_legacy_output_shapes(dataset)
self.assertEqual([None, None], shapes[0].as_list())
self.assertEqual([None], shapes[1].as_list())
_test_tuple_elements_by_padding(param_no_padding)
@parameterized.named_parameters(
("DoDropRemainder", True),
("DoNotDropRemainder", False),
)
def testBucketSparse(self, param_drop_remainder): # pylint: disable=g-doc-args
"""Tests bucketing of sparse tensors (case where `no_padding` == True).
    The test runs on the following dataset:
[
[0],
[0, 1],
[0, 1, 2]
...
[0, ..., max_len - 1]
]
Sequences are bucketed by length and batched with
`batch_size` < `bucket_size`.
"""
min_len = 0
max_len = 100
batch_size = 7
bucket_size = 10
def _build_dataset():
input_data = [range(i+1) for i in range(min_len, max_len)]
def generator_fn():
for record in input_data:
yield _format_record(record, sparse=True)
dataset = dataset_ops.Dataset.from_generator(
generator=generator_fn,
output_types=_get_record_type(sparse=True))
dataset = dataset.map(_to_sparse_tensor)
return dataset
def _compute_expected_batches(drop_remainder):
      """Computes the expected batch outputs and stores them in a set."""
all_expected_sparse_tensors = set()
for bucket_start_len in range(min_len, max_len, bucket_size):
if drop_remainder:
batch_offsets = [0]
else:
batch_offsets = range(0, bucket_size, batch_size)
for batch_offset in batch_offsets:
batch_start_len = bucket_start_len + batch_offset
batch_end_len = min(batch_start_len + batch_size,
bucket_start_len + bucket_size)
expected_indices = []
expected_values = []
for length in range(batch_start_len, batch_end_len):
for val in range(length + 1):
expected_indices.append((length - batch_start_len, val))
expected_values.append(val)
expected_sprs_tensor = (tuple(expected_indices),
tuple(expected_values))
all_expected_sparse_tensors.add(expected_sprs_tensor)
return all_expected_sparse_tensors
def _compute_batches(dataset):
      """Computes the actual batch outputs of the dataset and stores them in a set."""
batch = self.getNext(dataset)
all_sparse_tensors = set()
with self.assertRaises(errors.OutOfRangeError):
while True:
output = self.evaluate(batch())
sprs_tensor = (tuple([tuple(idx) for idx in output.indices]),
tuple(output.values))
all_sparse_tensors.add(sprs_tensor)
return all_sparse_tensors
dataset = _build_dataset()
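    # Space the boundaries `bucket_size` apart, starting just above the first
    # bucket, so that every bucket covers exactly `bucket_size` consecutive
    # sequence lengths.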
boundaries = range(min_len + bucket_size + 1, max_len, bucket_size)
dataset = dataset.apply(
grouping.bucket_by_sequence_length(
_element_length_fn,
boundaries, [batch_size] * (len(boundaries) + 1),
no_padding=True,
drop_remainder=param_drop_remainder))
batches = _compute_batches(dataset)
expected_batches = _compute_expected_batches(param_drop_remainder)
self.assertEqual(batches, expected_batches)
if __name__ == "__main__":
test.main()
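# A minimal usage sketch (not part of the test suite) of the API exercised
# above: bucketing variable-length records by length and padding within each
# bucket. The length function and the parameter values here are illustrative
# assumptions, not taken from the tests.
def _example_bucketing_pipeline(dataset):
  """Buckets variable-length int64 vectors by length, padding per bucket."""
  return dataset.apply(
      grouping.bucket_by_sequence_length(
          element_length_func=lambda el: array_ops.shape(el)[0],
          bucket_boundaries=[10, 20, 30],
          # One batch size per bucket: len(bucket_boundaries) + 1 entries.
          bucket_batch_sizes=[4, 4, 4, 4]))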
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.prefetch_to_device()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
# TODO(b/117581999): add eager coverage when supported.
class PrefetchToDeviceTest(test_base.DatasetTestBase):
@test_util.deprecated_graph_mode_only
def testPrefetchToDevice(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testPrefetchToSameDevice(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device(
"/job:localhost/replica:0/task:0/device:CPU:0"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testPrefetchDictToDevice(self):
host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element["a"].dtype)
self.assertEqual([], next_element["a"].shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual({"a": i}, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testPrefetchSparseTensorsToDevice(self):
def make_tensor(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0]], values=(i*[1]), dense_shape=[2, 2])
host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element.dtype)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
actual = self.evaluate(next_element)
self.assertAllEqual([i], actual.values)
self.assertAllEqual([[0, 0]], actual.indices)
self.assertAllEqual([2, 2], actual.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testPrefetchToDeviceGpu(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/gpu:0"))
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testPrefetchToDeviceWithReInit(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
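      # Consume only half of the elements, then re-initialize: the iterator
      # must restart from the beginning and still exhaust cleanly.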
self.evaluate(iterator.initializer)
for i in range(5):
self.assertEqual(i, self.evaluate(next_element))
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testPrefetchToDeviceGpuWithReInit(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/gpu:0"))
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(5):
self.assertEqual(i, self.evaluate(next_element))
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
if __name__ == "__main__":
test.main()
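# A minimal usage sketch (not part of the test suite): prefetch_to_device
# stages upcoming elements on the target device ahead of the computation that
# consumes them. The device string is an illustrative assumption; any valid
# TensorFlow device name works.
def _example_prefetch_pipeline():
  # Build a host-side dataset, then prefetch its elements onto the GPU.
  dataset = dataset_ops.Dataset.range(100)
  return dataset.apply(prefetching_ops.prefetch_to_device("/gpu:0"))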
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/prefetch_to_device_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.ignore_errors()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
_NUMPY_RANDOM_SEED = 42
@test_util.run_all_in_graph_and_eager_modes
class IgnoreErrorsTest(test_base.DatasetTestBase):
def testMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (
dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message")).apply(
error_ops.ignore_errors()))
get_next = self.getNext(dataset)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testParallelMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (
dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2).prefetch(2).apply(error_ops.ignore_errors()))
get_next = self.getNext(dataset)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testReadFileIgnoreError(self):
def write_string_to_file(value, filename):
with open(filename, "w") as f:
f.write(value)
filenames = [
os.path.join(self.get_temp_dir(), "file_%d.txt" % i) for i in range(5)
]
for filename in filenames:
write_string_to_file(filename, filename)
dataset = (
dataset_ops.Dataset.from_tensor_slices(filenames).map(
io_ops.read_file,
num_parallel_calls=2).prefetch(2).apply(error_ops.ignore_errors()))
get_next = self.getNext(dataset)
# All of the files are present.
for filename in filenames:
self.assertEqual(compat.as_bytes(filename), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Delete one of the files.
os.remove(filenames[0])
# Attempting to read filenames[0] will fail, but ignore_errors()
# will catch the error.
get_next = self.getNext(dataset)
for filename in filenames[1:]:
self.assertEqual(compat.as_bytes(filename), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testTFRecordDatasetIgnoreError(self):
filenames = []
for i in range(5):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(10):
writer.write(b"record")
writer.close()
# Append corrupted data
with open(fn, "a") as f:
f.write("corrupted data")
dataset = readers.TFRecordDataset(filenames).apply(
error_ops.ignore_errors())
get_next = self.getNext(dataset)
# All of the files are present.
for filename in filenames:
for j in range(10):
self.assertEqual(b"record", self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
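# A minimal usage sketch (not part of the test suite): elements whose map
# function raises (here, check_numerics on a NaN) are silently dropped by
# ignore_errors() instead of failing the whole pipeline. The input values are
# illustrative assumptions.
def _example_ignore_errors_pipeline():
  components = np.array([1., np.nan, 3.]).astype(np.float32)
  return (dataset_ops.Dataset.from_tensor_slices(components)
          # check_numerics raises InvalidArgumentError on the NaN element.
          .map(lambda x: array_ops.check_numerics(x, "nan found"))
          .apply(error_ops.ignore_errors()))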
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/ignore_errors_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.copy_to_device()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat as util_compat
# TODO(b/117581999): add eager coverage when supported.
class CopyToDeviceTest(test_base.DatasetTestBase):
@test_util.deprecated_graph_mode_only
def testCopyToDevice(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceInt32(self):
host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int32, next_element.dtype)
self.assertEqual((4,), next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToSameDevice(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:0"))
with ops.device("/cpu:0"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceWithPrefetch(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyDictToDevice(self):
host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element["a"].dtype)
self.assertEqual([], next_element["a"].shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual({"a": i}, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyDictToDeviceWithPrefetch(self):
host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element["a"].dtype)
self.assertEqual([], next_element["a"].shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual({"a": i}, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopySparseTensorsToDevice(self):
def make_tensor(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])
host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element.dtype)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
actual = self.evaluate(next_element)
self.assertAllEqual([i], actual.values)
self.assertAllEqual([[0, 0]], actual.indices)
self.assertAllEqual([2, 2], actual.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopySparseTensorsToDeviceWithPrefetch(self):
def make_tensor(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])
host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element.dtype)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
actual = self.evaluate(next_element)
self.assertAllEqual([i], actual.values)
self.assertAllEqual([[0, 0]], actual.indices)
self.assertAllEqual([2, 2], actual.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpu(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuWithPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuWithMap(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
def generator():
for i in range(10):
yield i, float(i), str(i)
host_dataset = dataset_ops.Dataset.from_generator(
generator, output_types=(dtypes.int32, dtypes.float32, dtypes.string))
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
def gpu_map_func(x, y, z):
return math_ops.square(x), math_ops.square(y), z
device_dataset = device_dataset.apply(
prefetching_ops.map_on_gpu(gpu_map_func))
options = dataset_ops.Options()
options.experimental_optimization.autotune = False
device_dataset = device_dataset.with_options(options)
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(10):
x, y, z = self.evaluate(next_element)
self.assertEqual(i**2, x)
self.assertEqual(float(i**2), y)
self.assertEqual(util_compat.as_bytes(str(i)), z)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuInt32(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuInt32AndPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuStrings(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
self.assertAllEqual([b"a", b"b", b"c"], self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuStringsAndPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
self.assertAllEqual([b"a", b"b", b"c"], self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDevicePingPongCPUGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with compat.forward_compatibility_horizon(2018, 8, 4):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0"))
back_to_cpu_dataset = device_dataset.apply(
prefetching_ops.copy_to_device("/cpu:0", source_device="/gpu:0"))
with ops.device("/cpu:0"):
iterator = dataset_ops.make_initializable_iterator(back_to_cpu_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceWithReInit(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
self.evaluate(iterator.initializer)
for i in range(5):
self.assertEqual(i, self.evaluate(next_element))
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceWithReInitAndPrefetch(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(device_dataset)))
self.assertTrue(dataset_ops.get_structure(host_dataset).is_compatible_with(
dataset_ops.get_structure(iterator)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
self.evaluate(iterator.initializer)
for i in range(5):
self.assertEqual(i, self.evaluate(next_element))
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuWithReInit(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(5):
self.assertEqual(i, self.evaluate(next_element))
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuWithReInitAndPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(5):
self.assertEqual(i, self.evaluate(next_element))
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testIteratorGetNextAsOptionalOnGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(3)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_elem = iterator_ops.get_next_as_optional(iterator)
elem_has_value_t = next_elem.has_value()
elem_value_t = next_elem.get_value()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
# Before initializing the iterator, evaluating the optional fails with
# a FailedPreconditionError.
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_has_value_t)
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_value_t)
# For each element of the dataset, assert that the optional evaluates to
# the expected value.
self.evaluate(iterator.initializer)
for i in range(3):
elem_has_value, elem_value = self.evaluate(
[elem_has_value_t, elem_value_t])
self.assertTrue(elem_has_value)
self.assertEqual(i, elem_value)
# After exhausting the iterator, `next_elem.has_value()` will evaluate to
# false, and attempting to get the value will fail.
for _ in range(2):
self.assertFalse(self.evaluate(elem_has_value_t))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_value_t)
if __name__ == "__main__":
test.main()
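# A minimal usage sketch (not part of the test suite): unlike
# prefetch_to_device, copy_to_device does not buffer on its own, so a trailing
# .prefetch(1) is the usual way to overlap the host-to-device copy with
# computation. The device strings are illustrative assumptions.
def _example_copy_to_device_pipeline():
  dataset = dataset_ops.Dataset.range(100)
  return dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0")
  ).prefetch(1)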
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/copy_to_device_test.py
|