# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RaggedTensor operator dispatch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_dispatch
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
# Constants listing various op types to test. Each operation
# should be included in at least one list below, or tested separately if
# necessary (e.g., because it expects additional arguments).
UNARY_FLOAT_OPS = [
math_ops.abs,
math_ops.acos,
math_ops.acosh,
math_ops.angle,
math_ops.asin,
math_ops.asinh,
math_ops.atan,
math_ops.atanh,
math_ops.ceil,
math_ops.conj,
math_ops.cos,
math_ops.cosh,
math_ops.digamma,
math_ops.erf,
math_ops.erfc,
math_ops.exp,
math_ops.expm1,
math_ops.floor,
math_ops.imag,
math_ops.is_finite,
math_ops.is_inf,
math_ops.is_nan,
math_ops.lgamma,
math_ops.log,
math_ops.log1p,
math_ops.log_sigmoid,
math_ops.negative,
math_ops.real,
math_ops.reciprocal,
math_ops.rint,
math_ops.round,
math_ops.rsqrt,
math_ops.sign,
math_ops.sin,
math_ops.sinh,
math_ops.sqrt,
math_ops.square,
math_ops.tan,
array_ops.identity,
array_ops.ones_like,
array_ops.zeros_like,
]
UNARY_BOOL_OPS = [
math_ops.logical_not,
]
UNARY_STRING_OPS = [
string_ops.decode_base64,
string_ops.encode_base64,
string_ops.string_strip,
parsing_ops.decode_compressed,
]
BINARY_FLOAT_OPS = [
math_ops.add,
math_ops.atan2,
math_ops.complex,
math_ops.div_no_nan,
math_ops.divide,
math_ops.equal,
math_ops.floordiv,
math_ops.floormod,
math_ops.greater,
math_ops.greater_equal,
math_ops.less,
math_ops.less_equal,
math_ops.maximum,
math_ops.minimum,
math_ops.multiply,
math_ops.not_equal,
math_ops.pow,
math_ops.realdiv,
math_ops.squared_difference,
math_ops.subtract,
math_ops.truediv,
]
BINARY_BOOL_OPS = [
math_ops.logical_and,
math_ops.logical_or,
math_ops.logical_xor,
]
UNARY_INT_OPS = [
gen_bitwise_ops.invert,
string_ops.unicode_script,
]
BINARY_INT_OPS = [
gen_bitwise_ops.bitwise_and,
gen_bitwise_ops.bitwise_or,
gen_bitwise_ops.bitwise_xor,
gen_bitwise_ops.left_shift,
gen_bitwise_ops.right_shift,
math_ops.truncatediv,
math_ops.truncatemod,
]
@test_util.run_all_in_graph_and_eager_modes
class RaggedElementwiseOpsTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertSameShape(self, x, y):
"""Checks that x and y have the same shape (including ragged shapes)."""
if isinstance(x, ragged_tensor.RaggedTensor):
self.assertIsInstance(y, ragged_tensor.RaggedTensor)
self.assertEqual(x.ragged_rank, y.ragged_rank)
for (x_splits, y_splits) in zip(x.nested_row_splits, y.nested_row_splits):
self.assertAllEqual(x_splits, y_splits)
self.assertAllEqual(
array_ops.shape(x.flat_values), array_ops.shape(y.flat_values))
else:
self.assertIsInstance(y, ops.Tensor)
self.assertAllEqual(array_ops.shape(x), array_ops.shape(y))
@parameterized.parameters(
#=========================================================================
# Test different input shapes.
#=========================================================================
[
# 0-dimensional input
{'x': 12},
# 1-dimensional input
{'x': [1, -2, 3]},
# 2-dimensional input
{'x': [[-2, 3], [-3, 4]]},
{'x': ragged_factory_ops.constant_value(
[[-2, 3], [-3]], ragged_rank=1)},
# 3-dimensional inputs
{'x': [[[-2, 3], [3, 4]], [[7, 6], [5, 4]]]},
{'x': ragged_factory_ops.constant_value(
[[[-2, 3], [3, 4]], [[7, 6]]],
ragged_rank=1)},
{'x': ragged_factory_ops.constant_value(
[[[-2, 3, 4], []], [[7, 6]], []],
ragged_rank=2)},
] +
#=========================================================================
# Test each unary op.
#=========================================================================
[{'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]), 'op': op}
for op in UNARY_FLOAT_OPS] +
[{'x': ragged_factory_ops.constant_value([[True, False], [True]]),
'op': op}
for op in UNARY_BOOL_OPS] +
[{'x': ragged_factory_ops.constant_value([[18, 512], [12412]], np.int32),
'op': op}
for op in UNARY_INT_OPS] +
[{'x': ragged_factory_ops.constant_value([['abcd', 'efgh'],
['aabbccdd']]),
'op': op}
for op in UNARY_STRING_OPS] +
[
{'op': clip_ops.clip_by_value,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'clip_value_min': 0.1, 'clip_value_max': 4.0},
{'op': math_ops.cast,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'dtype': dtypes.int32},
{'op': math_ops.saturate_cast,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'dtype': dtypes.int32},
{'op': string_ops.string_to_hash_bucket,
'x': ragged_factory_ops.constant_value(
[['abcd', 'efgh'], ['aabbccdd']]),
'num_buckets': 1000},
{'op': string_ops.string_to_hash_bucket_fast,
'x': ragged_factory_ops.constant_value(
[['abcd', 'efgh'], ['aabbccdd']]),
'num_buckets': 1000},
{'op': string_ops.string_to_hash_bucket_strong,
'x': ragged_factory_ops.constant_value(
[['abcd', 'efgh'], ['aabbccdd']]),
'num_buckets': 1000,
'key': [1231, 12512]},
{'op': string_ops.string_to_number,
'x': ragged_factory_ops.constant_value([['-2.0', '3.0'], ['-3.0']])},
{'op': string_ops.regex_full_match,
'x': ragged_factory_ops.constant_value([['hello', '123'], ['1+1']]),
'pattern': r'\w+'},
{'op': string_ops.regex_replace,
'x': ragged_factory_ops.constant_value([['hello', '123'], ['1+1']]),
'pattern': r'\d',
'rewrite': '#'},
{'op': string_ops.substr,
'x': ragged_factory_ops.constant_value([['hello', '123'], ['1+1']]),
'pos': 2, 'len': 3},
{'op': array_ops.check_numerics,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'message': 'check-numerics'},
]
) # pyformat: disable
def testUnaryElementwiseOp(self, x, op=math_ops.abs, **extra_args):
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x)
result = op(x, **extra_args)
# Run the wrapped op on the dense values, for comparison.
dense_x = x.flat_values if isinstance(x, ragged_tensor.RaggedTensor) else x
expected_flat_values = array_ops.reshape(op(dense_x, **extra_args), [-1])
# Check that the result has the expected shape.
self.assertSameShape(x, result)
# Check that the result has the expected (flattened) values.
if isinstance(result, ragged_tensor.RaggedTensor):
result_flat_values = array_ops.reshape(result.flat_values, [-1])
else:
result_flat_values = array_ops.reshape(result, [-1])
self.assertAllEqual(expected_flat_values, result_flat_values)
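  def testUnaryDispatchSketch(self):
    # A hedged sketch added for illustration (not part of the original
    # suite): a dispatched unary op maps over `flat_values` and reuses the
    # input's row splits, so the result is a RaggedTensor with the same
    # nested shape as the input.
    x = ragged_factory_ops.constant([[-2.0, 3.0], [-3.0]])
    result = math_ops.abs(x)
    self.assertIsInstance(result, ragged_tensor.RaggedTensor)
    self.assertAllEqual(result, [[2.0, 3.0], [3.0]])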
@parameterized.parameters(
[
#=====================================================================
# Without broadcasting -- i.e., shapes match exactly.
#=====================================================================
# Shapes: x:(), y:()
{'x': 12,
'y': 8},
# Shapes: x:(3,), y:(3,)
{'x': [7, 8, 9],
'y': [1, -2, 3]},
# Shapes: x:(2, 2), y:(2, 2)
{'x': [[-2, 3], [-3, -4]],
'y': [[1, 2], [3, 4]]},
# Shapes: x:(2, None), y:(2, None)
{'x': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
'y': ragged_factory_ops.constant_value([[5, 6], [7]])},
# Shapes: x:(2, 2, 2), y:(2, 2, 2)
{'x': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
'y': [[[9, 3], [3, 4]], [[5, 2], [7, 6]]]},
# Shapes: x:(2, None, None), y: (2, None, None)
{'x': ragged_factory_ops.constant_value(
[[[1, 2], [3], [4]], [[], [5, 7, 8]]]),
'y': ragged_factory_ops.constant_value(
[[[3, 8], [2], [5]], [[], [1, 9, 8]]])},
# Shapes: x:(2, None, 2), y: (2, None, 2)
{'x': ragged_factory_ops.constant_value(
[[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
ragged_rank=1),
'y': ragged_factory_ops.constant_value(
[[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
ragged_rank=1)},
#=====================================================================
# With broadcasting
#=====================================================================
# Shapes: x:(), y:(3,)
{'x': 12, # Broadcast () -> (3,)
'y': [1, -2, 3]},
# Shapes: x:(1,), y:(3,)
{'x': [12], # Broadcast (1,) -> (3,)
'y': [1, -2, 3]},
# Shapes: x:(), y:(2, 2)
{'x': 12, # Broadcast () -> (2, 2)
'y': [[1, 2], [3, 4]]},
# Shapes: x:(1,), y:(2, 2)
{'x': [12], # Broadcast (1,) -> (2, 2)
'y': [[1, 2], [3, 4]]},
# Shapes: x:(2, 1), y:(2, 2)
{'x': [[10], [20]], # Broadcast (2, 1) -> (2, 2)
'y': [[1, 2], [3, 4]]},
# Shapes: x:(), y:(2, None)
{'x': 10, # Broadcast () -> (2, None)
'y': ragged_factory_ops.constant_value(
[[1, 2], [3]], dtype=np.int32)},
# TODO(edloper): Add tests for more advanced broadcasting, once we add
# support for it.
#=====================================================================
# Keyword Args
#=====================================================================
{'x': ragged_factory_ops.constant_value(
[[[1, 2], [3], [4]], [[], [5, 7, 8]]]),
'y': ragged_factory_ops.constant_value(
[[[3, 8], [2], [5]], [[], [1, 9, 8]]]),
'use_kwargs': ('x', 'y')},
{'x': ragged_factory_ops.constant_value(
[[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
ragged_rank=1),
'y': ragged_factory_ops.constant_value(
[[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
ragged_rank=1),
'use_kwargs': ('x', 'y')},
{'x': ragged_factory_ops.constant_value(
[[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
ragged_rank=1),
'y': ragged_factory_ops.constant_value(
[[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
ragged_rank=1),
'use_kwargs': ('x',)},
] +
#=========================================================================
# Test each binary op.
#=========================================================================
[{'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'y': ragged_factory_ops.constant_value([[5.0, 1.0], [12.0]]),
'op': op}
for op in BINARY_FLOAT_OPS] +
[{'x': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
'y': ragged_factory_ops.constant_value([[5, 1], [12]]),
'op': op}
for op in BINARY_INT_OPS] +
[{'x': ragged_factory_ops.constant_value([[True, True], [False]]),
'y': ragged_factory_ops.constant_value([[False, True], [False]]),
'op': op}
for op in BINARY_BOOL_OPS]
) # pyformat: disable
def testBinaryElementwiseOp(self, x, y, op=math_ops.add, **extra_args):
use_kwargs = extra_args.pop('use_kwargs', ())
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x)
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y)
if 'x' in use_kwargs and 'y' in use_kwargs:
result = op(x=x, y=y, **extra_args)
elif 'y' in use_kwargs:
result = op(x, y=y, **extra_args)
else:
result = op(x, y, **extra_args)
# Run the wrapped op on the dense values, for comparison.
dense_x = x.flat_values if isinstance(x, ragged_tensor.RaggedTensor) else x
dense_y = y.flat_values if isinstance(y, ragged_tensor.RaggedTensor) else y
expected_flat_values = array_ops.reshape(
op(dense_x, dense_y, **extra_args), [-1])
# Check that the result has the expected shape.
self.assertSameShape(y, result)
# Check that the result has the expected (flattened) values.
if isinstance(result, ragged_tensor.RaggedTensor):
result_flat_values = array_ops.reshape(result.flat_values, [-1])
else:
result_flat_values = array_ops.reshape(result, [-1])
self.assertAllEqual(expected_flat_values, result_flat_values)
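  def testBinaryDispatchBroadcastSketch(self):
    # A hedged sketch added for illustration (not part of the original
    # suite): a dense scalar broadcasts against a RaggedTensor under the
    # dispatched binary ops, so the result keeps the ragged row splits.
    x = ragged_factory_ops.constant([[-2.0, 3.0], [-3.0]])
    self.assertAllEqual(math_ops.add(x, 1.0), [[-1.0, 4.0], [-2.0]])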
@parameterized.parameters(
[
{'inputs': (12, 8, 3)},
{'inputs': ([1, 2, 3], [7, 8, 9], [3, 6, 9])},
{'inputs': ([[1, 2]], [[3, 4]], [[5, 6]])},
{'inputs': (ragged_factory_ops.constant_value([[1, 3], [-3]]),
ragged_factory_ops.constant_value([[4, 7], [88]]),
ragged_factory_ops.constant_value([[2, 9], [12]]))},
{'inputs': (ragged_factory_ops.constant_value(
[[[1, 3], [-3]], [[1]]]),
ragged_factory_ops.constant_value(
[[[4, 7], [88]], [[2]]]),
ragged_factory_ops.constant_value(
[[[2, 9], [12]], [[8]]]))},
{'inputs': (
ragged_factory_ops.constant_value([[[1, 3], [3, 4]], [[1, 5]]],
ragged_rank=1),
ragged_factory_ops.constant_value([[[4, 7], [1, 2]], [[2, 2]]],
ragged_rank=1),
ragged_factory_ops.constant_value([[[2, 9], [5, 2]], [[8, 0]]],
ragged_rank=1))},
{'inputs': (
ragged_factory_ops.constant_value([[[1, 3], [-3]], [[1]]]),
ragged_factory_ops.constant_value([[[4, 7], [88]], [[2]]]),
ragged_factory_ops.constant_value([[[2, 9], [12]], [[8]]])),
'use_kwargs': True},
] + [
{'op': math_ops.add_n,
'inputs': (ragged_factory_ops.constant_value([[1, 3], [-3]]),
ragged_factory_ops.constant_value([[4, 7], [88]]),
ragged_factory_ops.constant_value([[2, 9], [12]]))},
{'op': string_ops.string_join,
'inputs': (
ragged_factory_ops.constant_value([['a', 'b'], ['c']]),
ragged_factory_ops.constant_value([['foo', 'bar'], ['baz']]),
ragged_factory_ops.constant_value([['2', '9'], ['12']]))},
]) # pyformat: disable
def testListValuedElementwiseOp(self, inputs, op=math_ops.add_n,
**extra_args):
use_kwargs = extra_args.pop('use_kwargs', False)
inputs = [
ragged_tensor.convert_to_tensor_or_ragged_tensor(x) for x in inputs
]
if use_kwargs:
result = op(inputs=inputs, **extra_args)
else:
result = op(inputs, **extra_args)
# Run the wrapped op on the dense values, for comparison.
dense_inputs = [
x.flat_values if isinstance(x, ragged_tensor.RaggedTensor) else x
for x in inputs
]
expected_flat_values = array_ops.reshape(
op(dense_inputs, **extra_args), [-1])
# Check that the result has the expected shape.
self.assertSameShape(inputs[0], result)
# Check that the result has the expected (flattened) values.
if isinstance(result, ragged_tensor.RaggedTensor):
result_flat_values = array_ops.reshape(result.flat_values, [-1])
else:
result_flat_values = array_ops.reshape(result, [-1])
self.assertAllEqual(expected_flat_values, result_flat_values)
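  def testAddNDispatchSketch(self):
    # A hedged sketch added for illustration (not part of the original
    # suite): math_ops.add_n dispatches on a list of RaggedTensors with
    # matching row splits, summing their flat_values elementwise.
    rt1 = ragged_factory_ops.constant([[1, 3], [-3]])
    rt2 = ragged_factory_ops.constant([[4, 7], [88]])
    self.assertAllEqual(math_ops.add_n([rt1, rt2]), [[5, 10], [85]])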
def testElementwiseOpUnknownRankError(self):
if context.executing_eagerly():
return
x = ragged_factory_ops.constant([[1, 2], [3]])
y = ragged_tensor.RaggedTensor.from_row_splits(
array_ops.placeholder_with_default([1, 2, 3], shape=None), x.row_splits)
with self.assertRaisesRegexp(ValueError,
r'Unable to broadcast: unknown rank'):
math_ops.add(x, y)
@parameterized.parameters([
dict(
x=ragged_factory_ops.constant_value([[1, 2], [3]]),
y=[[10]],
expected=[[11, 12], [13]]),
dict(
x=ragged_factory_ops.constant_value([[[1, 2], [3, 4]], [[5]]],
ragged_rank=2),
y=ragged_factory_ops.constant_value([[[10], [20]], [[30]]],
ragged_rank=1),
expected=[[[11, 12], [23, 24]], [[35]]]),
dict(
x=ragged_factory_ops.constant_value([[[1]]]),
y=ragged_factory_ops.constant_value([[1]]),
expected=[[[2]]]),
])
def testElementwiseOpBroadcast(self, x, y, expected):
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, dtype=dtypes.int32)
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, dtype=dtypes.int32)
result = x + y
self.assertAllEqual(result, expected)
def testElementwiseOpShapeMismatch(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4, 5]])
y = ragged_factory_ops.constant([[1, 2, 3], [4, 5, 6]])
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(math_ops.add(x, y))
def testBinaryOpSparseAndRagged(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4, 5]])
y = sparse_tensor.SparseTensor([[0, 0], [0, 1], [2, 0]], [1, 2, 3], [3, 2])
with self.assertRaises((TypeError, ValueError)):
self.evaluate(math_ops.add(x, y))
with self.assertRaises((TypeError, ValueError)):
self.evaluate(math_ops.add_n([x, y]))
@parameterized.parameters([
dict(
op=array_ops.batch_gather,
args=(ragged_factory_ops.constant_value([[5, 6, 7], [8, 9]]),
ragged_factory_ops.constant_value([[2, 1, 0], [1]])),
expected=ragged_factory_ops.constant_value([[7, 6, 5], [9]])),
dict(
op=array_ops.concat,
args=([
ragged_factory_ops.constant_value([[1, 2, 3], [4]],
dtype=np.int32),
np.array([[5, 6]], dtype=np.int32)
],),
kwargs={'axis': 0},
expected=ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]])),
dict(
op=array_ops.expand_dims,
kwargs={
'input': ragged_factory_ops.constant_value([[1, 2], [3]]),
'axis': 0
},
expected=ragged_factory_ops.constant_value([[[1, 2], [3]]])),
dict(
op=array_ops.expand_dims_v2,
kwargs={
'input': ragged_factory_ops.constant_value([[1, 2], [3]]),
'axis': -1
},
expected=ragged_factory_ops.constant_value([[[1], [2]], [[3]]],
ragged_rank=1),
),
dict(
op=array_ops.gather,
kwargs={
'params': ragged_factory_ops.constant_value([[1, 2], [3]]),
'indices': [1, 0, 1]
},
expected=ragged_factory_ops.constant_value([[3], [1, 2], [3]])),
dict(
op=array_ops.gather_v2,
kwargs={
'params': ragged_factory_ops.constant_value([[1, 2], [3]]),
'indices': ragged_factory_ops.constant_value([[1, 0], [1]])
},
expected=ragged_factory_ops.constant_value([[[3], [1, 2]], [[3]]])),
dict(
op=array_ops.gather_nd,
kwargs={
'params': ragged_factory_ops.constant_value([[7, 8], [9]]),
'indices': [[0, 1], [1, 0], [0, 0]]
},
expected=ragged_factory_ops.constant_value([8, 9, 7])),
dict(
op=array_ops.one_hot,
kwargs={
'indices':
ragged_factory_ops.constant_value([[1, 2, 3], [0]],
dtype=np.int32),
'depth':
4,
'axis':
1
},
expected=ragged_factory_ops.constant_value(
[[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], [[1, 0, 0, 0]]],
ragged_rank=1)),
dict(
op=array_ops.stack,
args=([
ragged_factory_ops.constant_value([[1, 2, 3], [4]],
dtype=np.int32),
np.array([[5, 6]], dtype=np.int32)
],),
expected=ragged_factory_ops.constant_value([[[1, 2, 3], [4]],
[[5, 6]]])),
dict(
op=array_ops.tile,
args=([
ragged_factory_ops.constant_value([[1, 2], [3]], dtype=np.int32),
[2, 3]
]),
expected=ragged_factory_ops.constant_value([[1, 2, 1, 2, 1, 2],
[3, 3, 3],
[1, 2, 1, 2, 1, 2],
[3, 3, 3]])),
dict(
op=array_ops.where,
args=(ragged_factory_ops.constant_value([[True, False], [True]]),
ragged_factory_ops.constant_value([[b'A', b'B'], [b'C']]),
ragged_factory_ops.constant_value([[b'a', b'b'], [b'c']])),
expected=ragged_factory_ops.constant_value([[b'A', b'b'], [b'C']])),
dict(
op=array_ops.where,
args=(ragged_factory_ops.constant_value([[True, False], [True]]),),
expected=[[0, 0], [1, 0]]),
dict(
op=math_ops.unsorted_segment_sum,
kwargs={
'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
'segment_ids': ragged_factory_ops.constant_value([[0, 2], [0]]),
'num_segments': 3
},
expected=[4, 0, 2]),
dict(
op=math_ops.unsorted_segment_prod,
kwargs={
'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
'segment_ids': ragged_factory_ops.constant_value([[0, 2], [0]]),
'num_segments': 3
},
expected=[3, 1, 2]),
dict(
op=math_ops.unsorted_segment_min,
kwargs={
'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
'segment_ids': ragged_factory_ops.constant_value([[0, 1], [0]]),
'num_segments': 2
},
expected=[1, 2]),
dict(
op=math_ops.unsorted_segment_max,
kwargs={
'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
'segment_ids': ragged_factory_ops.constant_value([[0, 1], [0]]),
'num_segments': 2
},
expected=[3, 2]),
dict(
op=math_ops.unsorted_segment_mean,
kwargs={
'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
'segment_ids': ragged_factory_ops.constant_value([[0, 1], [0]]),
'num_segments': 2
},
expected=[2, 2]),
dict(
op=math_ops.unsorted_segment_sqrt_n,
kwargs={
'data':
ragged_factory_ops.constant_value([[1.0, 2.0],
[3.0, 4.0, 6.0]]),
'segment_ids':
ragged_factory_ops.constant_value([[0, 1], [0, 0, 0]]),
'num_segments':
2
},
expected=[7.0, 2.0]),
dict(
op=math_ops.reduce_sum,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
'axis':
1
},
expected=[3, 12]),
dict(
op=math_ops.reduce_prod,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
'axis':
1
},
expected=[2, 60]),
dict(
op=math_ops.reduce_min,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
'axis':
1
},
expected=[1, 3]),
dict(
op=math_ops.reduce_max,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
'axis':
1
},
expected=[2, 5]),
dict(
op=math_ops.reduce_mean,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[1, 3], [3, 4, 5]]),
'axis':
1
},
expected=[2, 4]),
dict(
op=math_ops.reduce_any,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[True, False],
[True, True, True]]),
'axis':
1
},
expected=[True, True]),
dict(
op=string_ops.reduce_join,
kwargs={
'inputs':
ragged_factory_ops.constant_value([[
b'this', b'is', b'a', b'test', b'for', b'ragged',
b'tensors'
], [b'please', b'do', b'not', b'panic', b'!']]),
'axis':
0,
'keepdims':
False,
'separator':
''
},
expected=[
b'thisplease', b'isdo', b'anot', b'testpanic', b'for!', b'ragged',
b'tensors'
]),
dict(
op=math_ops.reduce_all,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[True, False],
[True, True, True]]),
'axis':
1
},
expected=[False, True]),
dict(
op=array_ops.rank,
kwargs={'input': ragged_factory_ops.constant_value([[8, 3], [5]])},
expected=2),
dict(
op=array_ops.size,
kwargs={'input': ragged_factory_ops.constant_value([[8, 3], [5]])},
expected=3),
dict(
op=array_ops.size_v2,
kwargs={'input': ragged_factory_ops.constant_value([[8, 3], [5]])},
expected=3),
dict(
op=array_ops.squeeze,
kwargs={
'input': ragged_factory_ops.constant_value([[[1, 2, 3], [4, 5]]]),
'axis': [0]
},
expected=ragged_factory_ops.constant_value([[1, 2, 3], [4, 5]])),
dict(
op=array_ops.squeeze_v2,
kwargs={
'input': ragged_factory_ops.constant_value([[[1, 2, 3], [4, 5]]]),
'axis': [0]
},
expected=ragged_factory_ops.constant_value([[1, 2, 3], [4, 5]])),
dict(
op=data_flow_ops.dynamic_partition,
kwargs={
'data': ragged_factory_ops.constant_value([[1], [2, 3, 4], [5]]),
'partitions': [2, 1, 1],
'num_partitions': 3
},
expected=[
ragged_factory_ops.constant_value([], ragged_rank=1),
ragged_factory_ops.constant_value([[2, 3, 4], [5]]),
ragged_factory_ops.constant_value([[1]])
],
result_is_list=True),
])
def testRaggedDispatch(self, op, expected, args=(), result_is_list=False,
kwargs=None):
if kwargs is None: kwargs = {}
result = op(*args, **kwargs)
if result_is_list:
self.assertLen(result, len(expected))
for (r, e) in zip(result, expected):
self.assertAllEqual(r, e)
else:
self.assertAllEqual(result, expected)
def test_ragged_op_list(self):
# Ops that should be listed as supported in both v1 and v2.
supported_ops = [
'bitwise.bitwise_and', 'bitwise.bitwise_or', 'bitwise.bitwise_xor',
'bitwise.invert', 'bitwise.left_shift', 'bitwise.right_shift',
'clip_by_value', 'concat', 'debugging.check_numerics', 'cast',
'dtypes.complex', 'dtypes.saturate_cast', 'expand_dims', 'gather_nd',
'gather', 'identity', 'io.decode_base64', 'io.decode_compressed',
'io.encode_base64', 'math.abs', 'math.acos', 'math.acosh', 'math.add_n',
'math.add', 'math.angle', 'math.asin', 'math.asinh', 'math.atan2',
'math.atan', 'math.atanh', 'math.ceil', 'math.conj', 'math.cos',
'math.cosh', 'math.digamma', 'math.divide_no_nan', 'math.divide',
'math.equal', 'math.erf', 'math.erfc', 'math.exp', 'math.expm1',
'math.floor', 'math.floordiv', 'math.floormod', 'math.greater_equal',
'math.greater', 'math.imag', 'math.is_finite', 'math.is_inf',
'math.is_nan', 'math.less_equal', 'math.less', 'math.lgamma',
'math.log1p', 'math.log_sigmoid', 'math.log', 'math.logical_and',
'math.logical_not', 'math.logical_or', 'math.logical_xor',
'math.maximum', 'math.minimum', 'math.multiply', 'math.negative',
'math.not_equal', 'math.pow', 'math.real', 'math.reciprocal',
'math.reduce_any', 'math.reduce_max', 'math.reduce_mean',
'math.reduce_min', 'math.reduce_prod', 'math.reduce_sum', 'math.rint',
'math.round', 'math.rsqrt', 'math.sign', 'math.sin', 'math.sinh',
'math.sqrt', 'math.square', 'math.squared_difference', 'math.subtract',
'math.tan', 'math.truediv', 'math.unsorted_segment_max',
'math.unsorted_segment_mean', 'math.unsorted_segment_min',
'math.unsorted_segment_prod', 'math.unsorted_segment_sqrt_n',
'math.unsorted_segment_sum', 'one_hot', 'ones_like', 'rank', 'realdiv',
'reduce_all', 'size', 'squeeze', 'stack', 'strings.as_string',
'strings.join', 'strings.length', 'strings.reduce_join',
'strings.regex_full_match', 'strings.regex_replace', 'strings.strip',
'strings.substr', 'strings.to_hash_bucket_fast',
'strings.to_hash_bucket_strong', 'strings.to_hash_bucket',
'strings.to_number', 'strings.unicode_script', 'tile', 'truncatediv',
'truncatemod', 'zeros_like', 'dynamic_partition'
]
# Ops that should be listed as supported in v1 only.
# TODO(edloper): Add a dispatch for where_v2.
supported_ops_v1 = ['batch_gather', 'where']
# Ops that should be listed as supported in v2 only.
supported_ops_v2 = []
v1_ragged_ops = ragged_dispatch.ragged_op_list(tf_version=1)
for element in supported_ops + supported_ops_v1:
self.assertIn(element, v1_ragged_ops)
for element in supported_ops_v2:
self.assertNotIn(element, v1_ragged_ops)
v2_ragged_ops = ragged_dispatch.ragged_op_list(tf_version=2)
for element in supported_ops + supported_ops_v2:
self.assertIn(element, v2_ragged_ops)
for element in supported_ops_v1:
self.assertNotIn(element, v2_ragged_ops)
if __name__ == '__main__':
googletest.main()
# Source (above): tensorflow/python/ops/ragged/ragged_dispatch_test.py -- repo tensorflow-r1.15.5-nv23.03
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.stack_dynamic_partitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedSegmentStackOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
dict( # empty inputs
data=[],
partitions=[],
num_partitions=0,
expected=[],
expected_ragged_rank=1),
dict( # empty data, num_partitions>0
data=[],
partitions=[],
num_partitions=3,
expected=[[], [], []]),
dict( # 1D data, 1D partitions (docstring example)
data=['a', 'b', 'c', 'd', 'e'],
partitions=[3, 0, 2, 2, 3],
num_partitions=5,
expected=[['b'], [], ['c', 'd'], ['a', 'e'], []]),
dict( # 2D data, 1D partitions
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[2, 1, 2, 3],
num_partitions=4,
expected=[[], [['c', 'd']], [['a', 'b'], ['e', 'f']], [['g', 'h']]],
expected_ragged_rank=1),
dict( # 2D ragged data, 1D partitions
data=[['a'], ['b', 'c', 'd'], [], ['e', 'f']],
data_ragged_rank=1,
partitions=[2, 1, 2, 3],
num_partitions=4,
expected=[[], [['b', 'c', 'd']], [['a'], []], [['e', 'f']]],
expected_ragged_rank=2),
dict( # 2D data, 2D partitions
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[[3, 0], [2, 2], [4, 3], [2, 0]],
num_partitions=5,
expected=[['b', 'h'], [], ['c', 'd', 'g'], ['a', 'f'], ['e']]),
dict( # 2D ragged data, 2D ragged partitions
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[[3, 0], [2, 2], [4, 3], [2, 0]],
num_partitions=5,
expected=[['b', 'h'], [], ['c', 'd', 'g'], ['a', 'f'], ['e']]),
dict( # 3D data, 1d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h']]],
data_ragged_rank=0,
partitions=[1, 0],
num_partitions=2,
expected=[[[['e', 'f'], ['g', 'h']]], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=1),
dict( # 3D data (ragged_rank=1), 1d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f']]],
data_ragged_rank=1,
partitions=[2, 0],
num_partitions=3,
expected=[[[['e', 'f']]], [], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=2),
dict( # 3D data (ragged_rank=2), 1d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[2, 0],
num_partitions=3,
expected=[[[['e', 'f', 'g', 'h']]], [], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=3),
dict( # 3D data, 2d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h']]],
data_ragged_rank=0,
partitions=[[1, 0], [0, 3]],
segment_ids_ragged_rank=0,
num_partitions=4,
expected=[[['c', 'd'], ['e', 'f']], [['a', 'b']], [], [['g', 'h']]],
expected_ragged_rank=1),
dict( # 3D data (ragged_rank=1), 2d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f']]],
data_ragged_rank=1,
partitions=[[1, 0], [0]],
segment_ids_ragged_rank=1,
num_partitions=2,
expected=[[['c', 'd'], ['e', 'f']], [['a', 'b']]],
expected_ragged_rank=1),
dict( # 3D data (ragged_rank=2), 2d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[[1, 0], [0]],
segment_ids_ragged_rank=1,
num_partitions=3,
expected=[[['c', 'd'], ['e', 'f', 'g', 'h']], [['a', 'b']], []],
expected_ragged_rank=2),
dict( # 3D data (ragged_rank=2), 3d partitions (ragged_rank=2)
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[[[3, 0], [1, 2]], [[1, 1, 0, 1]]],
segment_ids_ragged_rank=2,
num_partitions=4,
expected=[['b', 'g'], ['c', 'e', 'f', 'h'], ['d'], ['a']]),
dict( # 0D data, 0D partitions
data='a',
partitions=3,
num_partitions=5,
expected=[[], [], [], ['a'], []]),
dict( # 1D data, 0D partitions
data=['a', 'b', 'c'],
partitions=3,
num_partitions=5,
expected=[[], [], [], [['a', 'b', 'c']], []],
expected_ragged_rank=1),
dict( # 2D data, 0D partitions
data=[['a', 'b'], ['c', 'd']],
data_ragged_rank=0,
partitions=3,
num_partitions=5,
expected=[[], [], [], [[['a', 'b'], ['c', 'd']]], []],
expected_ragged_rank=1),
dict( # 2D data (ragged_rank=1), 0D partitions
data=[['a', 'b'], ['c']],
data_ragged_rank=1,
partitions=3,
num_partitions=5,
expected=[[], [], [], [[['a', 'b'], ['c']]], []],
expected_ragged_rank=3),
])
def testRaggedSegmentStack(self,
data,
partitions,
num_partitions,
expected,
data_ragged_rank=None,
segment_ids_ragged_rank=None,
expected_ragged_rank=None):
for seg_dtype in [dtypes.int32, dtypes.int64]:
data_tensor = ragged_factory_ops.constant(
data, row_splits_dtype=seg_dtype, ragged_rank=data_ragged_rank)
segment_ids_tensor = ragged_factory_ops.constant(
partitions,
dtype=seg_dtype,
row_splits_dtype=seg_dtype,
ragged_rank=segment_ids_ragged_rank)
expected_tensor = ragged_factory_ops.constant(
expected,
row_splits_dtype=seg_dtype,
ragged_rank=expected_ragged_rank)
result = ragged_array_ops.stack_dynamic_partitions(
data_tensor, segment_ids_tensor, num_partitions)
self.assertAllEqual(result, expected_tensor)
# Check that it's equivalent to tf.stack(dynamic_partition(...)),
# where applicable.
if (data_ragged_rank == 0 and segment_ids_ragged_rank == 0 and
seg_dtype == dtypes.int32):
equiv = ragged_concat_ops.stack(
data_flow_ops.dynamic_partition(data_tensor, segment_ids_tensor,
num_partitions))
self.assertAllEqual(result, self.evaluate(equiv).to_list())
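  def testStackDynamicPartitionsDocstringSketch(self):
    # A hedged sketch added for illustration (not part of the original
    # suite): the 1D "docstring example" from the parameterized cases above,
    # written out as a direct call with plain Python lists.
    result = ragged_array_ops.stack_dynamic_partitions(
        ['a', 'b', 'c', 'd', 'e'], [3, 0, 2, 2, 3], num_partitions=5)
    expected = ragged_factory_ops.constant(
        [['b'], [], ['c', 'd'], ['a', 'e'], []])
    self.assertAllEqual(result, expected)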
@parameterized.parameters([
dict(
data=['a', 'b', 'c'],
partitions=[2, -1, 0],
num_partitions=10,
error='must be non-negative'),
dict(
data=['a', 'b', 'c'],
partitions=[2, 10, 0],
num_partitions=1,
error='partitions must be less than num_partitions'),
dict(
data=['a', 'b', 'c'],
partitions=[2, 10, 0],
num_partitions=10,
error='partitions must be less than num_partitions'),
dict(
data=[['a', 'b'], ['c']],
partitions=[[2], [3, 0]],
num_partitions=10,
error='data and partitions have incompatible ragged shapes'),
])
def testRuntimeError(self, data, partitions, num_partitions, error):
data = ragged_factory_ops.constant(data)
partitions = ragged_factory_ops.constant(partitions, dtype=dtypes.int64)
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
error):
self.evaluate(
ragged_array_ops.stack_dynamic_partitions(data, partitions,
num_partitions))
@parameterized.parameters([
dict(
data=['a', 'b', 'c'],
partitions=[1, 2],
num_partitions=10,
error=r'Shapes \(2,\) and \(3,\) are incompatible'),
dict(
data=[['a', 'b'], ['c', 'd']],
partitions=[[1, 2, 3], [4, 5, 6]],
num_partitions=10,
error=r'Shapes \(2, 3\) and \(2, 2\) are incompatible'),
dict(
data=['a', 'b', 'c'],
partitions=[1, 2, 3],
num_partitions=[1, 2, 3],
error='must have rank 0'),
])
def testStaticError(self, data, partitions, num_partitions, error):
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
error):
ragged_array_ops.stack_dynamic_partitions(data, partitions,
num_partitions)
def testUnknownRankError(self):
if context.executing_eagerly():
return
partitions = array_ops.placeholder(dtypes.int32, None)
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
'partitions must have known rank'):
ragged_array_ops.stack_dynamic_partitions(['a', 'b', 'c'], partitions, 10)
if __name__ == '__main__':
googletest.main()
# Source (above): tensorflow/python/ops/ragged/ragged_dynamic_partition_op_test.py -- repo tensorflow-r1.15.5-nv23.03
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Jacobian ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.util import nest
def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):
"""Computes jacobian of `output` w.r.t. `inputs`.
Args:
output: A tensor.
inputs: A tensor or a nested structure of tensor objects.
use_pfor: If true, uses pfor for computing the jacobian. Else uses
tf.while_loop.
parallel_iterations: A knob to control how many iterations are dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
A tensor or a nested structure of tensors with the same structure as
`inputs`. Each entry is the jacobian of `output` w.r.t. the corresponding
value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has
shape [x_1, ..., x_m], the corresponding jacobian has shape
[y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is
sparse (IndexedSlices), the jacobian function currently makes it dense and
returns a Tensor instead. This may change in the future.
"""
flat_inputs = nest.flatten(inputs)
output_tensor_shape = output.shape
output_shape = array_ops.shape(output)
output = array_ops.reshape(output, [-1])
def loop_fn(i):
y = array_ops.gather(output, i)
return gradient_ops.gradients(y, flat_inputs)
try:
output_size = int(output.shape[0])
except TypeError:
output_size = array_ops.shape(output)[0]
if use_pfor:
pfor_outputs = control_flow_ops.pfor(
loop_fn, output_size, parallel_iterations=parallel_iterations)
else:
pfor_outputs = control_flow_ops.for_loop(
loop_fn,
[output.dtype] * len(flat_inputs),
output_size,
parallel_iterations=parallel_iterations)
for i, out in enumerate(pfor_outputs):
if isinstance(out, ops.Tensor):
new_shape = array_ops.concat(
[output_shape, array_ops.shape(out)[1:]], axis=0)
out = array_ops.reshape(out, new_shape)
out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))
pfor_outputs[i] = out
return nest.pack_sequence_as(inputs, pfor_outputs)
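# A hedged usage sketch added for illustration (not part of the original
# module). It exercises the shape contract documented in `jacobian`: an
# output of shape [3] computed from an input of shape [3] yields a [3, 3]
# jacobian. The helper name `_jacobian_usage_sketch` is hypothetical, it is
# never called by library code, and it assumes graph mode.
def _jacobian_usage_sketch():
  x = array_ops.ones([3])
  y = x * x  # Elementwise square: dy_i/dx_j = 2 * x_i if i == j, else 0.
  return jacobian(y, x)  # Tensor of shape [3, 3] equal to 2 * identity.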
def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):
"""Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.
e.g.
x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
y = x * x
jacobian = batch_jacobian(y, x)
# => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
Args:
output: A tensor with shape [b, y1, ..., y_n]. `output[i,...]` should
only depend on `inp[i,...]`.
inp: A tensor with shape [b, x1, ..., x_m]
use_pfor: If true, uses pfor for computing the Jacobian. Else uses a
tf.while_loop.
parallel_iterations: A knob to control how many iterations are dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked
per-example jacobians.
Raises:
ValueError: if first dimension of `output` and `inp` do not match.
"""
output_shape = output.shape
if not output_shape[0].is_compatible_with(inp.shape[0]):
raise ValueError("Need first dimension of output shape (%s) and inp shape "
"(%s) to match." % (output.shape, inp.shape))
if output_shape.is_fully_defined():
batch_size = int(output_shape[0])
output_row_size = output_shape.num_elements() // batch_size
else:
output_shape = array_ops.shape(output)
batch_size = output_shape[0]
output_row_size = array_ops.size(output) // batch_size
inp_shape = array_ops.shape(inp)
# Flatten output to 2-D.
with ops.control_dependencies(
[check_ops.assert_equal(batch_size, inp_shape[0])]):
output = array_ops.reshape(output, [batch_size, output_row_size])
def loop_fn(i):
y = array_ops.gather(output, i, axis=1)
return gradient_ops.gradients(y, inp)[0]
if use_pfor:
pfor_output = control_flow_ops.pfor(loop_fn, output_row_size,
parallel_iterations=parallel_iterations)
else:
pfor_output = control_flow_ops.for_loop(
loop_fn, output.dtype,
output_row_size,
parallel_iterations=parallel_iterations)
if pfor_output is None:
return None
pfor_output = array_ops.reshape(pfor_output,
[output_row_size, batch_size, -1])
output = array_ops.transpose(pfor_output, [1, 0, 2])
new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)
return array_ops.reshape(output, new_shape)
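# A hedged usage sketch added for illustration (not part of the original
# module). It reproduces the docstring example for `batch_jacobian`: with
# `x` of shape [2, 2] and `y = x * x`, the stacked per-example jacobians
# have shape [2, 2, 2]. The helper name is hypothetical and graph mode is
# assumed.
def _batch_jacobian_usage_sketch():
  from tensorflow.python.framework import constant_op  # Local import; sketch only.
  x = constant_op.constant([[1., 2.], [3., 4.]])
  y = x * x
  return batch_jacobian(y, x)  # => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]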
# Source (above): tensorflow/python/ops/parallel_for/gradients.py -- repo tensorflow-r1.15.5-nv23.03
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pfor and for_loop."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import time
from absl import flags
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.keras.layers import core as keras_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class PForTest(PForTestCase):
def test_op_conversion_fallback_to_while_loop(self):
# Note that we used top_k op for this test. If a converter gets defined for
# it, we will need to find another op for which a converter has not been
# defined.
x = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return nn.top_k(x_i)
with self.assertRaisesRegexp(ValueError, "No converter defined"):
self._test_loop_fn(
loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
flags.FLAGS.op_conversion_fallback_to_while_loop = True
self._test_loop_fn(
loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
flags.FLAGS.op_conversion_fallback_to_while_loop = False
def test_parallel_iterations(self):
for parallel_iterations in [2, 3, 8, 10]:
x = random_ops.random_uniform([8, 3])
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return array_ops.gather(x, i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 8, parallel_iterations=parallel_iterations)
self._test_loop_fn(loop_fn, 4 * constant_op.constant(2),
parallel_iterations=parallel_iterations)
def test_parallel_iterations_zero(self):
with self.assertRaisesRegexp(ValueError, "positive integer"):
pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=0)
with self.assertRaisesRegexp(TypeError, "positive integer"):
pfor_control_flow_ops.for_loop(lambda i: 1, dtypes.int32, 8,
parallel_iterations=0)
def test_parallel_iterations_one(self):
with self.assertRaisesRegexp(ValueError, "Use for_loop instead"):
pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=1)
def test_vectorized_map(self):
def compute(x):
return math_ops.reduce_mean(x, axis=0, keepdims=True)
result = pfor_control_flow_ops.vectorized_map(
compute, array_ops.ones((10, 5, 3)))
self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))
def test_vectorized_map_example_1(self):
def outer_product(a):
return math_ops.tensordot(a, a, 0)
batch_size = 100
a = array_ops.ones((batch_size, 32, 32))
c = pfor_control_flow_ops.vectorized_map(outer_product, a)
self.assertAllEqual((batch_size, 32, 32, 32, 32), c.shape)
def test_vectorized_map_example_2(self):
batch_size = 10
num_features = 32
layer = keras_core.Dense(1)
def model_fn(arg):
with backprop.GradientTape() as g:
inp, label = arg
inp = array_ops.expand_dims(inp, 0)
label = array_ops.expand_dims(label, 0)
prediction = layer(inp)
loss = nn.l2_loss(label - prediction)
return g.gradient(loss, (layer.kernel, layer.bias))
inputs = random_ops.random_uniform([batch_size, num_features])
labels = random_ops.random_uniform([batch_size, 1])
per_example_gradients = pfor_control_flow_ops.vectorized_map(
model_fn, (inputs, labels))
self.assertAllEqual(per_example_gradients[0].shape,
(batch_size, num_features, 1))
self.assertAllEqual(per_example_gradients[1].shape, (batch_size, 1))
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesTest(PForTestCase):
def test_indexed_slices(self):
def loop_fn(i):
return indexed_slices.IndexedSlices(
indices=i,
values=array_ops.reshape(i, [1]),
dense_shape=[3, 1])
self._test_loop_fn(loop_fn, 2, loop_fn_dtypes=[dtypes.int32])
def test_indexed_slices_components(self):
def loop_fn(i):
slices = indexed_slices.IndexedSlices(
indices=i,
values=array_ops.reshape(i, [1]),
dense_shape=[3, 1])
# Note that returning the components inside the slice avoids
# densification, which may be more efficient.
return slices.values, slices.indices
self._test_loop_fn(loop_fn, 2, loop_fn_dtypes=[dtypes.int32] * 2)
@test_util.run_all_in_graph_and_eager_modes
class ReductionTest(PForTestCase):
def test_reduce_concat(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
vectorized_value = pfor_config.reduce_concat(x_i)
mean_value = math_ops.reduce_mean(vectorized_value, axis=0)
return x_i - mean_value
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_mean(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_sum(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_sum(x_i)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_sum(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_class(self):
x = random_ops.random_uniform([8, 3])
class LoopFn(object):
def __init__(self):
pass
def __call__(self, i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
output = pfor_control_flow_ops.pfor(LoopFn(), 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_functools_partial(self):
x = random_ops.random_uniform([8, 3])
def fn(i, pfor_config, dummy=None):
del dummy
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
loop_fn = functools.partial(fn, dummy=1)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_parallel_iterations(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return pfor_config.reduce_sum(x_i)
with self.assertRaisesRegexp(
ValueError, "parallel_iterations currently unsupported"):
pfor_control_flow_ops.pfor(loop_fn, 8, parallel_iterations=2)
@test_util.run_all_in_graph_and_eager_modes
class BitwiseTest(PForTestCase):
def test_unary_cwise(self):
for op in [bitwise_ops.invert]:
x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x1 = array_ops.gather(x, i)
return op(x1)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
def test_binary_cwise(self):
binary_ops = [
bitwise_ops.bitwise_and,
bitwise_ops.bitwise_or,
bitwise_ops.bitwise_xor,
bitwise_ops.left_shift,
bitwise_ops.right_shift,
]
for op in binary_ops:
x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
y = random_ops.random_uniform([3, 5], maxval=10, dtype=dtypes.int32)
output_dtypes = []
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
del output_dtypes[:]
output_dtypes.extend([t.dtype for t in outputs])
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
@test_util.run_all_in_graph_and_eager_modes
class NNTest(PForTestCase):
def test_conv2d(self):
x = random_ops.random_uniform([3, 2, 12, 12, 3])
filt = random_ops.random_uniform([3, 3, 3, 7])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return nn.conv2d(
x1, filt, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
self._test_loop_fn(loop_fn, 3)
def test_conv2d_backprop_input(self):
x_shape = [2, 12, 12, 3]
filt = random_ops.random_uniform([3, 3, 3, 7])
grad = random_ops.random_uniform([3, 2, 5, 5, 7])
def loop_fn(i):
grad1 = array_ops.gather(grad, i)
return nn.conv2d_backprop_input(
x_shape,
filt,
grad1,
strides=[1, 2, 2, 1],
padding="VALID",
data_format="NHWC")
self._test_loop_fn(loop_fn, 3)
def test_conv2d_backprop_filter(self):
x = random_ops.random_uniform([3, 2, 12, 12, 3])
x_0 = array_ops.gather(x, 0)
filter_sizes = [3, 3, 3, 7]
grad = random_ops.random_uniform([3, 2, 5, 5, 7])
def loop_fn(i):
x_i = array_ops.gather(x, i)
grad_i = array_ops.gather(grad, i)
return [
nn.conv2d_backprop_filter(
inp,
filter_sizes,
grad_i,
strides=[1, 2, 2, 1],
padding="VALID",
data_format="NHWC") for inp in [x_i, x_0]
]
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_avg_pool(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 2, 12, 12, 3])
g.watch(x)
ksize = [1, 3, 3, 1]
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
output = nn.avg_pool(
x1, ksize, strides=[1, 2, 2, 1], padding="VALID",
data_format="NHWC")
loss = nn.l2_loss(output)
return output, g.gradient(loss, x1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_max_pool(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 2, 12, 12, 3])
g.watch(x)
ksize = [1, 3, 3, 1]
strides = [1, 2, 2, 1]
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
output = nn.max_pool(
x1, ksize, strides=strides, padding="VALID", data_format="NHWC")
loss = nn.l2_loss(output)
ones = array_ops.ones_like(output)
g.watch(ones)
grad = g.gradient(loss, x1, output_gradients=ones)
grad_grad = g.gradient(grad, ones)
return output, grad, grad_grad
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_max_pool3d(self):
if test.is_built_with_rocm():
self.skipTest("Pooling with 3D tensors is not supported in ROCm")
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 3, 2, 12, 12, 3])
g.watch(x)
ksize = [1, 1, 3, 3, 1]
strides = [1, 1, 2, 2, 1]
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
output = nn.max_pool3d(
x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
loss = nn.l2_loss(output)
ones = array_ops.ones_like(output)
g.watch(ones)
grad = g.gradient(loss, x1, output_gradients=ones)
grad_grad = g.gradient(grad, ones)
return output, grad, grad_grad
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_fused_batch_norm(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
data_formats = ["NHWC"]
if test.is_gpu_available():
data_formats.append("NCHW")
for is_training in (True, False):
for data_format in data_formats:
with backprop.GradientTape(persistent=True) as g:
if data_format == "NCHW":
x = random_ops.random_uniform([3, 1, 2, 5, 5])
else:
x = random_ops.random_uniform([3, 1, 5, 5, 2])
g.watch(x)
scale = random_ops.random_uniform([2])
g.watch(scale)
offset = random_ops.random_uniform([2])
g.watch(offset)
mean = None if is_training else random_ops.random_uniform([2])
variance = None if is_training else random_ops.random_uniform([2])
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
outputs = nn.fused_batch_norm(
x1,
scale,
offset,
mean=mean,
variance=variance,
epsilon=0.01,
data_format=data_format,
is_training=is_training)
outputs = list(outputs)
# We only test the first value of outputs when is_training is
# False. It looks like CPU and GPU have different outputs for
# batch_mean and batch_variance for this case.
if not is_training:
outputs[1] = constant_op.constant(0.)
outputs[2] = constant_op.constant(0.)
loss = nn.l2_loss(outputs[0])
if is_training:
gradients = g.gradient(loss, [x1, scale, offset])
else:
gradients = [constant_op.constant(0.)] * 3
return outputs + gradients
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 6)
def test_log_softmax(self):
logits = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
logits_i = array_ops.gather(logits, i)
return (nn.log_softmax(logits_i),
nn.log_softmax(logits_i, axis=0),
nn.log_softmax(logits_i, axis=-1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_softmax(self):
logits = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
logits_i = array_ops.gather(logits, i)
return (nn.softmax(logits_i),
nn.softmax(logits_i, axis=0),
nn.softmax(logits_i, axis=-1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_softmax_cross_entropy_with_logits(self):
with backprop.GradientTape(persistent=True) as g:
logits = random_ops.random_uniform([3, 2, 4])
g.watch(logits)
labels = random_ops.random_uniform([3, 2, 4])
labels /= math_ops.reduce_sum(labels, axis=[2], keepdims=True)
def loop_fn(i):
with g:
logits_i = array_ops.gather(logits, i)
labels_i = array_ops.gather(labels, i)
loss = nn.softmax_cross_entropy_with_logits(
labels=labels_i, logits=logits_i)
total_loss = math_ops.reduce_sum(loss)
return loss, g.gradient(total_loss, logits_i)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
class RandomTest(PForTestCase):
# The random values generated in the two implementations are not guaranteed to
# match. So we only check the returned shapes.
def run_and_assert_equal(self, targets1, targets2):
outputs = self._run_targets(targets1, targets2)
n = len(outputs) // 2
for i in range(n):
self.assertAllEqual(outputs[i].shape, outputs[i + n].shape)
def test_random_uniform(self):
def loop_fn(_):
return random_ops.random_uniform([3])
self._test_loop_fn(loop_fn, 5)
def test_random_uniform_int(self):
def loop_fn(_):
return random_ops.random_uniform([3], maxval=1, dtype=dtypes.int32)
self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=dtypes.int32)
def test_random_standard_normal(self):
def loop_fn(_):
return random_ops.random_normal([3])
self._test_loop_fn(loop_fn, 5)
def test_truncated_normal(self):
def loop_fn(_):
return random_ops.truncated_normal([3])
self._test_loop_fn(loop_fn, 5)
def test_random_gamma_invariant_alpha(self):
def loop_fn(_):
return random_ops.random_gamma([3], alpha=[0.5])
self._test_loop_fn(loop_fn, 5)
def test_random_gamma_varying_alpha(self):
alphas = math_ops.exp(random_ops.random_normal([5, 3, 2]))
def loop_fn(i):
alphas_i = array_ops.gather(alphas, i)
# Test both scalar and non-scalar params and shapes.
return (random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[]),
random_ops.random_gamma(alpha=alphas_i, shape=[]),
random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[3]),
random_ops.random_gamma(alpha=alphas_i, shape=[3]))
self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.float32] * 4)
def test_random_poisson_v2_invariant_rate(self):
def loop_fn(_):
return random_ops.random_poisson(lam=[1.3], shape=[3])
self._test_loop_fn(loop_fn, 5)
def test_random_poisson_v2_varying_rate(self):
rates = math_ops.exp(random_ops.random_normal([5, 3, 2]))
def loop_fn(i):
rates_i = array_ops.gather(rates, i)
# Test both scalar and non-scalar params and shapes.
return (random_ops.random_poisson(lam=rates_i[0, 0], shape=[]),
random_ops.random_poisson(lam=rates_i, shape=[]),
random_ops.random_poisson(lam=rates_i[0, 0], shape=[3]),
random_ops.random_poisson(lam=rates_i, shape=[3]))
self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.float32] * 4)
def test_random_multinomial_invariant_logits(self):
def loop_fn(_):
return random_ops.categorical(logits=[[1., -1.]], num_samples=3)
self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.int64])
def test_random_multinomial_varying_logits(self):
logits = random_ops.random_normal([5, 3, 2])
def loop_fn(i):
logits_i = array_ops.gather(logits, i)
return random_ops.categorical(logits_i, num_samples=3)
self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.int64])
class LoggingTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_print(self):
x = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return logging_ops.Print(
x1, [x1, "x1", array_ops.shape(x1)], summarize=10)
self._test_loop_fn(loop_fn, 3)
def test_assert(self):
def loop_fn(i):
return control_flow_ops.Assert(i < 10, [i, [10], [i + 1]])
# TODO(agarwal): make this work with for_loop.
with session.Session() as sess:
sess.run(pfor_control_flow_ops.pfor(loop_fn, 3))
class TensorArrayTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_read(self):
ta = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
def loop_fn(i):
return ta.read(i), ta.read(0)
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_gather(self):
ta = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
def loop_fn(i):
return ta.gather([i]), ta.gather([0, 1])
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_write_and_scatter(self):
t = tensor_array_ops.TensorArray(dtypes.int32, 10, clear_after_read=False)
handle = t.handle
def loop_fn(i):
ta = t.write(i + 2, 2 * i).write(i, 5)
ta = ta.scatter([4 + i], [4]).scatter([6 + i, 8 + i], [6 + i, 8 + i])
return ta.flow
t1 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
out1 = tensor_array_ops.TensorArray(
dtypes.int32, handle=handle, flow=t1[-1]).stack()
output1 = self._run_targets(out1)
t2 = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, iters=2)
out2 = tensor_array_ops.TensorArray(
dtypes.int32, handle=handle, flow=t2[-1]).stack()
output2 = self._run_targets(out2)
self.assertAllClose(output2, output1)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_write(self):
def loop_fn(i):
# TODO(agarwal): switching the order of writes to ta1 does not work.
ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).write(0, i).write(
1, 1)
ta2 = tensor_array_ops.TensorArray(dtypes.int32, 1).write(0, 1)
return ta1.stack(), ta2.stack()
self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_scatter(self):
def loop_fn(i):
# TODO(agarwal): switching the order of scatter to ta1 does not work.
ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).scatter(
[0], [[i, 2]]).scatter([1], [[1, 2]])
ta2 = tensor_array_ops.TensorArray(dtypes.int32,
2).scatter([0], [3]).scatter([1], [4])
return ta1.stack(), ta2.stack()
self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_read(self):
def loop_fn(i):
ta1 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
ta2 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
# TODO(agarwal): ta1.read(i) currently is not supported.
return ta1.read(0), ta2.read(0), ta2.read(i)
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_gather(self):
def loop_fn(i):
ta1 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
ta2 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
# TODO(agarwal): ta1.read(i) currently is not supported.
return ta1.gather([0, 1]), ta2.gather([0, 1]), ta2.gather([i])
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)
@test_util.run_v1_only("b/122612051")
def test_grad(self):
x = random_ops.random_uniform([3, 2])
ta = tensor_array_ops.TensorArray(
dtypes.float32, 3, clear_after_read=False).unstack(x)
y = math_ops.square(ta.stack())
def loop_fn(i):
y_i = array_ops.gather(y, i)
grad = gradient_ops.gradients(y_i, x)[0]
return array_ops.gather(grad, i)
t1 = pfor_control_flow_ops.pfor(loop_fn, iters=3)
    # y = x * x. Hence dy/dx = 2 * x.
    expected_grad = 2.0 * x
    with session.Session() as sess:
      computed_grad, expected_grad = sess.run([t1, expected_grad])
      self.assertAllClose(expected_grad, computed_grad)
class StackTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_stack_inside_loop_invariant(self):
def loop_fn(_):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
op1 = data_flow_ops.stack_push_v2(s, 1)
with ops.control_dependencies([op1]):
op2 = data_flow_ops.stack_push_v2(s, 2)
with ops.control_dependencies([op2]):
e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
with ops.control_dependencies([e2]):
e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
return e1, e2
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_stack_inside_push_loop_dependent(self):
def loop_fn(i):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
op1 = data_flow_ops.stack_push_v2(s, i)
with ops.control_dependencies([op1]):
op2 = data_flow_ops.stack_push_v2(s, 2)
with ops.control_dependencies([op2]):
e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
with ops.control_dependencies([e2]):
e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
return e1, e2
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_stack_outside_pop(self):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
op = data_flow_ops.stack_push_v2(s, 5)
with ops.control_dependencies([op]):
op = data_flow_ops.stack_push_v2(s, 6)
with ops.control_dependencies([op]):
op = data_flow_ops.stack_push_v2(s, 7)
def loop_fn(_):
e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
with ops.control_dependencies([e1]):
e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
return e1, e2
with ops.control_dependencies([op]):
e1, e2 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
with ops.control_dependencies([e1, e2]):
e3 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
v1, v2, v3 = self._run_targets([e1, e2, e3], run_init=False)
self.assertAllEqual([7, 7], v1)
self.assertAllEqual([6, 6], v2)
self.assertAllEqual(5, v3)
@test_util.run_v1_only("b/122612051")
def test_stack_outside_push(self):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
def loop_fn(_):
return data_flow_ops.stack_push_v2(s, 7)
with self.assertRaisesRegexp(ValueError, "StackPushV2 not allowed.*"):
pfor_control_flow_ops.pfor(loop_fn, iters=2)
# TODO(agarwal): test nested while_loops. This currently requires converting a
# tf.cond.
class ControlFlowTest(PForTestCase):
def test_while_outside_loop(self):
x = control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
def loop_fn(i):
return x + i
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_invariant_while(self):
def loop_fn(_):
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_invariant_while_with_control_dependency(self):
def loop_fn(i):
with ops.control_dependencies([i]):
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,
[0])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_while_with_stateful_ops(self):
def loop_fn(_):
return control_flow_ops.while_loop(
lambda j, x: j < 4,
lambda j, x: (j + 1, x + random_ops.random_uniform([])), [0, 0.])[0]
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_while_unstacked_condition(self):
def loop_fn(i):
return control_flow_ops.while_loop(lambda j, x: j < 4,
lambda j, x: (j + 1, x + i), [0, 0])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_while(self):
x = random_ops.random_uniform([3, 5])
lengths = constant_op.constant([4, 0, 2])
def loop_fn(i):
x_i = array_ops.gather(x, i)
lengths_i = array_ops.gather(lengths, i)
_, total = control_flow_ops.while_loop(
lambda j, _: j < lengths_i,
lambda j, t: (j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
return total
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])
@test_util.run_v1_only("b/122612051")
def test_while_jacobian(self):
x = random_ops.random_uniform([1, 3])
y = random_ops.random_uniform([3, 3])
# out = x @ y @ y @ y @ y, where @ is matmul operator.
_, out = control_flow_ops.while_loop(
lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
[0, x])
def loop_fn(i):
out_i = array_ops.gather(out, i, axis=1)
return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])
out = pfor_control_flow_ops.pfor(loop_fn, iters=3)
# The above code does not work with tf.while_loop instead of pfor. So we
# manually compute the expected output here.
    # Note that the gradient of the output w.r.t. x is (y @ y @ y @ y)^T.
expected_output = y
for _ in range(3):
expected_output = math_ops.matmul(expected_output, y)
expected_output = array_ops.transpose(expected_output, [1, 0])
with session.Session() as sess:
out, expected = sess.run([out, expected_output])
self.assertAllClose(expected, out)
@test_util.run_v1_only("b/122612051")
def test_tensor_array_as_loop_variable(self):
def loop_fn(i):
def body(j, ta):
ta = ta.write(j, i + j * j)
return j + 1, ta
_, ta = control_flow_ops.while_loop(
lambda j, _: j < 4, body,
(0, tensor_array_ops.TensorArray(dtypes.int32, size=4)))
return ta.stack()
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_read_tensor_array_partitioned_indices(self):
# Note that tensor array values are pfor loop dependent, and the while loop
# termination condition is also dependent on pfor iteration.
def loop_fn(i):
ta = tensor_array_ops.TensorArray(dtypes.int32, size=6)
ta = ta.unstack(i + list(range(5)))
def body(j, s):
return j + 1, s + ta.read(j)
_, s = control_flow_ops.while_loop(lambda j, _: j < i,
body,
(0, 0))
return s
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_external_while_loop_grad(self):
    # Here we test that external while_loops that are extended from inside pfor
    # (due to gradient calls) are not actually converted. If the loop below
    # were converted, all pfor iterations would write to the same tensor array
    # indices.
x = constant_op.constant(1.)
def body(j, ta):
ta = ta.write(j, x)
return j + 1, ta
_, ta = control_flow_ops.while_loop(
lambda j, _: j < 4, body,
(0, tensor_array_ops.TensorArray(dtypes.float32, size=4)))
out = ta.stack()
def loop_fn(i):
out_i = array_ops.gather(out, i)
return gradient_ops.gradients(out_i, x)[0]
with session.Session() as sess:
      # out is [x, x, x, x] and each gathered out_i equals x. Hence the
      # gradients should be [1, 1, 1].
self.assertAllEqual([1, 1, 1],
sess.run(pfor_control_flow_ops.pfor(loop_fn, 3)))
@test_util.run_v1_only("b/122612051")
def test_tensor_array_grad(self):
inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
ta = ta.unstack(inp)
def loop_fn(i):
def body(j, x):
value = ta.gather([j])
value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
return j + 1, x + value
_, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,
(0, array_ops.zeros([2])))
out = math_ops.reduce_prod(out)
return out, gradient_ops.gradients(out, inp)[0]
pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
# Note that tf.while_loop does not work in the setup above. So we manually
# construct the equivalent computation of the above loops here.
real_out = math_ops.reduce_sum(inp, axis=[0])
real_out = math_ops.reduce_prod(real_out, axis=[1])
    # Note that gradients of real_out accumulate the gradients across all of
    # its output values. Hence we do the same aggregation on pfor_out_grad.
real_out_grad = gradient_ops.gradients(real_out, inp)[0]
sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])
with session.Session() as sess:
v1, v2, v1_grad, v2_grad = sess.run(
[pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
self.assertAllClose(v1, v2)
self.assertAllClose(v1_grad, v2_grad)
def dynamic_lstm_input_fn(batch_size, state_size, max_steps):
# We make inputs and sequence_length constant so that multiple session.run
# calls produce the same result.
inputs = constant_op.constant(
np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)
sequence_length = np.random.randint(0, size=[batch_size], high=max_steps + 1)
sequence_length = constant_op.constant(sequence_length, dtype=dtypes.int32)
return inputs, sequence_length
def create_dynamic_lstm(cell_fn, batch_size, state_size, max_steps):
cell = cell_fn(state_size)
inputs, sequence_length = dynamic_lstm_input_fn(batch_size,
state_size,
max_steps)
inputs_ta = tensor_array_ops.TensorArray(
dtypes.float32, size=max_steps, element_shape=[batch_size, state_size])
inputs_time_major = array_ops.transpose(inputs, [1, 0, 2])
inputs_ta = inputs_ta.unstack(inputs_time_major)
zeros = array_ops.zeros([state_size])
def loop_fn(i):
sequence_length_i = array_ops.gather(sequence_length, i)
def body_fn(t, state, ta):
inputs_t = array_ops.expand_dims(
array_ops.gather(inputs_ta.read(t), i), 0)
output, new_state = cell(inputs_t, state)
output = array_ops.reshape(output, [-1])
# TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
# array_ops.where when t < min(sequence_length). Doing that requires
# supporting tf.cond pfor conversion.
done = t >= sequence_length_i
output = array_ops.where(done, zeros, output)
ta = ta.write(t, output)
new_state = [array_ops.where(done, s, ns) for s, ns in
zip(nest.flatten(state), nest.flatten(new_state))]
new_state = nest.pack_sequence_as(state, new_state)
return t + 1, new_state, ta
def condition_fn(t, _, unused):
del unused
return t < max_steps
initial_state = cell.zero_state(1, dtypes.float32)
_, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [
0, initial_state,
tensor_array_ops.TensorArray(dtypes.float32, max_steps)
])
new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
new_state = nest.pack_sequence_as(initial_state, new_state)
return ta.stack(), new_state
pfor_output = pfor_control_flow_ops.pfor(loop_fn, batch_size)
tf_output = rnn.dynamic_rnn(
cell,
inputs,
sequence_length=sequence_length,
initial_state=cell.zero_state(batch_size, dtypes.float32))
return pfor_output, tf_output
class RNNTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_dynamic_rnn(self):
pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell,
3, 5, 7)
self.run_and_assert_equal(pfor_outputs, tf_outputs)
@test_util.run_v1_only("b/122612051")
def test_dynamic_lstm(self):
pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicLSTMCell,
3, 5, 7)
self.run_and_assert_equal(pfor_outputs, tf_outputs)
# TODO(agarwal): benchmark numbers on GPU for graphs based on while_loop
# conversion don't look good. Some of it seems like a lot of copies between
# host and device. Optimize that.
class Benchmarks(test.Benchmark):
def _run(self, targets, iters, name=None):
def _done(t):
      # Note that we don't use tf.control_dependencies, since that does not
      # ensure that the computation on the GPU has actually finished. Instead
      # we fetch the first element of the output, and assume that this will
      # not be called on empty tensors.
return array_ops.gather(array_ops.reshape(t, [-1]), 0)
targets = [_done(x) for x in nest.flatten(targets)]
sess = session.Session()
with sess:
init = variables.global_variables_initializer()
sess.run(init)
run_fn = sess.make_callable(targets)
run_fn() # Warm up
begin = time.time()
for _ in range(iters):
run_fn()
end = time.time()
avg_time_ms = 1000 * (end - begin) / iters
self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
return avg_time_ms
def benchmark_sess_run_overhead(self):
with ops.Graph().as_default():
x = constant_op.constant(1.0)
self._run(x, 10000, name="session_run_overhead")
def benchmark_add(self):
with ops.Graph().as_default():
n = 256
params = 1000
x = random_ops.random_normal([n, params])
y = random_ops.random_normal([n, params])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
return x_i + y_i
pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
manual = x + y
self._run(manual, 1000, name="manual_add")
self._run(pfor_outputs, 1000, name="pfor_add")
self._run(while_outputs, 100, name="while_add")
def benchmark_matmul(self):
with ops.Graph().as_default():
n = 1024
params = 1000
x = random_ops.random_normal([n, params])
y = random_ops.random_normal([params, params])
def loop_fn(i):
x_i = array_ops.expand_dims(array_ops.gather(x, i), 0)
return math_ops.matmul(x_i, y)
pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
manual = math_ops.matmul(x, y)
self._run(manual, 1000, name="manual_matmul")
self._run(pfor_outputs, 1000, name="pfor_matmul")
self._run(while_outputs, 100, name="while_matmul")
def benchmark_map_fn(self):
with ops.Graph().as_default():
b = 256
params = 1000
inp = random_ops.random_normal((b, params))
fn = lambda x: x * x
def pfor_map_fn(f, x):
return pfor_control_flow_ops.pfor(
lambda i: f(array_ops.gather(x, i)),
array_ops.shape(x)[0])
map_output = map_fn.map_fn(fn, inp)
pfor_output = pfor_map_fn(fn, inp)
self._run(map_output, 100, name="tf_map_fn")
self._run(pfor_output, 100, name="pfor_map_fn")
def benchmark_basic_while(self):
with ops.Graph().as_default():
def loop_fn(i):
_, s = control_flow_ops.while_loop(
lambda t, x: t < i,
lambda t, x: (t + 1, x + i),
[0, 0])
return s
iters = 50
pfor_output = pfor_control_flow_ops.pfor(loop_fn, iters)
for_loop_output = pfor_control_flow_ops.for_loop(loop_fn, dtypes.int32,
iters)
self._run(pfor_output, 100, name="pfor_basic")
self._run(for_loop_output, 100, name="for_loop_basic")
def benchmark_dynamic_rnn(self):
with ops.Graph().as_default():
pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell,
128, 512, 16)
self._run(pfor_outputs, 100, name="pfor_rnn")
self._run(tf_outputs, 100, name="tf_rnn")
def benchmark_reduction(self):
n = 1024
with ops.Graph().as_default():
x = random_ops.random_uniform([n, n])
w = random_ops.random_uniform([n, n])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return math_ops.reduce_sum(
math_ops.matmul(pfor_config.reduce_concat(x_i), w))
# Note that output_reduction will be tiled, so there may be some minor
# overheads compared to output_no_reduction.
output_reduction = pfor_control_flow_ops.pfor(loop_fn, n)
output_no_reduction = math_ops.reduce_sum(math_ops.matmul(x, w))
# Benchmark to test that reduction does not add overhead and its output is
# treated as loop invariant.
self._run(output_reduction, 30, name="matmul_reduction")
self._run(output_no_reduction, 30, name="matmul_no_reduction")
class SparseTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_var_loop_len(self):
num_iters = array_ops.placeholder(dtypes.int32)
def loop_fn(_):
return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # dense value: [4, 5, 6]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
with self.cached_session() as sess:
sess.run(pfor, feed_dict={num_iters: 3})
@test_util.run_v1_only("b/122612051")
def test_sparse_result_none_stacked(self):
num_iters = 10
def loop_fn(_):
return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # dense value: [4, 5, 6]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
indices = [[i, j] for i in range(num_iters) for j in range(3)]
values = [4, 5, 6] * num_iters
dense_shapes = [num_iters, 3]
# Expected result: [[4, 5, 6], [4, 5, 6], [4, 5, 6], ...]
manual = sparse_tensor.SparseTensor(indices, values, dense_shapes)
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_all_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
indices = array_ops.expand_dims(i, 0)
return sparse_tensor.SparseTensor(indices, i, i + 1) # [0, ..., 0, i]
# Expected result: [[0], [0, 1], [0, 0, 2], [0, 0, 0, 3], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
list(range(num_iters)),
(num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_indices_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
indices = array_ops.expand_dims(i, 0)
return sparse_tensor.SparseTensor(indices, [1], [num_iters])
# Expected result: identity matrix size num_iters * num_iters
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
[1] * num_iters, (num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_values_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
return sparse_tensor.SparseTensor([[0]], i, [num_iters]) # [i, 0, ..., 0]
    # Expected result: [[0, 0, ...], [1, 0, ...], [2, 0, ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
list(range(num_iters)),
(num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_shapes_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
return sparse_tensor.SparseTensor([[0]], [1], i + 1) # [1, 0, ..., 0]
# Expected result: [[1, 0, 0, ...], [1, 0, 0, ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
[1] * num_iters, (num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_shapes_stacked_2D(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i + 1, dtypes.int64), 0)
shape = array_ops.concat([i, i], 0)
return sparse_tensor.SparseTensor([[0, 0]], [1], shape) # [1, 0, ..., 0]
# Expected result: [[[1, 0, ...], [0, ..., 0], [0, ..., 0], ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0, 0] for i in range(num_iters)],
[1] * num_iters,
(num_iters, num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
class ParsingTest(PForTestCase):
def test_decode_csv(self):
csv_tensor = constant_op.constant([["1:2:3"], ["::"], ["7:8:9"]])
kwargs = {"record_defaults": [[10], [20], [30]], "field_delim": ":"}
def loop_fn(i):
line = array_ops.gather(csv_tensor, i)
return parsing_ops.decode_csv(line, **kwargs)
self._test_loop_fn(loop_fn, iters=3, loop_fn_dtypes=[dtypes.int32] * 3)
@test_util.run_v1_only("b/122612051")
def test_parse_single_example(self):
def _int64_feature(*values):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
def _bytes_feature(*values):
return feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[v.encode("utf-8") for v in values]))
examples = constant_op.constant([
example_pb2.Example(
features=feature_pb2.Features(
feature={
"dense_int": _int64_feature(i),
"dense_str": _bytes_feature(str(i)),
"sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
"sparse_str": _bytes_feature(*["abc"] * i)
})).SerializeToString() for i in range(10)
])
features = {
"dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
"dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
"sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
"sparse_str": parsing_ops.VarLenFeature(dtypes.string),
}
def loop_fn(i):
example_proto = array_ops.gather(examples, i)
f = parsing_ops.parse_single_example(example_proto, features)
return f
pfor = pfor_control_flow_ops.pfor(loop_fn, iters=10)
manual = parsing_ops.parse_example(examples, features)
self.run_and_assert_equal(pfor, manual)
class PartitionedCallTest(PForTestCase):
def test_simple(self):
@def_function.function
def f(x):
return math_ops.square(x) + 1
z = random_ops.random_uniform([4])
def loop_fn(i):
return f(array_ops.gather(z, i))
self._test_loop_fn(loop_fn, 4)
def test_nested_calls(self):
@def_function.function
def inner(x):
return math_ops.square(x)
@def_function.function
def outer(y):
return math_ops.reduce_sum(inner(y)) + 2
z = random_ops.random_uniform([4, 2])
def loop_fn(i):
return outer(array_ops.gather(z, i))
self._test_loop_fn(loop_fn, 4)
def test_nested_definition(self):
@def_function.function
def outer(y):
@def_function.function
def inner(x):
return math_ops.square(x) + 1
return math_ops.reduce_sum(inner(y)) + 2
z = random_ops.random_uniform([4, 2])
def loop_fn(i):
return outer(array_ops.gather(z, i))
self._test_loop_fn(loop_fn, 4)
def test_gradients(self):
@def_function.function
def f(x):
return math_ops.square(x) + 1
z = random_ops.random_uniform([4, 2])
def loop_fn(i):
z_i = array_ops.gather(z, i)
with backprop.GradientTape() as g:
g.watch(z_i)
out = f(z_i)
return out, g.gradient(out, z_i)
self._test_loop_fn(loop_fn, 4, [dtypes.float32] * 2)
def test_stateful_with_gradients(self):
z = random_ops.random_uniform([4, 2])
v = variables.Variable(z[0])
@def_function.function
def f(x):
return math_ops.square(x) + v + 1
def loop_fn(i):
z_i = array_ops.gather(z, i)
with backprop.GradientTape() as g:
g.watch(z_i)
out = f(z_i)
return out, g.gradient(out, z_i)
self._test_loop_fn(loop_fn, 4, [dtypes.float32] * 2)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/parallel_for/control_flow_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for pfor, for_loop, jacobian."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.parallel_for import * # pylint: disable=wildcard-import
from tensorflow.python.ops.parallel_for.control_flow_ops import for_loop
from tensorflow.python.ops.parallel_for.control_flow_ops import pfor
from tensorflow.python.ops.parallel_for.gradients import batch_jacobian
from tensorflow.python.ops.parallel_for.gradients import jacobian
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/parallel_for/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""for_loop and pfor ops."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.parallel_for.pfor import PFor
from tensorflow.python.ops.parallel_for.pfor import PForConfig
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
def for_loop(loop_fn, loop_fn_dtypes, iters, parallel_iterations=None):
"""Runs `loop_fn` `iters` times and stacks the outputs.
Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and
stacks corresponding outputs of the different runs.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and returns a possibly nested structure of tensor
objects. The shape of these outputs should not depend on the input.
loop_fn_dtypes: dtypes for the outputs of loop_fn.
iters: Number of iterations for which to run loop_fn.
parallel_iterations: The number of iterations that can be dispatched in
parallel. This knob can be used to control the total memory usage.
  Returns:
    A nested structure of stacked output tensor objects with the same nested
    structure as the output of `loop_fn`.
"""
flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes)
is_none_list = []
def while_body(i, *ta_list):
"""Body of while loop."""
fn_output = nest.flatten(loop_fn(i))
if len(fn_output) != len(flat_loop_fn_dtypes):
raise ValueError(
"Number of expected outputs, %d, does not match the number of "
"actual outputs, %d, from loop_fn" % (len(flat_loop_fn_dtypes),
len(fn_output)))
outputs = []
del is_none_list[:]
is_none_list.extend([x is None for x in fn_output])
for out, ta in zip(fn_output, ta_list):
# TODO(agarwal): support returning Operation objects from loop_fn.
if out is not None:
        # out may be a ref tensor; the expand_dims below yields a regular
        # (non-ref) tensor and adds the leading dimension needed so that the
        # TensorArray concat below stacks the per-iteration outputs.
ta = ta.write(i, array_ops.expand_dims(out, 0))
outputs.append(ta)
return tuple([i + 1] + outputs)
if parallel_iterations is not None:
extra_args = {"parallel_iterations": parallel_iterations}
else:
extra_args = {}
ta_list = control_flow_ops.while_loop(
lambda i, *ta: i < iters,
while_body,
[0] + [tensor_array_ops.TensorArray(dtype.base_dtype, iters)
for dtype in flat_loop_fn_dtypes],
**extra_args)[1:]
# TODO(rachelim): enable this for sparse tensors
output = [None if is_none else ta.concat()
for ta, is_none in zip(ta_list, is_none_list)]
assert len(output) in (0, len(flat_loop_fn_dtypes))
if not output:
# This may happen for the case where iters == 0.
return None
else:
return nest.pack_sequence_as(loop_fn_dtypes, output)
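# A minimal, illustrative sketch of `for_loop` usage (not part of the library
# API; the helper below is hypothetical and is never called). It stacks the
# squares of the loop indices into a single [4] float32 tensor.
def _for_loop_usage_sketch():
  """Returns a tensor that evaluates to [0., 1., 4., 9.]."""
  def squares_loop_fn(i):
    x = math_ops.cast(i, dtypes.float32)
    return x * x
  return for_loop(squares_loop_fn, dtypes.float32, iters=4)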
def _flatten_first_two_dims(x):
"""Flattens the first two dimensions of x into a single dimension."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([[old_shape[0] * old_shape[1]], old_shape[2:]],
axis=0)
return array_ops.reshape(x, new_shape)
PFOR_CONFIG_ARG = "pfor_config"
def _is_under_xla_context():
"""Check if we are currently inside an XLA compile context."""
g = ops.get_default_graph()
while g is not None:
control_flow_context = g._get_control_flow_context() # pylint: disable=protected-access
while control_flow_context is not None:
if control_flow_context.IsXLAContext():
return True
else:
control_flow_context = control_flow_context.outer_context
# If g is a FuncGraph, get its outer_graph.
g = getattr(g, "outer_graph", None)
return False
def pfor(loop_fn, iters, parallel_iterations=None):
"""Equivalent to running `loop_fn` `iters` times and stacking the outputs.
`pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters`
times, with input from 0 to `iters - 1`, and stacking corresponding output of
each iteration. However the implementation does not use a tf.while_loop.
Instead it adds new operations to the graph that collectively compute the same
value as what running `loop_fn` in a loop would compute.
This is an experimental feature and currently has a lot of limitations:
- There should be no data dependency between the different iterations. For
example, a future iteration should not depend on a value or side-effect of
a previous iteration.
  - Stateful kernels are mostly not supported, since these often imply a data
    dependency or an ordering of the iterations. A limited set of such
    stateful kernels is supported though (like RandomFoo, Variable operations
    like reads, etc).
- Conversion works only on a limited set of kernels for which a converter
has been registered.
- loop_fn has limited support for control flow operations. tf.cond in
particular is not supported.
- `loop_fn` should return nested structure of Tensors or Operations. However
if an Operation is returned, it should have zero outputs.
- The shape and dtype of `loop_fn` outputs should not depend on the input
to loop_fn.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and optionally a keyword argument `pfor_config` set
to a PForConfig object. It returns a possibly nested structure of Tensor
      or Operation objects. Note that if the `parallel_iterations` argument is
      set to something other than None, `loop_fn` may be called more than once
      during graph construction, so it may need to avoid mutating global state.
iters: Number of iterations for which to run loop_fn.
parallel_iterations: A knob to control how many iterations are vectorized
and dispatched in parallel. The default value of None corresponds to
vectorizing all the iterations. If `parallel_iterations` is smaller than
`iters`, then chunks of at most that many iterations are dispatched in
sequence. This knob can be used to control the total memory usage.
  Returns:
    A nested structure of stacked tensor objects with the same nested
    structure as the output of `loop_fn`.
Raises:
ValueError: If parallel_iterations is not None and not an integer > 1.
"""
def f():
return _pfor_impl(loop_fn, iters, parallel_iterations=parallel_iterations)
# Note that we wrap into a tf.function if in eager execution mode or under
# XLA compilation. The latter is so that we don't compile operations like
# tf.placeholder that are created by the loop body.
if context.executing_eagerly() or _is_under_xla_context():
f = function.defun(f)
return f()
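# A minimal, illustrative sketch of `pfor` usage (not part of the library API;
# the helper below is hypothetical and is never called). Each iteration
# processes one row of `matrix`, and `pfor` stacks the per-row results.
def _pfor_usage_sketch(matrix):
  """Given a float `matrix` of shape [n, d], returns per-row sums of shape [n]."""
  def loop_fn(i):
    row = array_ops.gather(matrix, i)
    return math_ops.reduce_sum(row)
  return pfor(loop_fn, array_ops.shape(matrix)[0])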
def _loop_fn_has_config(loop_fn):
"""Test if `loop_fn` has a `pfor_config` argument."""
if tf_inspect.isfunction(loop_fn):
argspec = tf_inspect.getargspec(loop_fn)
return PFOR_CONFIG_ARG in argspec.args
elif isinstance(loop_fn, functools.partial):
fn = loop_fn.func
argspec = tf_inspect.getargspec(fn)
return (PFOR_CONFIG_ARG in argspec.args and
PFOR_CONFIG_ARG not in loop_fn.keywords)
else:
loop_class = tf_decorator.unwrap(loop_fn)[1]
if not hasattr(loop_class, "__call__"):
raise ValueError("loop_fn object did not have a __call__ method")
argspec = tf_inspect.getargspec(loop_class.__call__)
return PFOR_CONFIG_ARG in argspec.args
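# Illustrative sketch (not part of the library API; never called): a `loop_fn`
# that `_loop_fn_has_config` detects because it declares a `pfor_config`
# keyword argument. Mirroring the reduction benchmark in the tests, each
# iteration uses `pfor_config.reduce_concat` to see the values of `x_i`
# stacked across all pfor iterations before multiplying by `w`.
def _loop_fn_with_config_sketch(x, w):
  """Returns a loop_fn whose reduced output is treated as loop invariant."""
  def loop_fn(i, pfor_config):
    x_i = array_ops.gather(x, i)
    return math_ops.reduce_sum(
        math_ops.matmul(pfor_config.reduce_concat(x_i), w))
  return loop_fn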
def _pfor_impl(loop_fn, iters, parallel_iterations=None, pfor_config=None):
"""Implementation of pfor."""
loop_fn_has_config = _loop_fn_has_config(loop_fn)
existing_ops = set(ops.get_default_graph().get_operations())
# Run the loop body
with ops.name_scope("loop_body"):
loop_var = array_ops.placeholder(dtypes.int32, shape=[])
if loop_fn_has_config:
if pfor_config is None:
pfor_config = PForConfig()
pfor_config._set_iters(iters) # pylint: disable=protected-access
loop_fn_outputs = loop_fn(loop_var, **{PFOR_CONFIG_ARG: pfor_config})
else:
assert pfor_config is None
loop_fn_outputs = loop_fn(loop_var)
# Convert outputs to Tensor if needed.
tmp_loop_fn_outputs = []
for loop_fn_output in nest.flatten(loop_fn_outputs):
if (loop_fn_output is not None and not isinstance(
loop_fn_output,
(ops.Operation, ops.Tensor, sparse_tensor.SparseTensor))):
if isinstance(loop_fn_output, indexed_slices.IndexedSlices):
logging.warn("Converting %s to a dense representation may make it slow."
" Alternatively, output the indices and values of the"
" IndexedSlices separately, and handle the vectorized"
" outputs directly." % loop_fn_output)
loop_fn_output = ops.convert_to_tensor(loop_fn_output)
tmp_loop_fn_outputs.append(loop_fn_output)
loop_fn_outputs = nest.pack_sequence_as(loop_fn_outputs, tmp_loop_fn_outputs)
new_ops = set(ops.get_default_graph().get_operations()) - existing_ops
iters = ops.convert_to_tensor(iters)
if parallel_iterations is not None:
if parallel_iterations < 1:
raise ValueError("parallel_iterations must be None or a positive integer")
if parallel_iterations == 1:
raise ValueError("Found parallel_iterations == 1. Use for_loop instead.")
iters_value = tensor_util.constant_value(iters)
if iters_value is not None and iters_value < parallel_iterations:
parallel_iterations = None
if parallel_iterations is None:
with ops.name_scope("pfor"):
converter = PFor(loop_var, iters, new_ops, pfor_config=pfor_config)
outputs = []
for loop_fn_output in nest.flatten(loop_fn_outputs):
outputs.append(converter.convert(loop_fn_output))
return nest.pack_sequence_as(loop_fn_outputs, outputs)
else:
if pfor_config is not None and pfor_config._has_reductions(): # pylint: disable=protected-access
raise ValueError("Setting parallel_iterations currently unsupported if"
" reductions across iterations are performed.")
num_tiled_iterations = iters // parallel_iterations
num_remaining_iterations = iters % parallel_iterations
# TODO(agarwal): Avoid calling loop_fn twice. Generate the loop body inside
# a tf.function and extract the graph from there to vectorize it.
with ops.name_scope("pfor_untiled"):
converter = PFor(loop_var, num_remaining_iterations, new_ops,
pfor_config=pfor_config)
remaining_outputs = []
flattened_loop_fn_outputs = nest.flatten(loop_fn_outputs)
for loop_fn_output in flattened_loop_fn_outputs:
remaining_outputs.append(converter.convert(loop_fn_output))
with ops.name_scope("pfor_tiled"):
loop_fn_dtypes = [ops.convert_to_tensor(x).dtype
for x in flattened_loop_fn_outputs]
def tiled_loop_body(j):
offset = j * parallel_iterations + num_remaining_iterations
def tiled_loop_fn(i, pfor_config=None):
if loop_fn_has_config:
return nest.flatten(loop_fn(i + offset, pfor_config=pfor_config))
else:
return nest.flatten(loop_fn(i + offset))
return _pfor_impl(
tiled_loop_fn, parallel_iterations, pfor_config=pfor_config)
tiled_outputs = for_loop(tiled_loop_body, loop_fn_dtypes,
num_tiled_iterations, parallel_iterations=1)
tiled_outputs = [_flatten_first_two_dims(y) for y in tiled_outputs]
with ops.name_scope("pfor"):
iters_value = tensor_util.constant_value(iters)
if iters_value is None or iters_value % parallel_iterations:
outputs = control_flow_ops.cond(
math_ops.equal(num_remaining_iterations, 0),
lambda: tiled_outputs,
lambda: [array_ops.concat([x, y], axis=0)
for x, y in zip(remaining_outputs, tiled_outputs)])
else:
outputs = tiled_outputs
return nest.pack_sequence_as(loop_fn_outputs, nest.flatten(outputs))
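# Worked example of the tiling logic above (illustrative only): with iters=10
# and parallel_iterations=4, the first 10 % 4 == 2 iterations are vectorized
# in the "pfor_untiled" scope, while the remaining 8 iterations run as
# 10 // 4 == 2 sequential chunks of 4 vectorized iterations each in the
# "pfor_tiled" scope; when there is a remainder, the stacked outputs of the
# two parts are concatenated.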
@tf_export("vectorized_map")
def vectorized_map(fn, elems):
"""Parallel map on the list of tensors unpacked from `elems` on dimension 0.
  This method works similarly to tf.map_fn but is optimized to run much faster,
possibly with a much larger memory footprint. The speedups are obtained by
vectorization (see https://arxiv.org/pdf/1903.04243.pdf). The idea behind
vectorization is to semantically launch all the invocations of `fn` in
parallel and fuse corresponding operations across all these invocations. This
fusion is done statically at graph generation time and the generated code is
often similar in performance to a manually fused version.
Because `tf.vectorized_map` fully parallelizes the batch, this method will
generally be significantly faster than using `tf.map_fn`, especially in eager
mode. However this is an experimental feature and currently has a lot of
limitations:
- There should be no data dependency between the different semantic
invocations of `fn`, i.e. it should be safe to map the elements of the
inputs in any order.
  - Stateful kernels are mostly not supported, since these often imply a data
    dependency. A limited set of such stateful kernels is supported though
    (like RandomFoo, Variable operations like reads, etc).
- `fn` has limited support for control flow operations. `tf.cond` in
particular is not supported.
- `fn` should return nested structure of Tensors or Operations. However
if an Operation is returned, it should have zero outputs.
- The shape and dtype of any intermediate or output tensors in the
computation of `fn` should not depend on the input to `fn`.
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same (possibly nested) structure as `elems`, and returns a possibly
nested structure of Tensors and Operations, which may be different than
the structure of `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be mapped over by `fn`.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
Examples:
```python
def outer_product(a):
return tf.tensordot(a, a, 0)
batch_size = 100
a = tf.ones((batch_size, 32, 32))
c = tf.vectorized_map(outer_product, a)
assert c.shape == (batch_size, 32, 32, 32, 32)
```
```python
# Computing per-example gradients
batch_size = 10
num_features = 32
layer = tf.keras.layers.Dense(1)
def model_fn(arg):
with tf.GradientTape() as g:
inp, label = arg
inp = tf.expand_dims(inp, 0)
label = tf.expand_dims(label, 0)
prediction = layer(inp)
loss = tf.nn.l2_loss(label - prediction)
return g.gradient(loss, (layer.kernel, layer.bias))
inputs = tf.random_uniform([batch_size, num_features])
labels = tf.random_uniform([batch_size, 1])
per_example_gradients = tf.vectorized_map(model_fn, (inputs, labels))
assert per_example_gradients[0].shape == (batch_size, num_features, 1)
assert per_example_gradients[1].shape == (batch_size, 1)
```
"""
def loop_fn(i):
gathered_elems = nest.map_structure(lambda x: array_ops.gather(x, i), elems)
return fn(gathered_elems)
batch_size = array_ops.shape(nest.flatten(elems)[0])[0]
return pfor(loop_fn, batch_size)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/parallel_for/control_flow_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""XLA tests for pfor."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class PForTest(PForTestCase):
def test_xla(self):
def compute(x):
return math_ops.reduce_mean(x, axis=0, keepdims=True)
def vectorized_compute(x):
return pfor_control_flow_ops.vectorized_map(compute, x)
result = xla.compile(
vectorized_compute, inputs=[array_ops.ones((10, 5, 3))])
self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))
def test_xla_while_loop(self):
def compute(x):
return math_ops.reduce_mean(x, axis=0, keepdims=True)
def vectorized_compute(x, i):
inp = array_ops.gather(x, i)
output = pfor_control_flow_ops.vectorized_map(compute, inp)
output.set_shape([5, 1])
return output
def while_compute(x):
return control_flow_ops.while_loop_v2(
lambda i, _: i < 10,
lambda i, y: (i + 1, y + vectorized_compute(x, i)),
(0, array_ops.zeros([5, 1])))[1]
result = xla.compile(while_compute, inputs=[array_ops.ones((10, 5, 3))])
expected = array_ops.ones([5, 1]) * 10
self.run_and_assert_equal(expected, result)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/parallel_for/xla_control_flow_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class PForTestCase(test.TestCase):
"""Base class for test cases."""
def _run_targets(self, targets1, targets2=None, run_init=True):
targets1 = nest.flatten(targets1)
targets2 = ([] if targets2 is None else nest.flatten(targets2))
assert len(targets1) == len(targets2) or not targets2
if run_init:
init = variables.global_variables_initializer()
self.evaluate(init)
return self.evaluate(targets1 + targets2)
def run_and_assert_equal(self, targets1, targets2):
outputs = self._run_targets(targets1, targets2)
outputs = nest.flatten(outputs) # flatten SparseTensorValues
n = len(outputs) // 2
for i in range(n):
if outputs[i + n].dtype != np.object:
self.assertAllClose(outputs[i + n], outputs[i], rtol=1e-4, atol=1e-5)
else:
self.assertAllEqual(outputs[i + n], outputs[i])
def _test_loop_fn(self, loop_fn, iters,
loop_fn_dtypes=dtypes.float32,
parallel_iterations=None):
t1 = pfor_control_flow_ops.pfor(loop_fn, iters=iters,
parallel_iterations=parallel_iterations)
t2 = pfor_control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, iters=iters,
parallel_iterations=parallel_iterations)
self.run_and_assert_equal(t1, t2)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/parallel_for/test_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for jacobian and batch_jacobian ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.layers import layers as tf_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops as tf_control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.ops.parallel_for import gradients
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class FullyConnectedModel(object):
def __init__(self, activation_size, num_layers):
self._layers = [
tf_layers.Dense(activation_size, activation=nn.relu)
for _ in range(num_layers)
]
def __call__(self, inp):
activation = inp
for layer in self._layers:
activation = layer(activation)
return activation
def fully_connected_model_fn(batch_size, activation_size, num_layers):
model = FullyConnectedModel(activation_size, num_layers)
inp = random_ops.random_normal([batch_size, activation_size])
return inp, model(inp)
def lstm_model_fn(batch_size, state_size, steps, inputs_size=None):
inputs_size = inputs_size or state_size
inputs = [
random_ops.random_normal([batch_size, inputs_size]) for _ in range(steps)
]
cell = rnn_cell.BasicLSTMCell(state_size)
init_state = cell.zero_state(batch_size, dtypes.float32)
state = init_state
for inp in inputs:
_, state = cell(inp, state)
return init_state.c, state.c
def dynamic_lstm_model_fn(batch_size, state_size, max_steps):
# We make inputs and sequence_length constant so that multiple session.run
# calls produce the same result.
inputs = constant_op.constant(
np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)
sequence_length = constant_op.constant(
np.random.randint(0, size=[batch_size], high=max_steps + 1),
dtype=dtypes.int32)
cell = rnn_cell.BasicLSTMCell(state_size)
initial_state = cell.zero_state(batch_size, dtypes.float32)
return inputs, rnn.dynamic_rnn(
cell,
inputs,
sequence_length=sequence_length,
initial_state=initial_state)
def create_fc_batch_jacobian(batch_size, activation_size, num_layers):
inp, output = fully_connected_model_fn(batch_size, activation_size,
num_layers)
pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
while_jacobian = gradients.batch_jacobian(output, inp, use_pfor=False)
return pfor_jacobian, while_jacobian
def create_lstm_batch_jacobian(batch_size, state_size, steps, inputs_size=None):
inp, output = lstm_model_fn(batch_size, state_size, steps,
inputs_size=inputs_size)
pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
while_jacobian = gradients.batch_jacobian(output, inp, use_pfor=False)
return pfor_jacobian, while_jacobian
def create_dynamic_lstm_batch_jacobian(batch_size, state_size, max_steps):
inp, (_, final_state) = dynamic_lstm_model_fn(batch_size, state_size,
max_steps)
pfor_jacobian = gradients.batch_jacobian(final_state.c, inp, use_pfor=True)
# Note that use_pfor=False does not work above given the current limitations
# on implementation of while_loop. So we statically unroll the looping in the
# jacobian computation.
while_gradients = [
gradient_ops.gradients(array_ops.gather(final_state.c, i, axis=1), inp)[0]
for i in range(state_size)
]
return pfor_jacobian, while_gradients
def create_lstm_batch_hessian(batch_size, state_size, steps):
inp, output = lstm_model_fn(batch_size, state_size, steps)
pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
pfor_jacobian = array_ops.reshape(pfor_jacobian, [batch_size, -1])
pfor_hessian = gradients.batch_jacobian(pfor_jacobian, inp, use_pfor=True)
# TODO(agarwal): using two nested while_loop doesn't seem to work here.
# Hence we use pfor_jacobian for computing while_hessian.
while_jacobian = pfor_jacobian
while_hessian = gradients.batch_jacobian(while_jacobian, inp, use_pfor=False)
return pfor_hessian, while_hessian
def create_lstm_hessian(batch_size, state_size, steps):
_, output = lstm_model_fn(batch_size, state_size, steps)
weights = variables.trainable_variables()
pfor_jacobians = gradients.jacobian(output, weights, use_pfor=True)
pfor_hessians = [
gradients.jacobian(x, weights, use_pfor=True) for x in pfor_jacobians
]
# TODO(agarwal): using two nested while_loop doesn't seem to work here.
# Hence we use pfor_jacobians for computing while_hessians.
while_jacobians = pfor_jacobians
while_hessians = [
gradients.jacobian(x, weights, use_pfor=False) for x in while_jacobians
]
return pfor_hessians, while_hessians
def create_fc_per_eg_grad(batch_size, activation_size, num_layers):
inp = random_ops.random_normal([batch_size, activation_size])
layers = [
tf_layers.Dense(activation_size, activation=nn.relu)
for _ in range(num_layers)
]
projection = tf_layers.Dense(1)
def model_fn(activation):
for layer in layers:
activation = layer(activation)
activation = projection(activation)
activation = nn.l2_loss(activation)
return gradient_ops.gradients(activation, variables.trainable_variables())
def loop_fn(i):
return model_fn(array_ops.expand_dims(array_ops.gather(inp, i), 0))
pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
loop_fn_dtypes = [x.dtype for x in variables.trainable_variables()]
while_outputs = control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, batch_size)
return pfor_outputs, while_outputs
def create_lstm_per_eg_grad(batch_size, state_size, steps, inputs_size=None):
inputs_size = inputs_size or state_size
inputs = [
random_ops.random_normal([batch_size, inputs_size]) for _ in range(steps)
]
cell = rnn_cell.BasicLSTMCell(state_size)
init_state = cell.zero_state(batch_size, dtypes.float32)
def model_fn(inps, init_state):
state = init_state
for inp in inps:
_, state = cell(inp, state)
output = nn.l2_loss(state.c)
return gradient_ops.gradients(output, variables.trainable_variables())
def loop_fn(i):
loop_inputs = [
array_ops.expand_dims(array_ops.gather(x, i), 0) for x in inputs
]
loop_init_state = rnn_cell.LSTMStateTuple(
*[array_ops.expand_dims(array_ops.gather(x, i), 0) for x in init_state])
return model_fn(loop_inputs, loop_init_state)
pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
loop_fn_dtypes = [x.dtype for x in variables.trainable_variables()]
while_outputs = control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, batch_size)
return pfor_outputs, while_outputs
# Importing the code from tensorflow_models seems to cause errors. Hence we
# duplicate the model definition here.
# TODO(agarwal): Use the version in tensorflow_models/official instead.
class Mnist(keras_training.Model):
def __init__(self, data_format):
"""Creates a model for classifying a hand-written digit.
Args:
data_format: Either 'channels_first' or 'channels_last'.
"""
super(Mnist, self).__init__()
if data_format == "channels_first":
self._input_shape = [-1, 1, 28, 28]
else:
assert data_format == "channels_last"
self._input_shape = [-1, 28, 28, 1]
self.conv1 = tf_layers.Conv2D(
32, 5, padding="same", data_format=data_format, activation=nn.relu)
self.conv2 = tf_layers.Conv2D(
64, 5, padding="same", data_format=data_format, activation=nn.relu)
self.fc1 = tf_layers.Dense(1024, activation=nn.relu)
self.fc2 = tf_layers.Dense(10)
self.dropout = tf_layers.Dropout(0.4)
self.max_pool2d = tf_layers.MaxPooling2D(
(2, 2), (2, 2), padding="same", data_format=data_format)
def __call__(self, inputs, training):
"""Add operations to classify a batch of input images.
Args:
inputs: A Tensor representing a batch of input images.
training: A boolean. Set to True to add operations required only when
training the classifier.
Returns:
A logits Tensor with shape [<batch_size>, 10].
"""
y = array_ops.reshape(inputs, self._input_shape)
y = self.conv1(y)
y = self.max_pool2d(y)
y = self.conv2(y)
y = self.max_pool2d(y)
y = tf_layers.flatten(y)
y = self.fc1(y)
y = self.dropout(y, training=training)
return self.fc2(y)
def create_mnist_autobatch(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
model = Mnist(data_format)
manual = model(images, training=training)
def loop_fn(i):
image = array_ops.gather(images, i)
return model(image, training=training)
pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
while_outputs = control_flow_ops.for_loop(
loop_fn, dtypes.float32, batch_size)
return pfor_outputs, while_outputs, manual
def create_mnist_per_eg_grad(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
sparse_labels = np.random.randint(
low=0, high=10, size=[batch_size]).astype(np.int32)
labels = np.zeros((batch_size, 10)).astype(np.float32)
labels[np.arange(batch_size), sparse_labels] = 1.
model = Mnist(data_format)
def loop_fn(i):
image = array_ops.gather(images, i)
label = array_ops.gather(labels, i)
logits = array_ops.reshape(model(image, training=training), [-1])
loss = losses.softmax_cross_entropy(
logits=logits, onehot_labels=label, reduction=losses.Reduction.NONE)
return gradient_ops.gradients(loss, variables.trainable_variables())
pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
while_outputs = control_flow_ops.for_loop(
loop_fn, [dtypes.float32] * len(variables.trainable_variables()),
batch_size)
return pfor_outputs, while_outputs
def create_mnist_batch_jacobian(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
model = Mnist(data_format)
logits = model(images, training=training)
pfor_jacobian = gradients.batch_jacobian(logits, images, use_pfor=True)
while_jacobian = gradients.batch_jacobian(logits, images, use_pfor=False)
return pfor_jacobian, while_jacobian
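# For reference (shapes inferred from the setup above, not asserted anywhere in
# this file): with logits of shape [batch_size, 10] and images of shape
# [batch_size, 28, 28], the batch jacobians returned above should have shape
# [batch_size, 10, 28, 28].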
def create_mnist_per_eg_jacobian(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
model = Mnist(data_format)
def loop_fn(i, use_pfor):
image = array_ops.gather(images, i)
logits = array_ops.reshape(model(image, training=training), [-1])
return gradients.jacobian(
logits, variables.trainable_variables(), use_pfor=use_pfor)
pfor_outputs = control_flow_ops.pfor(
functools.partial(loop_fn, use_pfor=True),
batch_size)
while_outputs = control_flow_ops.for_loop(
functools.partial(loop_fn, use_pfor=False),
[dtypes.float32] * len(variables.trainable_variables()), batch_size)
return pfor_outputs, while_outputs
def create_fc_per_eg_jacobians(batch_size, activation_size, num_layers):
model = FullyConnectedModel(activation_size=activation_size,
num_layers=num_layers)
inp = random_ops.random_normal([batch_size, activation_size])
output = model(inp)
jacobians = gradients.jacobian(output, variables.trainable_variables())
def loop_fn(i, use_pfor):
inp_i = array_ops.expand_dims(array_ops.gather(inp, i), 0)
output = array_ops.reshape(model(inp_i), [-1])
return gradients.jacobian(
output, variables.trainable_variables(), use_pfor=use_pfor)
per_eg_jacobians_pfor = control_flow_ops.pfor(
functools.partial(loop_fn, use_pfor=True),
batch_size)
per_eg_jacobians_while = control_flow_ops.for_loop(
functools.partial(loop_fn, use_pfor=False),
[dtypes.float32] * len(variables.trainable_variables()), batch_size)
return jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while
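# For reference: since the fully connected model treats examples independently,
# the full-batch `jacobians` above should agree with the per-example jacobians
# stacked along the batch dimension, which is what `test_fc_jacobian` below
# compares.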
@test_util.run_v1_only("b/122612051")
class GradientsTest(test.TestCase):
def run_and_assert_equal(self, targets1, targets2, atol=1e-4, rtol=1e-4):
targets1 = nest.flatten(targets1)
targets2 = nest.flatten(targets2)
assert len(targets1) == len(targets2)
init = variables.global_variables_initializer()
self.evaluate(init)
outputs = self.evaluate(targets1 + targets2)
n = len(outputs) // 2
for i in range(n):
self.assertAllClose(outputs[i], outputs[i + n], rtol=rtol, atol=atol)
def test_no_path(self):
for grad_func in [gradients.jacobian, gradients.batch_jacobian]:
for use_pfor in [True, False]:
x = constant_op.constant([[1.0]])
y = constant_op.constant([[2.0]])
self.assertIsNone(grad_func(y, x, use_pfor=use_pfor))
def test_jacobian_fixed_shape(self):
x = random_ops.random_uniform([2, 2])
y = math_ops.matmul(x, x, transpose_a=True)
jacobian_pfor = gradients.jacobian(y, x, use_pfor=True)
jacobian_while = gradients.jacobian(y, x, use_pfor=False)
answer = ops.convert_to_tensor([[
gradient_ops.gradients(y[0][0], x)[0],
gradient_ops.gradients(y[0][1], x)[0]
], [
gradient_ops.gradients(y[1][0], x)[0],
gradient_ops.gradients(y[1][1], x)[0]
]])
self.run_and_assert_equal(answer, jacobian_pfor)
self.run_and_assert_equal(answer, jacobian_while)
def test_jacobian_scan_shape(self):
# Shape x: [3, 4]
x = random_ops.random_uniform([3, 4])
elems = random_ops.random_uniform([6])
# Shape y: [6, 3, 4]
y = functional_ops.scan(lambda a, e: a + e, elems, initializer=x)
jacobian = gradients.jacobian(y, x)
expected_shape = [6, 3, 4, 3, 4]
self.assertAllEqual(expected_shape, jacobian.shape.as_list())
def test_jacobian_while_loop_shape(self):
# Shape x: [3, 4]
x = random_ops.random_uniform([3, 4])
_, y = tf_control_flow_ops.while_loop(lambda i, a: i > 5.,
lambda i, a: (i + 1, a + i),
(constant_op.constant(0.), x))
# Shape y: [2, 3]
y = y[:2, :3]
jacobian = gradients.jacobian(y, x)
expected_shape = [2, 3, 3, 4]
self.assertAllEqual(expected_shape, jacobian.shape.as_list())
def test_jacobian_unknown_shape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32, shape=[None, None])
y = math_ops.matmul(x, x, transpose_a=True)
jacobian_pfor = gradients.jacobian(y, x, use_pfor=True)
jacobian_while = gradients.jacobian(y, x, use_pfor=False)
answer = ops.convert_to_tensor([[
gradient_ops.gradients(y[0][0], x)[0],
gradient_ops.gradients(y[0][1], x)[0]
], [
gradient_ops.gradients(y[1][0], x)[0],
gradient_ops.gradients(y[1][1], x)[0]
]])
ans, pfor_value, while_value = sess.run(
[answer, jacobian_pfor, jacobian_while],
feed_dict={x: [[1, 2], [3, 4]]})
self.assertAllClose(ans, pfor_value)
self.assertAllClose(ans, while_value)
def test_jacobian_parallel_iterations(self):
x = constant_op.constant([[1., 2], [3, 4]])
y = math_ops.matmul(x, x)
self.assertAllClose(gradients.jacobian(y, x, parallel_iterations=2),
gradients.jacobian(y, x, parallel_iterations=3))
def test_batch_jacobian_bad_shapes(self):
x = random_ops.random_uniform([2, 2])
y = random_ops.random_uniform([3, 2])
with self.assertRaisesRegexp(ValueError, "Need first dimension of output"):
gradients.batch_jacobian(y, x, use_pfor=True)
#def test_batch_jacobian_bad_unknown_shapes(self):
# with self.cached_session() as sess:
# x = array_ops.placeholder(dtypes.float32)
# y = array_ops.concat([x, x], axis=0)
# jacobian = gradients.batch_jacobian(y, x)
# with self.assertRaisesRegexp(errors.InvalidArgumentError,
# "assertion failed"):
# sess.run(jacobian, feed_dict={x: [[1, 2], [3, 4]]})
def test_batch_jacobian_fixed_shape(self):
x = random_ops.random_uniform([2, 3, 5])
y = x * x
batch_jacobian_pfor = gradients.batch_jacobian(y, x, use_pfor=True)
batch_jacobian_while = gradients.batch_jacobian(y, x, use_pfor=False)
two_x = 2 * x
answer = array_ops.stack(
[array_ops.diag(two_x[0]),
array_ops.diag(two_x[1])])
self.run_and_assert_equal(answer, batch_jacobian_pfor)
self.run_and_assert_equal(answer, batch_jacobian_while)
def test_batch_jacobian_unknown_shape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = x * x
batch_jacobian_pfor = gradients.batch_jacobian(y, x, use_pfor=True)
batch_jacobian_while = gradients.batch_jacobian(y, x, use_pfor=False)
two_x = 2 * x
answer = array_ops.stack(
[array_ops.diag(two_x[0]),
array_ops.diag(two_x[1])])
ans, pfor_value, while_value = sess.run(
[answer, batch_jacobian_pfor, batch_jacobian_while],
feed_dict={x: [[1, 2], [3, 4]]})
self.assertAllClose(ans, pfor_value)
self.assertAllClose(ans, while_value)
def test_batch_jacobian_parallel_iterations(self):
x = constant_op.constant([[1., 2], [3, 4]])
w = constant_op.constant([[1., 2, 3, 4], [5, 6, 7, 8]])
y = math_ops.matmul(x, w)
self.assertAllClose(gradients.batch_jacobian(y, x, parallel_iterations=2),
gradients.batch_jacobian(y, x, parallel_iterations=3))
def test_fc_batch_jacobian(self):
pfor_jacobian, while_jacobian = create_fc_batch_jacobian(8, 4, 2)
self.run_and_assert_equal(pfor_jacobian, while_jacobian)
def test_lstm_batch_jacobian(self):
pfor_jacobian, while_jacobian = create_lstm_batch_jacobian(8, 4, 2,
inputs_size=128)
self.run_and_assert_equal(pfor_jacobian, while_jacobian)
@test_util.disable_xla("This test never passed for XLA")
def DISABLED_test_dynamic_lstm_batch_jacobian(self):
pfor_jacobian, while_gradients = create_dynamic_lstm_batch_jacobian(8, 4, 3)
with session.Session() as sess:
init = variables.global_variables_initializer()
self.evaluate(init)
pfor = self.evaluate(pfor_jacobian)
for i in range(4):
while_i = sess.run(while_gradients[i])
self.assertAllClose(while_i, pfor[:, i, ...])
def test_lstm_hessian(self):
pfor_hessian, while_hessian = create_lstm_hessian(2, 2, 2)
self.run_and_assert_equal(pfor_hessian, while_hessian)
def test_lstm_batch_hessian(self):
pfor_hessian, while_hessian = create_lstm_batch_hessian(2, 2, 2)
self.run_and_assert_equal(pfor_hessian, while_hessian)
def test_fc_per_eg_grad(self):
pfor_outputs, while_outputs = create_fc_per_eg_grad(8, 4, 2)
self.run_and_assert_equal(pfor_outputs, while_outputs)
def test_lstm_per_eg_grad(self):
pfor_outputs, while_outputs = create_lstm_per_eg_grad(8, 4, 2)
self.run_and_assert_equal(pfor_outputs, while_outputs)
def test_mnist_per_eg_grad(self):
    # It looks like the CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
    # configuration of Winograd can produce low-precision output, which makes
    # these tests fail. So we disable it here.
os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
    # Note that we are setting training=False here so that dropout produces
# the same result with pfor and with while_loop.
pfor_outputs, while_outputs = create_mnist_per_eg_grad(
4, data_format, training=False)
self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3, atol=1e-2)
os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
def test_mnist_per_eg_jacobian(self):
    # It looks like the CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
    # configuration of Winograd can produce low-precision output, which makes
    # these tests fail. So we disable it here.
os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
    # Note that we are setting training=False here so that dropout produces
# the same result with pfor and with while_loop.
pfor_outputs, while_outputs = create_mnist_per_eg_jacobian(
2, data_format, training=False)
self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3, atol=1e-2)
os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
def test_fc_jacobian(self):
jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while = (
create_fc_per_eg_jacobians(batch_size=8,
activation_size=4,
num_layers=2))
self.run_and_assert_equal(jacobians, per_eg_jacobians_pfor,
rtol=2e-3, atol=1e-3)
self.run_and_assert_equal(jacobians, per_eg_jacobians_while,
rtol=2e-3, atol=1e-3)
def test_indexed_slice(self):
inp = random_ops.random_uniform([3, 2])
output = nn.embedding_lookup(inp, [0, 2])
pfor_jacobian = gradients.jacobian(output, inp, use_pfor=True)
while_jacobian = gradients.jacobian(output, inp, use_pfor=False)
self.run_and_assert_equal(while_jacobian, pfor_jacobian)
class GradientsBenchmarks(test.Benchmark):
def _run(self, targets, iters, name=None):
def _done(t):
      # Note that we don't use tf.control_dependencies since that does not
      # guarantee that the computation on the GPU has actually finished. So we
      # fetch the first element of the output, and assume that this will not be
      # called on empty tensors.
return array_ops.gather(array_ops.reshape(t, [-1]), 0)
targets = [_done(x) for x in nest.flatten(targets)]
sess = session.Session()
with sess:
init = variables.global_variables_initializer()
self.evaluate(init)
self.evaluate(targets)
begin = time.time()
for _ in range(iters):
self.evaluate(targets)
end = time.time()
avg_time_ms = (1000 * (end - begin)) / iters
self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
return avg_time_ms
def benchmark_fc_batch_jacobian(self):
with ops.Graph().as_default():
pfor_jacobian, while_jacobian = create_fc_batch_jacobian(100, 32, 20)
self._run(pfor_jacobian, 100, name="fc_batch_jacobian_pfor")
self._run(while_jacobian, 20, name="fc_batch_jacobian_while")
def benchmark_lstm_batch_jacobian(self):
with ops.Graph().as_default():
pfor_jacobian, while_jacobian = create_lstm_batch_jacobian(
100, 32, 8, inputs_size=128)
self._run(pfor_jacobian, 100, name="lstm_batch_jacobian_pfor")
self._run(while_jacobian, 20, name="lstm_batch_jacobian_while")
def benchmark_lstm_hessian(self):
with ops.Graph().as_default():
pfor_hessian, while_hessian = create_lstm_hessian(2, 2, 10)
self._run(pfor_hessian, 20, name="lstm_hessian_pfor")
      self._run(while_hessian, 3, name="lstm_hessian_while")
def benchmark_lstm_batch_hessian(self):
with ops.Graph().as_default():
pfor_hessian, while_hessian = create_lstm_batch_hessian(4, 4, 10)
self._run(pfor_hessian, 100, name="lstm_batch_hessian_pfor")
      self._run(while_hessian, 20, name="lstm_batch_hessian_while")
def benchmark_fc_per_eg_grad(self):
with ops.Graph().as_default():
pfor_outputs, while_outputs = create_fc_per_eg_grad(100, 32, 3)
self._run(pfor_outputs, 100, name="fc_per_eg_grad_pfor")
self._run(while_outputs, 20, name="fc_per_eg_grad_while")
def benchmark_lstm_per_eg_grad(self):
with ops.Graph().as_default():
pfor_outputs, while_outputs = create_lstm_per_eg_grad(100, 32, 8)
self._run(pfor_outputs, 100, name="lstm_per_eg_grad_pfor")
self._run(while_outputs, 20, name="lstm_per_eg_grad_while")
def benchmark_mnist_autobatch(self):
with ops.Graph().as_default():
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
pfor_outputs, while_outputs, manual = create_mnist_autobatch(
100, data_format, training=False)
self._run(pfor_outputs, 100, name="mnist_pfor")
self._run(while_outputs, 20, name="mnist_while")
self._run(manual, 100, name="mnist_manual")
def benchmark_mnist_per_eg_grad(self):
with ops.Graph().as_default():
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
pfor_outputs, while_outputs = create_mnist_per_eg_grad(
128, data_format, training=True)
self._run(pfor_outputs, 20, name="mnist_per_eg_grad_pfor")
self._run(while_outputs, 20, name="mnist_per_eg_grad_while")
def benchmark_mnist_per_eg_jacobian(self):
with ops.Graph().as_default():
if test.is_gpu_available():
data_format = "channels_first"
else:
data_format = "channels_last"
pfor_outputs, while_outputs = create_mnist_per_eg_jacobian(
16, data_format, training=True)
self._run(pfor_outputs, 20, name="mnist_per_eg_jacobian_pfor")
self._run(while_outputs, 20, name="mnist_per_eg_jacobian_while")
def benchmark_mnist_batch_jacobian(self):
with ops.Graph().as_default():
if test.is_gpu_available():
data_format = "channels_first"
else:
data_format = "channels_last"
pfor_outputs, while_outputs = create_mnist_batch_jacobian(
128, data_format, training=True)
self._run(pfor_outputs, 20, name="mnist_batch_jacobian_pfor")
self._run(while_outputs, 20, name="mnist_batch_jacobian_while")
def benchmark_fc_per_eg_jacobian(self):
with ops.Graph().as_default():
jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while = (
create_fc_per_eg_jacobians(batch_size=128,
activation_size=32,
num_layers=3))
self._run(jacobians, 30, name="fc_jacobians_pfor")
self._run(per_eg_jacobians_pfor, 100,
name="fc_per_eg_jacobians_pfor")
self._run(per_eg_jacobians_while, 10,
name="fc_per_eg_jacobians_while")
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/parallel_for/gradients_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compiled parallel-for loop."""
# pylint: disable=missing-docstring,g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import flags
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
flags.DEFINE_bool(
"op_conversion_fallback_to_while_loop", False,
"If true, falls back to using a while loop for ops for "
"which a converter is not defined.")
def _stack(t, length):
"""stacks `t` `length` times."""
ones = array_ops.ones_like(array_ops.shape(t))
multiples = array_ops.concat([length, ones], 0)
t = array_ops.tile(array_ops.expand_dims(t, 0), multiples)
return wrap(t, True)
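# Illustrative sketch of `_stack` (hypothetical values, for exposition only):
# tiling an unstacked value of shape [2] across a loop of length 3 yields a
# WrappedTensor whose `t` has shape [3, 2], with every row equal to the
# original value.
#
#   t = constant_op.constant([1.0, 2.0])
#   stacked = _stack(t, constant_op.constant([3]))
#   # stacked.t has shape [3, 2]; stacked.is_stacked is True.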
# The following stateful ops can be safely called once, and with the same
# signature as the unconverted version, if their inputs are loop invariant.
# TODO(agarwal): implement a strategy for converting Variable reads/writes. The
# plan is to map each read/write in the loop_fn to a corresponding merged
# read/write in the converted graph. Writes need to be mergeable (e.g.
# AssignAdd) to be used in `pfor`. Given a certain read/write order in the
# loop_fn, doing a one-to-one conversion will simulate executing such
# instructions in lock-step across all iterations.
passthrough_stateful_ops = set([
"VariableV2",
"VarHandleOp",
"ReadVariableOp",
"StackV2",
"TensorArrayWriteV3",
"TensorArrayReadV3",
"TensorArraySizeV3",
])
def _is_stateful_pfor_op(op):
if isinstance(op, WhileOp):
return op.is_stateful
if op.type == "Const":
    # Const does not have an op_def.
return False
if op.type in passthrough_stateful_ops:
return False
assert hasattr(op, "op_def") and op.op_def is not None, op
return op.op_def.is_stateful
# pylint: disable=protected-access
class WhileOp(object):
"""Object for storing state for converting the outputs of a while_loop."""
def __init__(self, exit_node, pfor_ops, pfor_config):
"""Initializer.
Args:
exit_node: A tensor output from the while_loop.
pfor_ops: list of ops inside the current pfor loop.
pfor_config: PForConfig object used while constructing loop body.
"""
self._pfor_config = pfor_config
self._pfor_ops = set(pfor_ops)
self._pfor_op_ids = set([x._id for x in pfor_ops])
assert isinstance(exit_node, ops.Tensor)
self._while_context = exit_node.op._get_control_flow_context()
assert isinstance(self._while_context, control_flow_ops.WhileContext)
self._context_name = self._while_context.name
self._condition = self._while_context.pivot.op.inputs[0]
# Parts of an external while_loop could be created inside a pfor loop.
# However for the purpose here, we declare such loops to be external. Also
# note that we check if the condition was created inside or outside to
# determine if the while_loop was first created inside or outside.
# TODO(agarwal): check that the Enter and Exit of this loop are unstacked.
self._is_inside_loop = self.op_is_inside_loop(self._condition.op)
if self._is_inside_loop:
for e in self._while_context.loop_exits:
assert self.op_is_inside_loop(e.op)
    # Note that the code below tries to reverse engineer an existing while_loop
    # graph by assuming the following pattern of nodes.
#
# NextIteration <---- Body <--- Enter
# | ^
# V ___| Y
# Enter -> Merge -> Switch___
# ^ | N
# | V
# LoopCond Exit
    # Note that elements in the list below correspond one-to-one with each
# other. i.e. these lists are the same size, and the i_th entry corresponds
# to different Operations/Tensors of a single cycle as illustrated above.
# List of Switch ops (ops.Operation) that feed into an Exit Node.
self._exit_switches = []
# List of inputs (ops.Tensor) to NextIteration.
self._body_outputs = []
# List of list of control inputs of the NextIteration nodes.
self._next_iter_control_inputs = []
# List of Merge ops (ops.Operation).
self._enter_merges = []
# List of output (ops.Tensor) of Exit nodes.
self._outputs = []
# List of Enter Tensors.
# There are two types of Enter nodes:
# - The Enter nodes that are used in the `loop_vars` argument to
# `while_loop` (see
# https://www.tensorflow.org/api_docs/python/tf/while_loop). We collect
# these Enter nodes immediately below by tracing backwards from the Exit
# nodes via Exit <- Switch <- Merge <- Enter. You can see this chain in the
# diagram above. This allows us to have a 1:1 correspondence between the
# self._outputs and the first elements in self._enters.
# - The Enter nodes that are used only by the body. They don't appear in the
# `loop_vars` and are not returned from the `while_loop`. In Python code,
# they are usually captured by the body lambda. We collect them below by
# iterating over all the ops in the graph. They are appended to the end of
# self._enters or self._direct_enters, and don't correspond to any outputs
# in self._outputs. Note that we keep the resource/variant Enter nodes in
# self._direct_enters and the constructed while_loop's body uses them
# directly as opposed to passing them as loop variables. This is done
# because the while_body cannot partition the resource/variant Tensors, so
# it has to leave them unchanged.
self._enters = []
self._direct_enters = []
for e in self._while_context.loop_exits:
self._outputs.append(e.op.outputs[0])
switch = e.op.inputs[0].op
assert switch.type == "Switch", switch
self._exit_switches.append(switch)
merge = switch.inputs[0].op
assert merge.type == "Merge", merge
self._enter_merges.append(merge)
enter = merge.inputs[0].op
assert enter.type == "Enter", enter
self._enters.append(enter.outputs[0])
next_iter = merge.inputs[1].op
assert next_iter.type == "NextIteration", next_iter
self._body_outputs.append(next_iter.inputs[0])
self._next_iter_control_inputs.append(next_iter.control_inputs)
# Collect all the Enter nodes that are not part of `loop_vars`, the second
# category described above.
# Also track whether the loop body has any stateful ops.
self._is_stateful = False
for op in ops.get_default_graph().get_operations():
# TODO(agarwal): make sure this works with nested case.
control_flow_context = op._get_control_flow_context()
if control_flow_context is None:
continue
if control_flow_context.name == self._context_name:
self._is_stateful |= _is_stateful_pfor_op(op)
if op.type == "Enter":
output = op.outputs[0]
if output not in self._enters:
if output.dtype in (dtypes.resource, dtypes.variant):
if output not in self._direct_enters:
self._direct_enters.append(output)
else:
self._enters.append(output)
def __str__(self):
"""String representation."""
return "while_loop(%s)" % self.name
@property
def inputs(self):
"""Input to all the Enter nodes."""
return [x.op.inputs[0] for x in self._enters + self._direct_enters]
@property
def control_inputs(self):
"""Control input to all the Enter nodes."""
control_inputs = []
for x in self._enters + self._direct_enters:
control_inputs.extend(x.op.control_inputs)
return control_inputs
@property
def outputs(self):
"""Outputs of all the Exit nodes."""
return self._outputs
@property
def name(self):
"""Context name for the while loop."""
return self._context_name
@property
def is_inside_loop(self):
"""Returns true if the while_loop was created inside the pfor."""
return self._is_inside_loop
def op_is_inside_loop(self, op):
"""True if op was created inside the pfor loop body."""
assert isinstance(op, ops.Operation)
# Note that we use self._pfor_op_ids for the check and not self._pfor_ops
    # since it appears the TensorFlow API could return different Python
# objects representing the same Operation node.
return op._id in self._pfor_op_ids
@property
def is_stateful(self):
return self._is_stateful
@property
def pfor_converter(self):
"""Return a converter for the while loop."""
return self
def _init_pfor(self, parent_pfor, indices, cond_stacked, inputs,
inputs_stacked):
"""Create a PFor object for converting parts of the while_loop.
Args:
parent_pfor: PFor object being used for converting the while_loop.
indices: int32 Tensor of ids for the iterations that are still active
(i.e. did not exit the while_loop).
cond_stacked: True if the while_loop condition is stacked.
inputs: list of input Tensors corresponding 1-to-1 with self._enters. Note
that these Tensors are a subset of the loop variables for the generated
while_loop.
inputs_stacked: List of booleans corresponding 1-to-1 with `inputs`,
indicating if the value is stacked or not.
Returns:
A PFor instance. The instance is initialized by adding conversion mappings
of nodes that will be external to the conversion that the returned
instance will be used for. e.g. Enter nodes as well as Merge and Switch
outputs are mapped to converted values.
"""
num_outputs = len(self._outputs)
assert len(inputs) == len(self._enters)
assert len(inputs_stacked) == len(self._enters)
loop_var = parent_pfor.loop_var
loop_len = array_ops.size(indices)
pfor = PFor(
loop_var,
loop_len,
pfor_ops=self._pfor_ops,
all_indices=indices,
all_indices_partitioned=cond_stacked,
pfor_config=self._pfor_config)
# Map all inputs of Enter nodes in self._direct_enters to their converted
# values.
for enter in self._direct_enters:
enter_input = enter.op.inputs[0]
converted_enter, stacked, is_sparse_stacked = parent_pfor._convert_helper(
enter_input)
# Since these are resources / variants, they should be unstacked.
assert not stacked and not is_sparse_stacked, (enter, converted_enter)
pfor._add_conversion(enter, wrap(converted_enter, False))
# Map all Enter nodes to the inputs.
for enter, inp, stacked in zip(self._enters, inputs, inputs_stacked):
pfor._add_conversion(enter, wrap(inp, stacked))
# Map outputs of Switch and Merge.
for i in range(num_outputs):
wrapped_inp = wrap(inputs[i], inputs_stacked[i])
merge = self._enter_merges[i]
pfor._add_conversion(merge.outputs[0], wrapped_inp)
      # Note that the second output of Merge is typically not used, except
      # possibly as a control dependency. To avoid trying to output the correct
      # value, we employ a hack here: we output a dummy invalid value with an
      # incorrect dtype. This allows control dependencies to work, but using it
      # as an input should typically lead to errors during graph construction
      # due to the dtype mismatch.
# TODO(agarwal): Check in the original graph to see if there are any
# consumers of this Tensor that use it as an input.
pfor._add_conversion(merge.outputs[1],
wrap(constant_op.constant(-1.0), False))
switch = self._exit_switches[i]
# Don't need to worry about switch.output[0] which will feed to Exit node.
pfor._add_conversion(switch.outputs[1], wrapped_inp)
return pfor
def _convert_enter(self, parent_pfor, enter):
"""Converts an Enter node."""
inp, stacked, _ = parent_pfor._convert_helper(enter.op.inputs[0])
control_inputs = [
parent_pfor._convert_helper(x).t for x in enter.op.control_inputs
]
if control_inputs:
with ops.control_dependencies(control_inputs):
inp = array_ops.identity(inp)
return inp, stacked
def _maybe_stacked(self, cache, inp):
"""Heuristic to figue out if the coverting inp leads to a stacked value.
Args:
cache: map from Tensor to boolean indicating stacked/unstacked.
inp: input Tensor.
Returns:
True if `inp` could get stacked. If the function returns False, the
converted value should be guaranteed to be unstacked. If returning True,
it may or may not be stacked.
"""
if inp in cache:
return cache[inp]
if not self.op_is_inside_loop(inp.op):
return False
op = inp.op
output = False
if op.type in [
"Shape",
"Rank",
"ShapeN",
"ZerosLike",
"TensorArrayV3",
"TensorArraySizeV3",
]:
output = False
elif _is_stateful_pfor_op(op):
# This may be fairly aggressive.
output = True
elif op.type == "Exit":
# This may be fairly aggressive.
output = True
else:
for t in op.inputs:
if self._maybe_stacked(cache, t):
output = True
break
cache[inp] = output
return output
def _create_init_values(self, pfor_input):
"""Create arguments passed to converted while_loop."""
with ops.name_scope("while_init"):
loop_len_vector = pfor_input.pfor.loop_len_vector
loop_len = loop_len_vector[0]
num_outputs = len(self._outputs)
inputs = []
maybe_stacked_cache = {}
# Convert all the Enters. Need to do this before checking for stacking
# below.
for i, enter in enumerate(self._enters):
inp, stacked = self._convert_enter(pfor_input.pfor, enter)
inputs.append(inp)
maybe_stacked_cache[enter] = stacked
        # Since this enter node is part of the `loop_vars`, it corresponds to an
        # output and its preceding switch. We mark that switch's output with the
        # same stackedness, to act as the base case for the logic below, where
        # we walk through the body to figure out which inputs might need to be
        # stacked and which inputs can safely remain unstacked.
if i < num_outputs:
maybe_stacked_cache[self._exit_switches[i].outputs[1]] = stacked
# Shape invariants for init_values corresponding to self._enters.
input_shape_invariants = []
# TensorArrays for outputs of converted while loop
output_tas = []
# Shape invariants for output TensorArrays.
ta_shape_invariants = []
# List of booleans indicating stackness of inputs, i.e. tensors
# corresponding to self._enters.
inputs_stacked = []
for i, inp in enumerate(inputs):
enter = self._enters[i]
inp_stacked = self._maybe_stacked(maybe_stacked_cache, enter)
# Note that even when an input is unstacked, the body could make it
        # stacked. We use a heuristic below to figure out if the body may be making
# it stacked.
if i < num_outputs:
body_output = self._body_outputs[i]
if enter.op in self._pfor_ops:
body_output_stacked = self._maybe_stacked(maybe_stacked_cache,
body_output)
else:
# If constructed outside of pfor loop, then the output would not be
# stacked.
body_output_stacked = False
if body_output_stacked and not inp_stacked:
inp = _stack(inp, loop_len_vector).t
inputs[i] = inp
inp_stacked = True
# TODO(agarwal): other attributes for the TensorArray ?
output_tas.append(tensor_array_ops.TensorArray(inp.dtype, loop_len))
ta_shape_invariants.append(tensor_shape.TensorShape(None))
inputs_stacked.append(inp_stacked)
input_shape_invariants.append(tensor_shape.TensorShape(None))
# See documentation for __call__ for the structure of init_values.
init_values = [True, pfor_input.pfor.all_indices] + inputs + output_tas
# TODO(agarwal): try stricter shape invariants
shape_invariants = (
[tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None)
] + input_shape_invariants + ta_shape_invariants)
return init_values, inputs_stacked, shape_invariants
def _process_cond_unstacked(self, conditions, indices, inputs, output_tas):
"""Handles case when condition is unstacked.
Note that all iterations end together. So we don't need to partition the
inputs. When all iterations are done, we write the inputs to the
TensorArrays. Note that we only write to index 0 of output_tas. Since all
iterations end together, they can all be output together.
"""
not_all_done = array_ops.reshape(conditions, [])
new_output_tas = []
# pylint: disable=cell-var-from-loop
for i, out_ta in enumerate(output_tas):
inp = inputs[i]
new_output_tas.append(
control_flow_ops.cond(not_all_done,
lambda: out_ta,
lambda: out_ta.write(0, inp)))
# pylint: enable=cell-var-from-loop
return not_all_done, indices, inputs, new_output_tas
def _process_cond_stacked(self, conditions, indices, inputs, inputs_stacked,
output_tas):
num_outputs = len(self._outputs)
# Compute if all iterations are done.
not_all_done = math_ops.reduce_any(conditions)
conditions_int = math_ops.cast(conditions, dtypes.int32)
# Partition the indices.
done_indices, new_indices = data_flow_ops.dynamic_partition(
indices, conditions_int, 2)
new_inputs = []
new_output_tas = []
for i, (inp, stacked) in enumerate(zip(inputs, inputs_stacked)):
# Partition the inputs.
if stacked:
done_inp, new_inp = data_flow_ops.dynamic_partition(
inp, conditions_int, 2)
else:
# TODO(agarwal): avoid this stacking. See TODO earlier in
# _process_cond_unstacked.
done_inp = _stack(inp, [array_ops.size(done_indices)]).t
new_inp = inp
new_inputs.append(new_inp)
# For iterations that are done, write them to TensorArrays.
if i < num_outputs:
out_ta = output_tas[i]
# Note that done_indices can be empty. done_inp should also be empty in
# that case.
new_output_tas.append(out_ta.scatter(done_indices, done_inp))
return not_all_done, new_indices, new_inputs, new_output_tas
def _process_body(self, pfor_input, inputs_stacked,
new_indices, cond_stacked, new_inputs,
not_all_done):
"""Convert the body function."""
def true_fn(control_inputs, body_pfor, body_output, stacked):
"""Converts the body function for all but last iteration.
This essentially converts body_output. Additionally, it needs to handle
any control dependencies on the NextIteration node. So it creates another
Identity node with the converted dependencies.
"""
converted_control_inp = []
for x in control_inputs:
for t in x.outputs:
converted_control_inp.append(body_pfor._convert_helper(t).t)
if stacked:
# Note convert always does the stacking.
output = body_pfor.convert(body_output)
else:
output, convert_stacked, _ = body_pfor._convert_helper(body_output)
assert convert_stacked == stacked, body_output
with ops.control_dependencies(converted_control_inp):
return array_ops.identity(output)
body_pfor = self._init_pfor(pfor_input.pfor, new_indices,
cond_stacked, new_inputs,
inputs_stacked)
new_outputs = []
for i, (body_output, stacked) in enumerate(
zip(self._body_outputs, inputs_stacked)):
control_inp = self._next_iter_control_inputs[i]
out_dtype = body_output.dtype
# Note that we want to run the body only if not all pfor iterations are
# done. If all are done, we return empty tensors since these values will
# not be used. Notice that the value returned by the loop is based on
# TensorArrays and not directly on these returned values.
# pylint: disable=cell-var-from-loop
new_output = control_flow_ops.cond(
not_all_done,
lambda: true_fn(control_inp, body_pfor, body_output, stacked),
lambda: constant_op.constant([], dtype=out_dtype))
# pylint: enable=cell-var-from-loop
new_outputs.append(new_output)
return new_outputs
def __call__(self, pfor_input):
"""Converter for the while_loop.
The conversion of a while_loop is another while_loop.
The arguments to this converted while_loop are as follows:
not_all_done: Boolean scalar Tensor indicating if all the pfor iterations
are done.
indices: int32 1-D Tensor storing the id of the iterations that are not
done.
args: Remaining arguments. These can be divided into 3 categories:
- First set of arguments are the tensors that correspond to the initial
elements of self._enters. The elements that appear in original while
loop's `loop_vars`.
- The second set of arguments are the tensors that correspond to the
remaining elements of self._enters. These are the tensors that directly
enter the original while loop body.
- Finally, the last set of arguments are TensorArrays. These TensorArrays
correspond to the outputs of the original while_loop, i.e. to the
elements in self._outputs. Each TensorArray has `PFor.loop_len`
elements, i.e. the number of pfor iterations. At the end, the i'th
element of each TensorArray will contain the output computed by the
i'th iteration of pfor. Note that elements can be written into these
tensors arrays in any order, depending on when the corresponding pfor
iteration is done.
If the original while_loop had `k` tensors in its `loop_vars` and its body
directly captured `m` tensors, the `args` will contain `2 * k + m` values.
In each iteration, the while_loop body recomputes the condition for all
active pfor iterations to see which of them are now done. It then partitions
all the inputs and passes them along to the converted body. Values for all
the iterations that are done are written to TensorArrays indexed by the pfor
iteration number. When all iterations are done, the TensorArrays are stacked
to get the final value.
Args:
pfor_input: A PForInput object corresponding to the output of any Exit
node from this while loop.
Returns:
List of converted outputs.
"""
# Create init_values that will be passed to the while_loop.
init_values, inputs_stacked, shape_invariants = self._create_init_values(
pfor_input)
# Note that we use a list as a hack since we need the nested function body
    # to set the value of cond_is_stacked. Python 2.x doesn't support nonlocal
# variables.
cond_is_stacked = [None]
def cond(not_all_done, *_):
return not_all_done
def body(not_all_done, indices, *args):
      # See the documentation for __call__ for the structure of *args.
num_enters = len(self._enters)
inputs = args[:num_enters]
output_tas = args[num_enters:]
# TODO(agarwal): see which outputs have consumers and only populate the
# TensorArrays corresponding to those. Or do those paths get trimmed out
# from inside the while_loop body?
assert len(inputs) >= len(output_tas)
assert len(inputs) == len(inputs_stacked)
# Convert condition
with ops.name_scope("while_cond"):
# Note that we set cond_stacked to True here. At this point we don't
# know if it could be loop invariant, hence the conservative value is
# to assume stacked.
cond_pfor = self._init_pfor(pfor_input.pfor, indices,
cond_stacked=True,
inputs=inputs,
inputs_stacked=inputs_stacked)
conditions, cond_stacked, _ = cond_pfor._convert_helper(self._condition)
cond_is_stacked[0] = cond_stacked
# Recompute the new condition, write outputs of done iterations, and
# partition the inputs if needed.
if not cond_stacked:
(not_all_done, new_indices,
new_inputs, new_output_tas) = self._process_cond_unstacked(
conditions, indices, inputs, output_tas)
else:
(not_all_done, new_indices,
new_inputs, new_output_tas) = self._process_cond_stacked(
conditions, indices, inputs, inputs_stacked, output_tas)
# Convert body
with ops.name_scope("while_body"):
# Compute the outputs from the body.
new_outputs = self._process_body(pfor_input, inputs_stacked,
new_indices, cond_stacked, new_inputs,
not_all_done)
      # Note that the first num_outputs new values of inputs are computed using
      # the body. The rest of them were direct Enters into the condition/body,
      # and the partitioning done earlier is sufficient to give the new values.
num_outputs = len(self._outputs)
new_args = ([not_all_done, new_indices] + new_outputs + list(
new_inputs[num_outputs:]) + new_output_tas)
return tuple(new_args)
while_outputs = control_flow_ops.while_loop(
cond, body, init_values, shape_invariants=shape_invariants)
output_tas = while_outputs[-len(self._outputs):]
outputs = []
assert cond_is_stacked[0] is not None
for inp_stacked, ta in zip(inputs_stacked, output_tas):
if cond_is_stacked[0]:
outputs.append(wrap(ta.stack(), True))
else:
# Note that if while_loop condition is unstacked, all iterations exit at
# the same time and we wrote those outputs in index 0 of the tensor
# array.
outputs.append(wrap(ta.read(0), inp_stacked))
return outputs
class _PforInput(object):
"""Input object passed to registered pfor converters."""
def __init__(self, pfor, op, inputs):
"""Creates a _PforInput object.
Args:
pfor: PFor converter object.
op: the Operation object that is being converted.
inputs: list of WrappedTensor objects representing converted values of the
inputs of `op`.
"""
self.pfor = pfor
self._op = op
self._inputs = inputs
def stack_inputs(self, stack_indices=None):
"""Stacks unstacked inputs at `stack_indices`.
Args:
stack_indices: indices of inputs at which stacking is done. If None,
stacking is done at all indices.
"""
if stack_indices is None:
stack_indices = range(len(self._inputs))
length = self.pfor.loop_len_vector
for i in stack_indices:
inp = self._inputs[i]
if not inp.is_stacked:
self._inputs[i] = _stack(inp.t, length)
def expanddim_inputs_for_broadcast(self):
"""Reshapes stacked inputs to prepare them for broadcast.
Since stacked inputs have an extra leading dimension, automatic broadcasting
rules could incorrectly try to expand dimensions before that leading
dimension. To avoid that, we reshape these stacked inputs to the maximum
rank they will need to be broadcasted to.
"""
if not self._inputs:
return
# Find max rank
def _get_rank(x):
rank = array_ops.rank(x.t)
if not x.is_stacked:
rank += 1
return rank
ranks = [_get_rank(x) for x in self._inputs]
max_rank = ranks[0]
for rank in ranks[1:]:
max_rank = math_ops.maximum(rank, max_rank)
for i, inp in enumerate(self._inputs):
if inp.is_stacked:
shape = array_ops.shape(inp.t)
rank_diff = array_ops.reshape(max_rank - ranks[i], [1])
ones = array_ops.tile([1], rank_diff)
new_shape = array_ops.concat([shape[:1], ones, shape[1:]], axis=0)
self._inputs[i] = wrap(array_ops.reshape(inp.t, new_shape), True)
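  # Worked example for the reshape above (hypothetical shapes): if one input is
  # stacked with shape [N, 3] and another is unstacked with shape [2, 3], the
  # unstacked input contributes an effective rank of 3, so the stacked input is
  # reshaped to [N, 1, 3] and broadcasting then only matches the trailing
  # dimensions, leaving the loop dimension N untouched.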
@property
def inputs(self):
return self._inputs
@property
def num_inputs(self):
return len(self._inputs)
def input(self, index):
assert len(self._inputs) > index, (index, self._inputs)
return self._inputs[index]
def stacked_input(self, index):
t, is_stacked, _ = self.input(index)
if not is_stacked:
op_type = self.op_type
op_def = getattr(self._op, "op_def", None)
if op_def is None:
input_name = "at index %d" % index
else:
input_name = "\"%s\"" % op_def.input_arg[index].name
raise ValueError("Input %s of op \"%s\" expected to be not loop invariant"
".\nError while converting op %s"
"with converted inputs\n%s" % (input_name, op_type,
self._op, self.inputs))
return t
def unstacked_input(self, index):
t, is_stacked, _ = self.input(index)
if is_stacked:
op_type = self.op_type
op_def = getattr(self._op, "op_def", None)
if op_def is None:
input_name = "at index %d" % index
else:
input_name = "\"%s\"" % op_def.input_arg[index].name
raise ValueError("Input %s of op \"%s\" expected to be loop invariant"
".\nError while converting op %s"
"with converted inputs\n%s" % (input_name, op_type,
self._op, self.inputs))
return t
@property
def op(self):
return self._op
@property
def op_type(self):
return self._op.type
def get_attr(self, attr):
return self._op.get_attr(attr)
@property
def outputs(self):
return self._op.outputs
def output(self, index):
assert index < len(self._op.outputs)
return self._op.outputs[index]
_pfor_converter_registry = {}
class RegisterPFor(object):
"""Utility to register converters for pfor.
Usage:
@RegisterPFor(foo_op_type)
def _foo_converter(pfor_input):
...
The above will register conversion function `_foo_converter` for handling
conversion of `foo_op_type`. These converters are called during vectorization
of a `pfor` loop body. For each operation node in this loop body,
the vectorization process will call the converter corresponding to the
operation type of the node.
During conversion, the registered function will be called with a single
argument `pfor_input`, of type `PForInput`, which will contain state needed
for the conversion. When the converter is called for a node, all its inputs
should already have been converted and these converted values are stored in
`pfor_input.inputs`. This registered function should output a list of
WrappedTensor objects with the same length as the number of outputs of the
node being converted. If the node had zero outputs, then it should return an
ops.Operation object. These new sets of nodes should implement the
functionality of running that operation for the number of iterations specified
by `pfor_input.pfor.loop_len_vector[0]` where the inputs of the node for each
iteration are picked from `pfor_input.inputs`.
One tricky aspect of the conversion process is keeping track of, and
leveraging loop invariance of computation. Each converted input is a
WrappedTensor which indicates whether the input was loop invariant or not. If
the converted value is loop invariant, its rank should match the rank of the
corresponding tensor in the loop body, else its rank is larger by 1. The
converter should look at the loop invariance of the inputs and generate new
nodes based on that. Note that the converter will not be called if all inputs
are loop invariant and the operation is not stateful. The converter should
determine if its own output is loop invariant and `wrap` its output
accordingly.
Example:
Here, the converter is trying to convert a Reshape node in the loop body. This
node will have two inputs: the tensor to reshape, and the new shape. The
example here only handles the case where the shape is loop invariant.
@RegisterPFor("Reshape")
def _convert_reshape(pfor_input):
# We assume that input is not loop invariant. Call to `stacked_input`
# asserts that and returns the converted value. This value will have a rank
# larger by 1 compared to the rank of the input in the loop body.
t = pfor_input.stacked_input(0)
# We assume that shape input is loop invariant. Call to `unstacked_input`
# asserts that and returns the converted value.
shape = pfor_input.unstacked_input(1)
# We compute `new_shape` by prepending the number of iterations to the
# original shape.
new_shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape],
axis=0)
# The vectorized output involves reshaping the converted input `t` using
# `new_shape`.
new_output = array_ops.reshape(t, new_shape)
# The converted output is marked as not loop invariant using the call to
# wrap.
return wrap(new_output, True)
"""
def __init__(self, op_type):
"""Creates an object to register a converter for op with type `op_type`."""
self.op_type = op_type
def __call__(self, converter):
name = self.op_type
assert name not in _pfor_converter_registry, "Re-registering %s " % name
_pfor_converter_registry[name] = converter
return converter
class RegisterPForWithArgs(RegisterPFor):
"""Utility to register converters for pfor.
Usage:
  @RegisterPForWithArgs(foo_op_type, foo=value, ....)
def _foo_converter(pfor_input, foo=None, ....):
...
See RegisterPFor for details on the conversion function.
`RegisterPForWithArgs` allows binding extra arguments to the
conversion function at registration time.
"""
def __init__(self, op_type, *args, **kw_args):
super(RegisterPForWithArgs, self).__init__(op_type)
self._args = args
self._kw_args = kw_args
def __call__(self, converter):
def _f(pfor_input):
return converter(pfor_input, self.op_type, *self._args, **self._kw_args)
super(RegisterPForWithArgs, self).__call__(_f)
return converter
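# Hypothetical usage sketch for RegisterPForWithArgs; the "CwiseUnary" op type
# and the `op_func` keyword below are illustrative and not registered anywhere
# in this file:
#
#   @RegisterPForWithArgs("CwiseUnary", op_func=math_ops.abs)
#   def _convert_cwise_unary(pfor_input, op_type, op_func):
#     del op_type
#     # The stacked input carries an extra leading loop dimension; elementwise
#     # ops can be applied to it directly and the result stays stacked.
#     return wrap(op_func(pfor_input.stacked_input(0)), True)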
# TODO(agarwal): call raw_ops instead of calling these low level routines.
def _create_op(op_type, inputs, op_dtypes, attrs=None):
"""Utility to create an op."""
op = ops.get_default_graph().create_op(
op_type, inputs, op_dtypes, attrs=attrs, compute_device=True)
flat_attrs = nest.flatten([(str(a), op.get_attr(str(a))) for a in attrs])
execute.record_gradient(
op_type, op.inputs, tuple(flat_attrs), op.outputs[:], "")
return op
WrappedTensor = collections.namedtuple("WrappedTensor",
["t", "is_stacked", "is_sparse_stacked"])
"""Wrapper around the result of a Tensor conversion.
The additional fields are useful for keeping track of the conversion state as
data flows through the ops in the loop body. For every op whose output is a
Tensor, its converter should return either a WrappedTensor or a list of
WrappedTensors.
Args:
t: The converted tensor
is_stacked: True if the tensor is stacked, i.e. represents the results of all
the iterations of the loop, where each row i of the tensor corresponds to
that op's output on iteration i of the loop. False if the tensor is not
    stacked, i.e. represents the result of the op for a single iteration of
the loop, where the result does not vary between iterations.
is_sparse_stacked: True if the tensor corresponds to a component tensor
(indices, values, or dense_shape) of a sparse tensor, and has been logically
stacked via a sparse conversion.
"""
def wrap(tensor, is_stacked=True, is_sparse_stacked=False):
"""Helper to create a WrappedTensor object."""
assert isinstance(is_stacked, bool)
assert isinstance(is_sparse_stacked, bool)
assert isinstance(tensor, ops.Tensor)
assert not is_sparse_stacked or is_stacked, ("If the wrapped tensor is "
"stacked via a sparse "
"conversion, it must also be "
"stacked.")
return WrappedTensor(tensor, is_stacked, is_sparse_stacked)
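# Minimal sketch of the wrapping convention (hypothetical shapes): for a pfor
# loop of length N = 4, a loop-invariant value keeps its original shape and is
# wrapped with is_stacked=False, while a per-iteration value carries a leading
# dimension of size N and is wrapped with is_stacked=True.
#
#   invariant = wrap(constant_op.constant([1.0, 2.0]), False)  # shape [2]
#   per_iteration = wrap(array_ops.zeros([4, 2]), True)        # shape [4, 2]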
def _fallback_converter(pfor_input):
logging.warn("Using a while_loop for converting %s", pfor_input.op_type)
output_dtypes = [x.dtype for x in pfor_input.outputs]
iters = pfor_input.pfor.loop_len_vector[0]
def while_body(i, *ta_list):
"""Body of while loop."""
inputs = [
x[i, ...] if stacked else x for x, stacked, _ in pfor_input.inputs
]
op_outputs = _create_op(
pfor_input.op_type,
inputs,
output_dtypes,
attrs=pfor_input.op.node_def.attr).outputs
outputs = []
for out, ta in zip(op_outputs, ta_list):
assert isinstance(out, ops.Tensor)
outputs.append(ta.write(i, array_ops.expand_dims(out, 0)))
return tuple([i + 1] + outputs)
ta_list = control_flow_ops.while_loop(
lambda i, *ta: i < iters, while_body, [0] + [
tensor_array_ops.TensorArray(dtype, iters) for dtype in output_dtypes
])[1:]
return tuple([wrap(ta.concat(), True) for ta in ta_list])
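# Sketch of the fallback semantics above: for an op with no registered
# converter, the generated while_loop runs the original op once per pfor
# iteration on that iteration's slice of each stacked input, and the
# per-iteration outputs are concatenated along a new leading dimension.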
class PForConfig(object):
"""A configuration object used to communicate with loop body function."""
def __init__(self):
# This may be set to the number of iterations.
self._maybe_iters = None
# Map from output placeholder to the unvectorized tensor.
self._reduce_concat_map = object_identity.ObjectIdentityDictionary()
# Reverse map of `self._reduce_concat_map`.
self._reverse_reduce_concat_map = object_identity.ObjectIdentityDictionary()
def _has_reductions(self):
"""True if some reductions where performed by loop body."""
return len(self._reduce_concat_map)
def _set_iters(self, iters):
"""Set number of pfor iterations."""
self._maybe_iters = iters
# TODO(agarwal): handle reductions inside control flow constructs.
def reduce_concat(self, x):
"""Performs a concat reduction on `x` across pfor iterations.
Note that this currently may not work inside a control flow construct.
Args:
x: an unvectorized Tensor.
Returns:
A Tensor that has rank one higher than `x`. The value is the vectorized
version of `x`, i.e. stacking the value of `x` across different pfor
iterations.
"""
assert not context.executing_eagerly()
assert isinstance(x, ops.Tensor)
if x not in self._reduce_concat_map:
out_shape = tensor_shape.TensorShape([self._maybe_iters]).concatenate(
x.shape)
with ops.control_dependencies([x]):
# Control dependency to make sure out is converted after x.
out = array_ops.placeholder(x.dtype, out_shape)
self._reduce_concat_map[out] = x
self._reverse_reduce_concat_map[x] = out
return out
else:
return self._reverse_reduce_concat_map[x]
def reduce_mean(self, x):
"""Performs a mean reduction on `x` across pfor iterations.
Note that this currently may not work inside a control flow construct.
Args:
x: an unvectorized Tensor.
Returns:
A Tensor that has same rank as `x`. The value is the mean of the values
of `x` across the pfor iterations.
"""
y = self.reduce_concat(x)
return math_ops.reduce_mean(y, axis=0)
def reduce_sum(self, x):
"""Performs a sum reduction on `x` across pfor iterations.
Note that this currently may not work inside a control flow construct.
Args:
x: an unvectorized Tensor.
Returns:
A Tensor that has same rank as `x`. The value is the sum of the values
of `x` across the pfor iterations.
"""
y = self.reduce_concat(x)
return math_ops.reduce_sum(y, axis=0)
def _lookup_reduction(self, pl):
"""Lookups Placeholder `pl` in the reduction map."""
msg = "Expected Tensor, got {} of type {}."
assert isinstance(pl, ops.Tensor), msg.format(pl, type(pl))
return self._reduce_concat_map.get(pl, None)
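# Hypothetical usage sketch for the reductions above: the loop body function
# given to the public pfor API can accept a PForConfig as a second argument and
# call its reduce_* methods on unvectorized values, e.g.
#
#   def loop_fn(i, pfor_config):
#     x = array_ops.gather(inp, i)  # `inp` assumed to be defined outside.
#     return x - pfor_config.reduce_mean(x)
#
# which subtracts the mean across pfor iterations from each iteration's value.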
class PFor(object):
"""Implementation of rewrite of parallel-for loops.
This class takes a DAG or a set of DAGs representing the body of a
parallel-for loop, and adds new operations to the graph that implements
functionality equivalent to running that loop body for a specified number of
iterations. This new set of nodes may or may not use a tensorflow loop
construct.
The process of conversion does not delete or change any existing operations.
It only adds operations that efficiently implement the equivalent
functionality. We refer to the added ops as "converted ops".
The conversion process uses a simple greedy heuristic. It walks the loop body
and tries to express the functionality of running each node in a loop with a
new set of nodes. When converting an op several cases are possible:
- The op is not inside the loop body. Hence it can be used as is.
- The op does not depend on the iteration number and is stateless. In this
case, it can be used as is.
- The op is not stateful, and depends on iteration number only through control
dependencies. In this case, we can create a single op with same inputs and
attributes, but with "converted" control dependencies.
- The op is not stateful, and all its inputs are loop invariant. In this
case, similar to above, we can create a single op with same inputs and
attributes, but with "converted" control dependencies.
- The op is stateful or at least one of the inputs is not loop invariant. In
this case, we run the registered converter for that op to create a set of
converted ops. All nodes in the set will have converted control dependencies
corresponding to control dependencies of the original op. If the op returned
multiple outputs, "converted outputs" could be produced by different ops in
this set.
"""
def __init__(self,
loop_var,
loop_len,
pfor_ops,
all_indices=None,
all_indices_partitioned=False,
pfor_config=None):
"""Creates an object to rewrite a parallel-for loop.
Args:
loop_var: ops.Tensor output of a Placeholder operation. The value should
be an int32 scalar representing the loop iteration number.
loop_len: A scalar or scalar Tensor representing the number of iterations
the loop is run for.
pfor_ops: List of all ops inside the loop body.
all_indices: If not None, an int32 vector with size `loop_len`
representing the iteration ids that are still active. These values
should be unique and sorted. However they may not be contiguous. This is
typically the case when inside a control flow construct which has
partitioned the indices of the iterations that are being converted.
all_indices_partitioned: If True, this object is being constructed from a
control flow construct where not all the pfor iterations are guaranteed
to be active.
pfor_config: PForConfig object used while constructing the loop body.
"""
assert isinstance(loop_var, ops.Tensor)
assert loop_var.op.type == "Placeholder"
self._loop_var = loop_var
loop_len_value = tensor_util.constant_value(loop_len)
if loop_len_value is not None:
loop_len = loop_len_value
self._loop_len_vector = array_ops.reshape(loop_len, [1])
self._all_indices_partitioned = all_indices_partitioned
if all_indices_partitioned:
assert all_indices is not None
self.all_indices = (
math_ops.range(loop_len) if all_indices is None else all_indices)
self._conversion_map = object_identity.ObjectIdentityDictionary()
self._conversion_map[loop_var] = wrap(self.all_indices, True)
self._pfor_ops = set(pfor_ops)
self._pfor_op_ids = set([x._id for x in pfor_ops])
self._pfor_config = pfor_config
def op_is_inside_loop(self, op):
"""True if op was created inside the pfor loop body."""
assert isinstance(op, ops.Operation)
# Note that we use self._pfor_op_ids for the check and not self._pfor_ops
    # since it appears the TensorFlow API could return different Python
# objects representing the same Operation node.
return op._id in self._pfor_op_ids
def _convert_sparse(self, y):
"""Returns the converted value corresponding to SparseTensor y.
For SparseTensors, instead of stacking the component tensors separately,
resulting in component tensors with shapes (N, m, rank), (N, m), and (N,
rank) respectively for indices, values, and dense_shape (where N is the loop
length and m is the number of sparse tensor values per loop iter), we want
to logically stack the SparseTensors, to create a SparseTensor whose
components are size (N * m, rank + 1), (N * m, ), and (rank + 1,)
respectively.
Here, we try to get the conversion of each component tensor.
If the tensors are stacked via a sparse conversion, return the resulting
SparseTensor composed of the converted components. Otherwise, the component
tensors are either unstacked or stacked naively. In the latter case, we
unstack the component tensors to reform loop_len SparseTensor elements,
then correctly batch them.
The unstacked tensors must have the same rank. Each dimension of each
SparseTensor will expand to be the largest among all SparseTensor elements
for that dimension. For example, if there are N SparseTensors of rank 3
being stacked, with N dense shapes, where the i_th shape is (x_i, y_i, z_i),
the new dense shape will be (N, max_i(x_i), max_i(y_i), max_i(z_i)).
Args:
y: A tf.SparseTensor.
Returns:
A tf.SparseTensor that is the converted value corresponding to y.
"""
outputs = [
self._convert_helper(t) for t in (y.indices, y.values, y.dense_shape)
]
assert all(isinstance(o, WrappedTensor) for o in outputs)
if all(w.is_sparse_stacked for w in outputs):
return sparse_tensor.SparseTensor(*[w.t for w in outputs])
assert not any(w.is_sparse_stacked for w in outputs), (
"Error converting SparseTensor. All components should be logically "
"stacked, or none.")
# If component tensors were not sparsely stacked, they are either unstacked
# or stacked without knowledge that they are components of sparse tensors.
# In this case, we have to restack them.
return self._restack_sparse_tensor_logically(
*[self._unwrap_or_tile(w) for w in outputs])
def _restack_sparse_tensor_logically(self, indices, values, shape):
sparse_tensor_rank = indices.get_shape().dims[-1].value
if sparse_tensor_rank is not None:
sparse_tensor_rank += 1
def fn(args):
res = gen_sparse_ops.serialize_sparse(
args[0], args[1], args[2], out_type=dtypes.variant)
return res
# Applies a map function to the component tensors to serialize each
# sparse tensor element and batch them all, then deserializes the batch.
# TODO(rachelim): Try to do this without map_fn -- add the right offsets
# to shape and indices tensors instead.
result = map_fn.map_fn(
fn, [indices, values, shape], dtype=dtypes.variant)
return sparse_ops.deserialize_sparse(
result, dtype=values.dtype, rank=sparse_tensor_rank)
def _unwrap_or_tile(self, wrapped_tensor):
"""Given a wrapped tensor, unwrap if stacked. Otherwise, tiles it."""
output, is_stacked = wrapped_tensor.t, wrapped_tensor.is_stacked
if is_stacked:
return output
else:
return _stack(output, self._loop_len_vector).t
def convert(self, y):
"""Returns the converted value corresponding to y.
Args:
y: An ops.Tensor or an ops.Operation object. If the latter, y should not
  have any outputs.
Returns:
If y does not need to be converted, it returns y as is. Else it returns
the "converted value" corresponding to y.
"""
if y is None:
return None
if isinstance(y, sparse_tensor.SparseTensor):
return self._convert_sparse(y)
assert isinstance(y, (ops.Tensor, ops.Operation)), y
output = self._convert_helper(y)
if isinstance(output, WrappedTensor):
assert isinstance(y, ops.Tensor)
return self._unwrap_or_tile(output)
else:
assert isinstance(y, ops.Operation)
assert not y.outputs
assert isinstance(output, ops.Operation)
return output
def _was_converted(self, t):
"""True if t is not a conversion of itself."""
converted_t = self._conversion_map[t]
return converted_t.t is not t
def _add_conversion(self, old_output, new_output):
assert isinstance(old_output, (ops.Tensor, ops.Operation)), old_output
assert isinstance(new_output, (WrappedTensor, ops.Operation)), new_output
self._conversion_map[old_output] = new_output
def _convert_helper(self, op_or_tensor):
stack = [op_or_tensor]
while stack:
y = stack[0]
if y in self._conversion_map:
assert isinstance(self._conversion_map[y],
(WrappedTensor, ops.Operation))
stack.pop(0)
continue
if isinstance(y, ops.Operation):
assert not y.outputs, (
"We only support converting Operation objects with no outputs. "
"Got %s", y)
y_op = y
else:
assert isinstance(y, ops.Tensor), y
y_op = y.op
is_while_loop = y_op.type == "Exit"
if is_while_loop:
while_op = WhileOp(
y, pfor_ops=self._pfor_ops, pfor_config=self._pfor_config)
is_inside_loop = while_op.is_inside_loop
# If all nodes in the while_loop graph were created inside the pfor, we
# treat the whole loop subgraph as a single op (y_op) and try to convert
# it. For while_loops that are created completely or partially outside,
# we treat them as external and should be able to simply return the Exit
# node output as is without needing any conversion. Note that for
# while_loops that are partially constructed inside, we assume they will
# be loop invariant. If that is not the case, it will create runtime
# errors since the converted graph would depend on the self._loop_var
# placeholder.
if is_inside_loop:
y_op = while_op
else:
is_inside_loop = self.op_is_inside_loop(y_op)
# If this op was not created inside the loop body, we will return as is.
# 1. Convert inputs and control inputs.
def _add_to_stack(x):
if x not in self._conversion_map:
stack.insert(0, x)
return True
else:
return False
if is_inside_loop:
added_to_stack = False
for inp in y_op.inputs:
added_to_stack |= _add_to_stack(inp)
for cinp in y_op.control_inputs:
if cinp.outputs:
for t in cinp.outputs:
added_to_stack |= _add_to_stack(t)
else:
added_to_stack |= _add_to_stack(cinp)
if added_to_stack:
continue
converted_inputs = [self._conversion_map[inp] for inp in y_op.inputs]
some_input_converted = any(self._was_converted(x) for x in y_op.inputs)
some_input_stacked = any(x.is_stacked for x in converted_inputs)
converted_control_ops = set()
some_control_input_converted = False
for cinp in y_op.control_inputs:
if cinp.outputs:
for t in cinp.outputs:
converted_t = self._conversion_map[t]
if self._was_converted(t):
some_control_input_converted = True
converted_control_ops.add(converted_t.t.op)
else:
converted_cinp = self._conversion_map[cinp]
assert isinstance(converted_cinp, ops.Operation)
if converted_cinp != cinp:
some_control_input_converted = True
converted_control_ops.add(converted_cinp)
converted_control_ops = list(converted_control_ops)
is_stateful = _is_stateful_pfor_op(y_op)
else:
converted_inputs = []
converted_control_ops = []
logging.vlog(3, "converting op:%s\ninputs:%s\ncontrol_inputs:%s", y_op,
converted_inputs, converted_control_ops)
# 2. Convert y_op
# If converting a while_loop, we let the while_loop convertor deal with
# putting the control dependencies appropriately.
control_dependencies = [] if is_while_loop else converted_control_ops
with ops.control_dependencies(control_dependencies), ops.name_scope(
y_op.name + "/pfor/"):
# Op is a placeholder for a reduction.
if (self._pfor_config is not None and
self._pfor_config._lookup_reduction(y) is not None):
# Handle reductions. Map the placeholder to the unvectorized input
# that is being reduced.
reduction_input = self._pfor_config._lookup_reduction(y)
assert isinstance(reduction_input, ops.Tensor), reduction_input
# Tensor being reduced should already be converted due to a control
# dependency on the created placeholder.
# Note that in cases where reduction_input is in an outer context, one
# needs to locate the corresponding Enter node and use that to lookup
# the conversion.
# TODO(agarwal): handle reductions inside control flow constructs.
assert reduction_input in self._conversion_map, (
"Unable to handle reduction of %s, possibly as it was used "
"inside a control flow construct. Note that reductions across "
"pfor iterations are currently not supported inside control flow "
"constructs." % reduction_input)
output = self._conversion_map[reduction_input]
# If original input is not stacked, we tile it. Also we always mark
# output as unstacked.
new_outputs = [wrap(self._unwrap_or_tile(output), False)]
# None of the inputs and control inputs were converted.
elif ((not is_inside_loop or
(not is_stateful and not some_input_converted and
not some_control_input_converted)) and
y.graph == ops.get_default_graph()):
if y is y_op:
assert not isinstance(y_op, WhileOp)
new_outputs = y_op
else:
new_outputs = [wrap(x, False) for x in y_op.outputs]
elif not (is_stateful or is_while_loop or some_input_stacked):
# All inputs are unstacked or unconverted but some control inputs are
# converted.
# TODO(rachelim): Handle the case where some inputs are sparsely
# stacked (i.e. any(x.is_sparse_stacked for x in converted_inputs))
new_op = _create_op(y_op.type, [x.t for x in converted_inputs],
[x.dtype for x in y_op.outputs],
y_op.node_def.attr)
if y is y_op:
new_outputs = new_op
else:
new_outputs = [wrap(x, False) for x in new_op.outputs]
else:
# Either some inputs are not loop invariant or op is stateful.
if hasattr(y_op, "pfor_converter"):
converter = y_op.pfor_converter
else:
converter = _pfor_converter_registry.get(y_op.type, None)
if converter is None:
if flags.FLAGS.op_conversion_fallback_to_while_loop:
converter = _fallback_converter
else:
raise ValueError(
"No converter defined for %s\n%s\ninputs: %s. "
"\nEither add a converter or set "
"--op_conversion_fallback_to_while_loop=True, "
"which may run slower" % (y_op.type, y_op, converted_inputs))
# TODO(rachelim): Handle the case where some inputs are sparsely
# stacked. We should only call the converter if it supports handling
# those inputs.
new_outputs = converter(_PforInput(self, y_op, converted_inputs))
if isinstance(new_outputs, WrappedTensor):
new_outputs = [new_outputs]
assert isinstance(new_outputs,
(list, tuple, ops.Operation)), new_outputs
logging.vlog(2, "converted %s %s", y_op, new_outputs)
# Insert into self._conversion_map
if y is y_op:
assert isinstance(new_outputs, ops.Operation)
self._add_conversion(y_op, new_outputs)
else:
assert len(y_op.outputs) == len(new_outputs), (
y_op, y_op.outputs, new_outputs)
for old_output, new_output in zip(y_op.outputs, new_outputs):
assert isinstance(new_output, WrappedTensor), (new_output, y, y_op)
self._add_conversion(old_output, new_output)
stack.pop(0)
return self._conversion_map[op_or_tensor]
@property
def loop_len_vector(self):
"""Returns a single element vector whose value is number of iterations."""
return self._loop_len_vector
@property
def loop_var(self):
"""Returns placeholder loop variable."""
return self._loop_var
@property
def pfor_ops(self):
return self._pfor_ops
@property
def pfor_config(self):
return self._pfor_config
@property
def all_indices_partitioned(self):
"""all_indices_partitioned property.
Returns:
True if we are inside a control flow construct and not all pfor iterations
may be active.
"""
return self._all_indices_partitioned
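# Hedged usage sketch: the helper below is an illustrative, hypothetical driver
# (not part of this module's API) showing how a graph-mode caller is expected to
# wire up PFor: trace the loop body once with a scalar int32 placeholder, collect
# the newly created ops, and ask the converter for the vectorized output. The
# real entry points (e.g. the public `pfor` wrapper) live elsewhere in the
# parallel_for package; names here are purely for illustration.
def _example_drive_pfor(loop_fn, iters):
  """Illustrative only; assumes graph mode and a single-tensor loop_fn output."""
  graph = ops.get_default_graph()
  existing_ops = set(graph.get_operations())
  loop_var = array_ops.placeholder(dtypes.int32, shape=[])
  loop_fn_output = loop_fn(loop_var)
  new_ops = [op for op in graph.get_operations() if op not in existing_ops]
  converter = PFor(loop_var, iters, new_ops)
  return converter.convert(loop_fn_output)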
# The code below defines converters for different operations. Please see comment
# for RegisterPFor to see how converters should be defined.
# nn_ops
def _flatten_first_two_dims(x):
"""Merges first two dimensions."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([[-1], old_shape[2:]], axis=0)
return array_ops.reshape(x, new_shape)
def _unflatten_first_dim(x, first_dim):
"""Splits first dimension into [first_dim, -1]."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([first_dim, [-1], old_shape[1:]], axis=0)
return array_ops.reshape(x, new_shape)
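# Hedged illustration (hypothetical helper, not used by any converter): the two
# helpers above are inverses when `first_dim` matches the original leading
# dimension, which is the pattern most converters below rely on: flatten the
# loop dimension into the batch dimension, run the kernel once, then unflatten.
def _example_flatten_unflatten_roundtrip():
  x = array_ops.ones([3, 4, 5])               # stacked value: loop_len=3, batch=4
  flat = _flatten_first_two_dims(x)            # shape [12, 5]
  restored = _unflatten_first_dim(flat, [3])   # shape [3, 4, 5] again
  return flat, restored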
def _inputs_with_flattening(pfor_input, input_indices):
"""Stacks and flattens first dim of inputs at indices `input_indices`."""
if input_indices is None:
input_indices = []
pfor_input.stack_inputs(stack_indices=input_indices)
inputs = []
for i in range(pfor_input.num_inputs):
if i in input_indices:
inp = pfor_input.stacked_input(i)
inp = _flatten_first_two_dims(inp)
else:
inp = pfor_input.unstacked_input(i)
inputs.append(inp)
return inputs
@RegisterPForWithArgs("Conv2D", dims=[0])
@RegisterPForWithArgs("AvgPool", dims=[0])
@RegisterPForWithArgs("MaxPool", dims=[0])
@RegisterPForWithArgs("MaxPool3D", dims=[0])
@RegisterPForWithArgs("MaxPool3DGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPool3DGradGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGradGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("SoftmaxCrossEntropyWithLogits", dims=[0, 1])
def _convert_flatten_batch(pfor_input, op_type, dims):
del op_type
inputs = _inputs_with_flattening(pfor_input, dims)
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs]
return [wrap(x, True) for x in outputs]
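# Hedged worked example for the flatten-batch pattern above: a Conv2D whose
# per-iteration input has shape [B, H, W, C] receives a stacked input of shape
# [N, B, H, W, C]. The converter flattens it to [N * B, H, W, C], runs the
# original kernel once over the enlarged batch, and unflattens each output back
# into a stacked [N, B, H', W', C'] tensor.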
_channel_flatten_input_cache = {}
def _channel_flatten_input(x, data_format):
"""Merge the stack dimension with the channel dimension.
If S is pfor's stacking dimension, then:
  - for SNCHW, we transpose to NSCHW. If the N dimension has size 1, the
    transpose should be cheap.
  - for SNHWC, we transpose to NHWCS.
We then merge the S and C dimensions.
Args:
x: ops.Tensor to transform.
data_format: "NCHW" or "NHWC".
Returns:
A 3-element tuple with the transformed value, along with the shape for
reshape and order for transpose required to transform back.
"""
graph = ops.get_default_graph()
cache_key = (graph, x.experimental_ref(), data_format)
if cache_key not in _channel_flatten_input_cache:
x_shape = array_ops.shape(x)
if data_format == b"NCHW":
order = [1, 0, 2, 3, 4]
shape = array_ops.concat([x_shape[1:2], [-1], x_shape[3:]], axis=0)
reverse_order = order
else:
order = [1, 2, 3, 0, 4]
shape = array_ops.concat([x_shape[1:4], [-1]], axis=0)
reverse_order = [3, 0, 1, 2, 4]
# Move S dimension next to C dimension.
x = array_ops.transpose(x, order)
reverse_shape = array_ops.shape(x)
# Reshape to merge the S and C dimension.
x = array_ops.reshape(x, shape)
outputs = x, reverse_order, reverse_shape
_channel_flatten_input_cache[cache_key] = outputs
else:
outputs = _channel_flatten_input_cache[cache_key]
return outputs
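# Hedged illustration (hypothetical helper, not used by any converter): for an
# NHWC input stacked as [S, N, H, W, C], `_channel_flatten_input` transposes to
# [N, H, W, S, C] and reshapes to [N, H, W, S * C]; applying the returned
# `reverse_shape` and then `reverse_order` undoes the transformation.
def _example_channel_flatten_roundtrip(x):
  y, reverse_order, reverse_shape = _channel_flatten_input(x, b"NHWC")
  # y has shape [N, H, W, S * C]; split C back out, then move S to the front.
  restored = array_ops.transpose(
      array_ops.reshape(y, reverse_shape), reverse_order)
  return y, restored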
# Note that with training=True, running FusedBatchNormV3 on individual examples
# is very different from running FusedBatchNormV3 on a batch of those examples.
# This is because, for the latter case, the operation can be considered as first
# computing the mean and variance over all the examples and then using these
# to scale all those examples. This creates a data dependency between these
# different "iterations" since the inputs to the scaling step depends on the
# statistics coming from all these inputs.
# As with other kernels, the conversion here effectively runs the kernel
# independently for each iteration, and returns outputs by stacking outputs from
# each of those iterations.
@RegisterPFor("FusedBatchNormV3")
def _convert_fused_batch_norm(pfor_input):
is_training = pfor_input.get_attr("is_training")
# When BatchNorm is used with training=False, mean and variance are provided
# externally and used as is by the op. Thus, we can merge the S and N
# dimensions as we do for regular operations.
# When BatchNorm is used with training=True, mean and variance are computed
# for each channel across the batch dimension (first one). If we merge S and N
# dimensions, means and variances would be computed over a larger set. So, we
# merge the S and C dimensions instead.
if not is_training:
# We return zeros for batch_mean and batch_variance output. Note that CPU
# and GPU seem to have different behavior for those two outputs. CPU outputs
# zero because these values are not used during inference. GPU outputs
# something, probably real means and variances.
inputs = _inputs_with_flattening(pfor_input, [0])
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
y = outputs[0]
n = pfor_input.pfor.loop_len_vector
y = _unflatten_first_dim(y, n)
mean = pfor_input.unstacked_input(3)
zeros = array_ops.zeros_like(mean)
return [wrap(y, True)] + [wrap(zeros, False)] * 5
pfor_input.stack_inputs()
data_format = pfor_input.get_attr("data_format")
# We merge the first dimension with the "C" dimension, run FusedBatchNormV3,
# and then transpose back.
x = pfor_input.stacked_input(0)
x, reverse_order, reverse_shape = _channel_flatten_input(x, data_format)
# Note that we stack all the other inputs as well so that they are the same
# size as the new size of the channel dimension.
inputs = [x] + [
array_ops.reshape(pfor_input.stacked_input(i), [-1])
for i in range(1, pfor_input.num_inputs)
]
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
y = outputs[0]
y = array_ops.reshape(y, reverse_shape)
y = array_ops.transpose(y, reverse_order)
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
outputs = [y] + outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("FusedBatchNormGradV3")
def _convert_fused_batch_norm_grad(pfor_input):
pfor_input.stack_inputs()
data_format = pfor_input.get_attr("data_format")
y_backprop = pfor_input.stacked_input(0)
y_backprop, _, _ = _channel_flatten_input(y_backprop, data_format)
x = pfor_input.stacked_input(1)
x, x_reverse_order, x_reverse_shape = _channel_flatten_input(x, data_format)
inputs = [y_backprop, x] + [
array_ops.reshape(pfor_input.stacked_input(i), [-1])
for i in range(2, pfor_input.num_inputs)
]
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
x_backprop = outputs[0]
x_backprop = array_ops.reshape(x_backprop, x_reverse_shape)
x_backprop = array_ops.transpose(x_backprop, x_reverse_order)
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
outputs = [x_backprop] + outputs
return [wrap(output, True) for output in outputs]
@RegisterPForWithArgs("Conv2DBackpropInput", flatten_dims=[2], shape_dim=0)
@RegisterPForWithArgs("AvgPoolGrad", flatten_dims=[1], shape_dim=0)
def _convert_flatten_batch_shape_input(pfor_input, op_type, flatten_dims,
shape_dim):
del op_type
inputs = _inputs_with_flattening(pfor_input, flatten_dims)
n = pfor_input.pfor.loop_len_vector
# Adjust the `input_sizes` input.
ones = array_ops.ones(
[array_ops.shape(inputs[shape_dim])[0] - 1], dtype=n.dtype)
inputs[shape_dim] *= array_ops.concat([n, ones], axis=0)
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
outputs = [_unflatten_first_dim(x, n) for x in outputs]
return [wrap(x, True) for x in outputs]
@RegisterPFor("Conv2DBackpropFilter")
def _convert_conv2d_backprop_filter(pfor_input):
pfor_input.stack_inputs(stack_indices=[2])
inputs, inputs_stacked, _ = pfor_input.input(0)
filter_sizes = pfor_input.unstacked_input(1)
grads = pfor_input.stacked_input(2)
strides = pfor_input.get_attr("strides")
padding = pfor_input.get_attr("padding")
use_cudnn_on_gpu = pfor_input.get_attr("use_cudnn_on_gpu")
data_format = pfor_input.get_attr("data_format")
dilations = pfor_input.get_attr("dilations")
if inputs_stacked:
# TODO(agarwal): Implement this efficiently.
logging.warn("Conv2DBackpropFilter uses a while_loop. Fix that!")
def while_body(i, ta):
inp_i = inputs[i, ...]
grad_i = grads[i, ...]
output = nn_ops.conv2d_backprop_filter(
inp_i,
filter_sizes,
grad_i,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations)
return i + 1, ta.write(i, array_ops.expand_dims(output, 0))
n = array_ops.reshape(pfor_input.pfor.loop_len_vector, [])
_, ta = control_flow_ops.while_loop(
lambda i, ta: i < n, while_body,
(0, tensor_array_ops.TensorArray(inputs.dtype, n)))
output = ta.concat()
return wrap(output, True)
else:
# We merge the stack dimension with the channel dimension of the gradients
# and pretend we had a larger filter (see change to filter_sizes below).
# Once the filter backprop is computed, we reshape and transpose back
# appropriately.
grads, _, _ = _channel_flatten_input(grads, data_format)
n = pfor_input.pfor.loop_len_vector
old_filter_sizes = filter_sizes
filter_sizes *= array_ops.concat([[1, 1, 1], n], axis=0)
output = nn_ops.conv2d_backprop_filter(
inputs,
filter_sizes,
grads,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations)
new_filter_shape = array_ops.concat([old_filter_sizes[:3], n, [-1]], axis=0)
output = array_ops.reshape(output, new_filter_shape)
output = array_ops.transpose(output, [3, 0, 1, 2, 4])
return wrap(output, True)
@RegisterPForWithArgs("LogSoftmax", gen_nn_ops.log_softmax)
@RegisterPForWithArgs("Softmax", gen_nn_ops.softmax)
def _convert_softmax(pfor_input, op_type, op_func):
del op_type
return wrap(op_func(pfor_input.stacked_input(0)), True)
# array_ops
@RegisterPForWithArgs("Identity", array_ops.identity)
@RegisterPForWithArgs("StopGradient", array_ops.stop_gradient)
@RegisterPForWithArgs("MatrixDiag", array_ops.matrix_diag)
@RegisterPForWithArgs("MatrixDiagPart", array_ops.matrix_diag_part)
def _convert_identity(pfor_input, op_type, op_func):
del op_type
return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
@RegisterPFor("IdentityN")
def _convert_identity_n(pfor_input):
outputs = array_ops.identity_n([x.t for x in pfor_input.inputs])
return [wrap(out, inp.is_stacked) for out, inp in
zip(outputs, pfor_input.inputs)]
@RegisterPFor("Reshape")
def _convert_reshape(pfor_input):
t = pfor_input.stacked_input(0)
shape = pfor_input.unstacked_input(1)
new_shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
return wrap(array_ops.reshape(t, new_shape), True)
@RegisterPFor("BroadcastTo")
def _convert_broadcast_to(pfor_input):
t = pfor_input.stacked_input(0)
shape = pfor_input.unstacked_input(1)
new_shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
# Expand dims of stacked t to broadcast against the new shape.
# TODO(davmre): consider factoring out common code with
# `expanddim_inputs_for_broadcast`, which has similar logic but with
# implicit shapes (of input Tensors) rather than explicit shapes.
rank_diff = array_ops.shape(new_shape)[0] - array_ops.rank(t)
ones = array_ops.tile([1], array_ops.reshape(rank_diff, [1]))
t_shape = array_ops.shape(t)
t_expanded_shape = array_ops.concat([t_shape[:1], ones, t_shape[1:]], axis=0)
return wrap(array_ops.broadcast_to(array_ops.reshape(t, t_expanded_shape),
new_shape), True)
@RegisterPFor("ExpandDims")
def _convert_expanddims(pfor_input):
t = pfor_input.stacked_input(0)
dim = pfor_input.unstacked_input(1)
dim += math_ops.cast(dim >= 0, dtypes.int32)
return wrap(array_ops.expand_dims(t, axis=dim), True)
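# Hedged sketch (hypothetical helper): several converters above and below shift
# non-negative axis/dim arguments by one because converted tensors carry an
# extra leading loop dimension, while negative values keep counting from the
# end and need no shift. E.g. dim=1 becomes 2, but dim=-1 stays -1.
def _example_shift_axis_past_loop_dim(axis):
  return axis + math_ops.cast(axis >= 0, dtypes.int32)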
@RegisterPForWithArgs("LowerBound", gen_array_ops.lower_bound)
@RegisterPForWithArgs("UpperBound", gen_array_ops.upper_bound)
def _convert_searchsorted(pfor_input, _, op_func):
pfor_input.stack_inputs()
sorted_inputs = _flatten_first_two_dims(pfor_input.stacked_input(0))
values = _flatten_first_two_dims(pfor_input.stacked_input(1))
out_type = pfor_input.get_attr("out_type")
output = op_func(sorted_inputs, values, out_type)
return wrap(_unflatten_first_dim(
output, pfor_input.pfor.loop_len_vector), True)
@RegisterPFor("MatrixBandPart")
def _convert_matrix_band_part(pfor_input):
t = pfor_input.stacked_input(0)
num_lower = pfor_input.unstacked_input(1)
num_upper = pfor_input.unstacked_input(2)
return wrap(array_ops.matrix_band_part(
t, num_lower=num_lower, num_upper=num_upper), True)
@RegisterPFor("MatrixSetDiag")
def _convert_matrix_set_diag(pfor_input):
pfor_input.stack_inputs()
t = pfor_input.stacked_input(0)
diag = pfor_input.stacked_input(1)
return wrap(array_ops.matrix_set_diag(t, diag), True)
# Registrations for MatrixDiagV2, MatrixDiagPartv2, and MatrixSetDiagV2.
# The input orders defined in the OpKernel and the actual python API are
# different (for compatibility with V1), so we cannot use _convert_identity.
@RegisterPFor("MatrixDiagV2")
def _convert_matrix_diag_v2(pfor_input):
diagonal = pfor_input.stacked_input(0)
k = pfor_input.unstacked_input(1)
num_rows = pfor_input.unstacked_input(2)
num_cols = pfor_input.unstacked_input(3)
padding_value = pfor_input.unstacked_input(4)
return wrap(
array_ops.matrix_diag(
diagonal,
k=k,
num_rows=num_rows,
num_cols=num_cols,
padding_value=padding_value), True)
# See notes for MatrixDiagV2
@RegisterPFor("MatrixDiagPartV2")
def _convert_matrix_diag_part_v2(pfor_input):
input = pfor_input.stacked_input(0) # pylint:disable=redefined-builtin
k = pfor_input.unstacked_input(1)
padding_value = pfor_input.unstacked_input(2)
return wrap(
array_ops.matrix_diag_part(input, k=k, padding_value=padding_value), True)
# See notes for MatrixDiagV2
@RegisterPFor("MatrixSetDiagV2")
def _convert_matrix_set_diag_v2(pfor_input):
pfor_input.stack_inputs([0, 1])
input = pfor_input.stacked_input(0) # pylint:disable=redefined-builtin
diagonal = pfor_input.stacked_input(1)
k = pfor_input.unstacked_input(2)
return wrap(array_ops.matrix_set_diag(input, diagonal, k=k), True)
@RegisterPFor("OneHot")
def _convert_one_hot(pfor_input):
indices = pfor_input.stacked_input(0)
depth = pfor_input.unstacked_input(1)
on_value = pfor_input.unstacked_input(2)
off_value = pfor_input.unstacked_input(3)
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
return wrap(
array_ops.one_hot(indices, depth, on_value, off_value, axis), True)
@RegisterPFor("Slice")
def _convert_slice(pfor_input):
t = pfor_input.stacked_input(0)
begin = pfor_input.unstacked_input(1)
size = pfor_input.unstacked_input(2)
begin = array_ops.concat([[0], begin], axis=0)
size = array_ops.concat([[-1], size], axis=0)
return wrap(array_ops.slice(t, begin, size), True)
@RegisterPFor("Tile")
def _convert_tile(pfor_input):
t = pfor_input.stacked_input(0)
multiples = pfor_input.unstacked_input(1)
multiples = array_ops.concat([[1], multiples], 0)
return wrap(array_ops.tile(t, multiples), True)
@RegisterPFor("Pack")
def _convert_pack(pfor_input):
pfor_input.stack_inputs()
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
return wrap(
array_ops.stack([x.t for x in pfor_input.inputs], axis=axis), True)
@RegisterPFor("Unpack")
def _convert_unpack(pfor_input):
value = pfor_input.stacked_input(0)
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
num = pfor_input.get_attr("num")
return [wrap(x, True) for x in array_ops.unstack(value, axis=axis, num=num)]
@RegisterPFor("Pad")
def _convert_pad(pfor_input):
t = pfor_input.stacked_input(0)
paddings = pfor_input.unstacked_input(1)
paddings = array_ops.concat([[[0, 0]], paddings], 0)
return wrap(array_ops.pad(t, paddings, mode="CONSTANT"), True)
@RegisterPFor("Split")
def _convert_split(pfor_input):
split_dim = pfor_input.unstacked_input(0)
t = pfor_input.stacked_input(1)
num_split = pfor_input.get_attr("num_split")
split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
return [wrap(x, True) for x in array_ops.split(t, num_split, axis=split_dim)]
@RegisterPFor("SplitV")
def _convert_split_v(pfor_input):
t = pfor_input.stacked_input(0)
splits = pfor_input.unstacked_input(1)
split_dim = pfor_input.unstacked_input(2)
split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
return [wrap(x, True) for x in array_ops.split(t, splits, axis=split_dim)]
@RegisterPFor("Squeeze")
def _convert_squeeze(pfor_input):
t = pfor_input.stacked_input(0)
squeeze_dims = pfor_input.get_attr("squeeze_dims")
squeeze_dims = [i + 1 if i >= 0 else i for i in squeeze_dims]
return wrap(array_ops.squeeze(t, axis=squeeze_dims), True)
@RegisterPFor("Transpose")
def _convert_transpose(pfor_input):
t = pfor_input.stacked_input(0)
perm = pfor_input.unstacked_input(1)
new_perm = array_ops.concat([[0], perm + 1], axis=0)
return wrap(array_ops.transpose(t, new_perm), True)
@RegisterPFor("ZerosLike")
def _convert_zeroslike(pfor_input):
t = pfor_input.stacked_input(0)
shape = array_ops.shape(t)[1:]
return wrap(array_ops.zeros(shape, dtype=t.dtype), False)
@RegisterPFor("Gather")
@RegisterPFor("GatherV2")
def _convert_gather(pfor_input):
param, param_stacked, _ = pfor_input.input(0)
indices, indices_stacked, _ = pfor_input.input(1)
op_type = pfor_input.op_type
if op_type == "Gather":
validate_indices = pfor_input.get_attr("validate_indices")
axis = 0
else:
validate_indices = None
axis = pfor_input.unstacked_input(2)
axis_value = tensor_util.constant_value(axis)
if axis_value is not None:
axis = axis_value
if indices_stacked and not param_stacked:
if indices is pfor_input.pfor.all_indices and axis == 0:
param_shape0 = param.shape.dims[0].value
indices_shape0 = indices.shape.dims[0].value
if param_shape0 is not None and indices_shape0 == param_shape0:
# Note that with loops and conditionals, indices may not be contiguous.
# However they will be sorted and unique. So if the shape matches, then
# it must be picking up all the rows of param.
return wrap(param, True)
# TODO(agarwal): use array_ops.slice here.
output = array_ops.gather(
param, indices, validate_indices=validate_indices, axis=axis)
if axis != 0:
axis = control_flow_ops.cond(
axis < 0, lambda: axis + array_ops.rank(param), lambda: axis)
order = array_ops.concat(
[[axis],
math_ops.range(axis),
math_ops.range(axis + 1, array_ops.rank(output))],
axis=0)
output = control_flow_ops.cond(
math_ops.equal(axis, 0), lambda: output,
lambda: array_ops.transpose(output, order))
return wrap(output, True)
if param_stacked:
loop_len_vector = pfor_input.pfor.loop_len_vector
pfor_input.stack_inputs(stack_indices=[1])
indices = pfor_input.stacked_input(1)
param_flat = _flatten_first_two_dims(param)
# Recompute indices to handle stacked param.
indices_offset = math_ops.range(
loop_len_vector[0]) * array_ops.shape(param)[1]
# Reshape indices_offset to allow broadcast addition
ones = array_ops.ones([array_ops.rank(indices) - 1], dtype=dtypes.int32)
new_shape = array_ops.concat([loop_len_vector, ones], axis=0)
indices_offset = array_ops.reshape(indices_offset, new_shape)
indices += indices_offset
# TODO(agarwal): handle axis != 0. May need to transpose param or
# array_ops.gather_nd.
if isinstance(axis, ops.Tensor):
axis_value = tensor_util.constant_value(axis)
else:
try:
axis_value = int(axis)
except TypeError:
axis_value = None
msg = ("Gather, where indices and param are both loop dependent, currently "
"requires axis=0")
if axis_value is not None and axis_value != 0:
raise ValueError("Error while converting %s. %s. Got axis=%d" %
(pfor_input.op, msg, axis))
with ops.control_dependencies(
[check_ops.assert_equal(axis, 0, message=msg)]):
output = array_ops.gather(param_flat, indices)
return wrap(output, True)
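# Hedged worked example for the stacked-param branch above: with loop_len n=2
# and a stacked param of per-iteration shape [3, d], param_flat has shape
# [6, d]. Iteration 0 must gather from rows 0..2 and iteration 1 from rows
# 3..5, so the stacked indices get per-iteration offsets of 0 and 3 before the
# single flat gather. The hypothetical helper below isolates that arithmetic.
def _example_offset_stacked_gather_indices(indices, param_dim0, loop_len_vector):
  offsets = math_ops.range(loop_len_vector[0]) * param_dim0
  ones = array_ops.ones([array_ops.rank(indices) - 1], dtype=dtypes.int32)
  offsets = array_ops.reshape(offsets,
                              array_ops.concat([loop_len_vector, ones], axis=0))
  return indices + offsets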
@RegisterPFor("ConcatV2")
def _convert_concatv2(pfor_input):
n = pfor_input.num_inputs
pfor_input.stack_inputs(stack_indices=range(n - 1))
axis = pfor_input.unstacked_input(n - 1)
axis += math_ops.cast(axis >= 0, axis.dtype)
return wrap(
array_ops.concat([x.t for x in pfor_input.inputs[:n - 1]], axis=axis),
True)
@RegisterPFor("StridedSlice")
def _convert_strided_slice(pfor_input):
inp = pfor_input.stacked_input(0)
begin = pfor_input.unstacked_input(1)
end = pfor_input.unstacked_input(2)
strides = pfor_input.unstacked_input(3)
begin_mask = pfor_input.get_attr("begin_mask")
end_mask = pfor_input.get_attr("end_mask")
ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
new_axis_mask = pfor_input.get_attr("new_axis_mask")
shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
begin = array_ops.concat([[0], begin], axis=0)
end = array_ops.concat([[0], end], axis=0)
strides = array_ops.concat([[1], strides], axis=0)
begin_mask = begin_mask << 1 | 1
end_mask = end_mask << 1 | 1
ellipsis_mask <<= 1
new_axis_mask <<= 1
shrink_axis_mask <<= 1
return wrap(
array_ops.strided_slice(
inp,
begin,
end,
strides,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask), True)
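# Hedged sketch (hypothetical helper): the mask attributes of StridedSlice are
# bitfields over slice dimensions, so prepending the loop dimension shifts every
# bit left by one; setting bit 0 of begin_mask/end_mask makes the slice span the
# whole loop dimension. E.g. an original begin_mask of 0b10 becomes 0b101.
def _example_shift_strided_slice_masks(begin_mask, end_mask, ellipsis_mask,
                                       new_axis_mask, shrink_axis_mask):
  return (begin_mask << 1 | 1, end_mask << 1 | 1, ellipsis_mask << 1,
          new_axis_mask << 1, shrink_axis_mask << 1)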
@RegisterPFor("StridedSliceGrad")
def _convert_strided_slice_grad(pfor_input):
shape = pfor_input.unstacked_input(0)
begin = pfor_input.unstacked_input(1)
end = pfor_input.unstacked_input(2)
strides = pfor_input.unstacked_input(3)
dy = pfor_input.stacked_input(4)
begin_mask = pfor_input.get_attr("begin_mask")
end_mask = pfor_input.get_attr("end_mask")
ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
new_axis_mask = pfor_input.get_attr("new_axis_mask")
shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
begin = array_ops.concat([[0], begin], axis=0)
end = array_ops.concat([[0], end], axis=0)
strides = array_ops.concat([[1], strides], axis=0)
begin_mask = begin_mask << 1 | 1
end_mask = end_mask << 1 | 1
ellipsis_mask <<= 1
new_axis_mask <<= 1
shrink_axis_mask <<= 1
return wrap(
array_ops.strided_slice_grad(
shape,
begin,
end,
strides,
dy,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask), True)
# math_ops
@RegisterPFor("MatMul")
def _convert_matmul(pfor_input):
# TODO(agarwal): Check if tiling is faster than two transposes.
a, a_stacked, _ = pfor_input.input(0)
b, b_stacked, _ = pfor_input.input(1)
tr_a = pfor_input.get_attr("transpose_a")
tr_b = pfor_input.get_attr("transpose_b")
if a_stacked and b_stacked:
output = wrap(math_ops.matmul(a, b, adjoint_a=tr_a, adjoint_b=tr_b), True)
return output
elif a_stacked:
if tr_a:
a = array_ops.transpose(a, [0, 2, 1])
if a.shape.is_fully_defined():
x, y, z = a.shape
else:
x, y, z = [
array_ops.reshape(i, [])
for i in array_ops.split(array_ops.shape(a), 3)
]
a = array_ops.reshape(a, [x * y, z])
prod = math_ops.matmul(a, b, transpose_b=tr_b)
return wrap(array_ops.reshape(prod, [x, y, -1]), True)
else:
assert b_stacked
if tr_b:
perm = [2, 0, 1]
b = array_ops.transpose(b, perm)
else:
# As an optimization, if one of the first two dimensions is 1, then we can
# reshape instead of transpose.
# TODO(agarwal): This check can be done inside Transpose kernel.
b_shape = array_ops.shape(b)
min_dim = math_ops.minimum(b_shape[0], b_shape[1])
perm = control_flow_ops.cond(
math_ops.equal(min_dim, 1), lambda: [0, 1, 2], lambda: [1, 0, 2])
new_shape = array_ops.stack([b_shape[1], b_shape[0], b_shape[2]])
b = array_ops.transpose(b, perm)
b = array_ops.reshape(b, new_shape)
if b.shape.is_fully_defined():
x, y, z = b.shape
else:
x, y, z = [
array_ops.reshape(i, [])
for i in array_ops.split(array_ops.shape(b), 3)
]
b = array_ops.reshape(b, [x, y * z])
prod = math_ops.matmul(a, b, transpose_a=tr_a)
prod = array_ops.reshape(prod, [-1, y, z])
prod = array_ops.transpose(prod, [1, 0, 2])
return wrap(prod, True)
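# Hedged worked example for the stacked-`a` branch above: with n=2 iterations,
# per-iteration a of shape [3, 4] and a loop-invariant b of shape [4, 5], the
# stacked a is [2, 3, 4]; it is reshaped to [6, 4], multiplied into [6, 5], and
# reshaped back to the stacked result [2, 3, 5]. The stacked-`b` branch instead
# moves the loop dimension into b's last axis so a single [3, 4] x [4, 2*5]
# matmul can be reshaped and transposed into the same stacked [2, 3, 5] result.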
# TODO(rmlarsen): Use the converter of BatchMatMulV2 once compatibility window
# is met.
@RegisterPFor("BatchMatMul")
def _convert_batch_mat_mul(pfor_input):
# TODO(agarwal): There may be a more efficient way to do this instead of
# stacking the inputs.
pfor_input.stack_inputs()
x = pfor_input.stacked_input(0)
y = pfor_input.stacked_input(1)
adj_x = pfor_input.get_attr("adj_x")
adj_y = pfor_input.get_attr("adj_y")
x = _flatten_first_two_dims(x)
y = _flatten_first_two_dims(y)
output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
output = _unflatten_first_dim(output, pfor_input.pfor.loop_len_vector)
return wrap(output, True)
@RegisterPFor("BatchMatMulV2")
def _convert_batch_mat_mul_v2(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
x = pfor_input.input(0)[0]
y = pfor_input.input(1)[0]
adj_x = pfor_input.get_attr("adj_x")
adj_y = pfor_input.get_attr("adj_y")
output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
return wrap(output, True)
@RegisterPForWithArgs("Sum", math_ops.reduce_sum)
@RegisterPForWithArgs("Prod", math_ops.reduce_prod)
@RegisterPForWithArgs("Max", math_ops.reduce_max)
@RegisterPForWithArgs("Min", math_ops.reduce_min)
@RegisterPForWithArgs("Mean", math_ops.reduce_mean)
@RegisterPForWithArgs("All", math_ops.reduce_all)
@RegisterPForWithArgs("Any", math_ops.reduce_any)
def _convert_reduction(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
indices = pfor_input.unstacked_input(1)
# Shift positive indices by one to account for the extra dimension.
indices += math_ops.cast(indices >= 0, dtypes.int32)
keep_dims = pfor_input.get_attr("keep_dims")
return wrap(op_func(t, indices, keepdims=keep_dims), True)
@RegisterPForWithArgs("Cumsum", math_ops.cumsum)
@RegisterPForWithArgs("Cumprod", math_ops.cumprod)
def _convert_cumfoo(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
axis = pfor_input.unstacked_input(1)
# Shift positive indices by one to account for the extra dimension.
axis += math_ops.cast(axis >= 0, dtypes.int32)
exclusive = pfor_input.get_attr("exclusive")
reverse = pfor_input.get_attr("reverse")
return wrap(op_func(t, axis, exclusive=exclusive, reverse=reverse), True)
@RegisterPFor("BiasAdd")
def _convert_biasadd(pfor_input):
t, t_stacked, _ = pfor_input.input(0)
bias, bias_stacked, _ = pfor_input.input(1)
data_format = pfor_input.get_attr("data_format").decode()
if bias_stacked:
# BiasAdd only supports 1-D biases, so cast bias to match value and use Add.
pfor_input.expanddim_inputs_for_broadcast()
t, _, _ = pfor_input.input(0)
bias = math_ops.cast(pfor_input.stacked_input(1), t.dtype)
if compat.as_bytes(data_format) == b"NCHW":
b_shape = array_ops.shape(bias)
new_b_shape = array_ops.concat(
[b_shape[:-3], b_shape[-1:], b_shape[-3:-1]], axis=0)
bias = array_ops.reshape(bias, new_b_shape)
return wrap(math_ops.add(t, bias), True)
else:
assert t_stacked, "At least one input to BiasAdd should be loop variant."
if compat.as_bytes(data_format) == b"NCHW":
shape = array_ops.shape(t)
flattened_shape = array_ops.concat([[-1], shape[2:]], axis=0)
t = array_ops.reshape(t, flattened_shape)
t = nn_ops.bias_add(t, bias, data_format="NCHW")
t = array_ops.reshape(t, shape)
return wrap(t, True)
return wrap(nn_ops.bias_add(t, bias, data_format=data_format), True)
@RegisterPFor("UnsortedSegmentSum")
def _convert_unsortedsegmentsum(pfor_input):
pfor_input.stack_inputs([0, 1])
data = pfor_input.stacked_input(0)
segment_ids = pfor_input.stacked_input(1)
# TODO(agarwal): handle stacked?
num_segments = pfor_input.unstacked_input(2)
if segment_ids.dtype != num_segments.dtype:
segment_ids = math_ops.cast(segment_ids, dtypes.int64)
num_segments = math_ops.cast(num_segments, dtypes.int64)
dtype = segment_ids.dtype
segment_shape = array_ops.shape(segment_ids, out_type=dtype)
n = segment_shape[0]
ones = array_ops.ones_like(segment_shape, dtype=dtype)[1:]
segment_offset = num_segments * math_ops.range(n, dtype=dtype)
segment_offset = array_ops.reshape(segment_offset,
array_ops.concat([[n], ones], axis=0))
segment_ids += segment_offset
num_segments = math_ops.cast(num_segments, dtypes.int64) * math_ops.cast(
n, dtypes.int64)
output = math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
new_output_shape = array_ops.concat(
[[n, -1], array_ops.shape(output)[1:]], axis=0)
output = array_ops.reshape(output, new_output_shape)
return wrap(output, True)
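# Hedged worked example for the conversion above: with num_segments=3 and a
# loop of length n=2, iteration 0 keeps segment ids {0, 1, 2} while iteration
# 1's ids are offset to {3, 4, 5}. A single unsorted_segment_sum over
# 3 * 2 = 6 segments then computes both iterations at once, and the [6, ...]
# result is reshaped into the stacked [2, 3, ...] output.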
@RegisterPFor("Cast")
def _convert_cast(pfor_input):
inp = pfor_input.stacked_input(0)
dtype = pfor_input.get_attr("DstT")
return wrap(math_ops.cast(inp, dtype), True)
@RegisterPForWithArgs("Abs", math_ops.abs)
@RegisterPForWithArgs("Acos", math_ops.acos)
@RegisterPForWithArgs("Acosh", math_ops.acosh)
@RegisterPForWithArgs("Add", math_ops.add)
@RegisterPForWithArgs("AddV2", math_ops.add_v2)
@RegisterPForWithArgs("Angle", math_ops.angle)
@RegisterPForWithArgs("Asin", math_ops.asin)
@RegisterPForWithArgs("Asinh", math_ops.asinh)
@RegisterPForWithArgs("Atan", math_ops.atan)
@RegisterPForWithArgs("Atan2", math_ops.atan2)
@RegisterPForWithArgs("Atanh", math_ops.atanh)
@RegisterPForWithArgs("BesselI0e", math_ops.bessel_i0e)
@RegisterPForWithArgs("BesselI1e", math_ops.bessel_i1e)
@RegisterPForWithArgs("BitwiseAnd", bitwise_ops.bitwise_and)
@RegisterPForWithArgs("BitwiseOr", bitwise_ops.bitwise_or)
@RegisterPForWithArgs("BitwiseXor", bitwise_ops.bitwise_xor)
@RegisterPForWithArgs("Ceil", math_ops.ceil)
@RegisterPForWithArgs("Complex", math_ops.complex)
@RegisterPForWithArgs("ComplexAbs", math_ops.complex_abs)
@RegisterPForWithArgs("Conj", math_ops.conj)
@RegisterPForWithArgs("Cos", math_ops.cos)
@RegisterPForWithArgs("Cosh", math_ops.cosh)
@RegisterPForWithArgs("Digamma", math_ops.digamma)
@RegisterPForWithArgs("Div", math_ops.div)
@RegisterPForWithArgs("DivNoNan", math_ops.div_no_nan)
@RegisterPForWithArgs("Elu", nn_ops.elu)
@RegisterPForWithArgs("Erf", math_ops.erf)
@RegisterPForWithArgs("Erfc", math_ops.erfc)
@RegisterPForWithArgs("Exp", math_ops.exp)
@RegisterPForWithArgs("Expm1", math_ops.expm1)
@RegisterPForWithArgs("Floor", math_ops.floor)
@RegisterPForWithArgs("FloorDiv", math_ops.floor_div)
@RegisterPForWithArgs("FloorMod", math_ops.floor_mod)
@RegisterPForWithArgs("Greater", math_ops.greater)
@RegisterPForWithArgs("GreaterEqual", math_ops.greater_equal)
@RegisterPForWithArgs("Igamma", math_ops.igamma)
@RegisterPForWithArgs("IgammaGradA", math_ops.igamma_grad_a)
@RegisterPForWithArgs("Igammac", math_ops.igammac)
@RegisterPForWithArgs("Imag", math_ops.imag)
@RegisterPForWithArgs("Inv", math_ops.inv)
@RegisterPForWithArgs("Invert", bitwise_ops.invert)
@RegisterPForWithArgs("IsFinite", math_ops.is_finite)
@RegisterPForWithArgs("IsInf", math_ops.is_inf)
@RegisterPForWithArgs("IsNan", math_ops.is_nan)
@RegisterPForWithArgs("LeftShift", bitwise_ops.left_shift)
@RegisterPForWithArgs("Less", math_ops.less)
@RegisterPForWithArgs("LessEqual", math_ops.less_equal)
@RegisterPForWithArgs("Lgamma", math_ops.lgamma)
@RegisterPForWithArgs("Log", math_ops.log)
@RegisterPForWithArgs("Log1p", math_ops.log1p)
@RegisterPForWithArgs("LogicalAnd", math_ops.logical_and)
@RegisterPForWithArgs("LogicalNot", math_ops.logical_not)
@RegisterPForWithArgs("LogicalOr", math_ops.logical_or)
@RegisterPForWithArgs("LogicalXor", math_ops.logical_xor)
@RegisterPForWithArgs("Maximum", math_ops.maximum)
@RegisterPForWithArgs("Minimum", math_ops.minimum)
@RegisterPForWithArgs("Mod", math_ops.mod)
@RegisterPForWithArgs("Mul", math_ops.multiply)
@RegisterPForWithArgs("MulNoNan", math_ops.mul_no_nan)
@RegisterPForWithArgs("Neg", math_ops.negative)
@RegisterPForWithArgs("Polygamma", math_ops.polygamma)
@RegisterPForWithArgs("Pow", math_ops.pow)
@RegisterPForWithArgs("Real", math_ops.real)
@RegisterPForWithArgs("RealDiv", math_ops.divide)
@RegisterPForWithArgs("Reciprocal", math_ops.reciprocal)
@RegisterPForWithArgs("Relu", nn_ops.relu)
@RegisterPForWithArgs("Relu6", nn_ops.relu6)
@RegisterPForWithArgs("RightShift", bitwise_ops.right_shift)
@RegisterPForWithArgs("Rint", math_ops.rint)
@RegisterPForWithArgs("Round", math_ops.round)
@RegisterPForWithArgs("Rsqrt", math_ops.rsqrt)
@RegisterPForWithArgs("Selu", nn_ops.selu)
@RegisterPForWithArgs("Sigmoid", math_ops.sigmoid)
@RegisterPForWithArgs("Sign", math_ops.sign)
@RegisterPForWithArgs("Sin", math_ops.sin)
@RegisterPForWithArgs("Sinh", math_ops.sinh)
@RegisterPForWithArgs("Softplus", nn_ops.softplus)
@RegisterPForWithArgs("Softsign", nn_ops.softsign)
@RegisterPForWithArgs("Sqrt", math_ops.sqrt)
@RegisterPForWithArgs("Square", math_ops.square)
@RegisterPForWithArgs("SquaredDifference", math_ops.squared_difference)
@RegisterPForWithArgs("Sub", math_ops.subtract)
@RegisterPForWithArgs("Tan", math_ops.tan)
@RegisterPForWithArgs("Tanh", math_ops.tanh)
@RegisterPForWithArgs("TruncateDiv", math_ops.truncate_div)
@RegisterPForWithArgs("TruncateMod", math_ops.truncate_mod)
@RegisterPForWithArgs("Xdivy", math_ops.xdivy)
@RegisterPForWithArgs("Xlogy", math_ops.xlogy)
@RegisterPForWithArgs("Zeta", math_ops.zeta)
def _convert_cwise(pfor_input, op_type, op_func):
# Note that ops handled here do not have attributes except those listed below
# and hence don't need extra arguments passed to the cwise_op call below.
for attr in pfor_input.op.node_def.attr.keys():
assert attr in [u"T", u"Tout", u"_xla_compile_id"], (op_type, attr)
pfor_input.expanddim_inputs_for_broadcast()
return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
@RegisterPFor("Equal")
def _convert_equal(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
x = pfor_input.input(0)[0]
y = pfor_input.input(1)[0]
incompatible_shape_error = pfor_input.get_attr("incompatible_shape_error")
assert incompatible_shape_error
return wrap(math_ops.equal(x, y), True)
@RegisterPFor("NotEqual")
def _convert_not_equal(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
x = pfor_input.input(0)[0]
y = pfor_input.input(1)[0]
incompatible_shape_error = pfor_input.get_attr("incompatible_shape_error")
assert incompatible_shape_error
return wrap(math_ops.not_equal(x, y), True)
@RegisterPFor("ApproximateEqual")
def _convert_approximate_equal(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
x = pfor_input.input(0)[0]
y = pfor_input.input(1)[0]
tolerance = pfor_input.get_attr("tolerance")
return wrap(math_ops.approximate_equal(x, y, tolerance=tolerance), True)
@RegisterPFor("Shape")
def _convert_shape(pfor_input):
out_type = pfor_input.get_attr("out_type")
return wrap(
array_ops.shape(pfor_input.stacked_input(0), out_type=out_type)[1:],
False)
@RegisterPFor("ShapeN")
def _convert_shape_n(pfor_input):
out_type = pfor_input.get_attr("out_type")
shapes = [
array_ops.shape(x, out_type=out_type)[1:]
if stacked else array_ops.shape(x) for x, stacked, _ in pfor_input.inputs
]
return [wrap(x, False) for x in shapes]
@RegisterPFor("Size")
def _convert_size(pfor_input):
out_type = pfor_input.get_attr("out_type")
n = math_ops.cast(pfor_input.pfor.loop_len_vector[0], out_type)
return wrap(
array_ops.size(pfor_input.stacked_input(0), out_type=out_type) // n,
False)
@RegisterPFor("Rank")
def _convert_rank(pfor_input):
return wrap(array_ops.rank(pfor_input.stacked_input(0)) - 1, False)
@RegisterPFor("AddN")
def _convert_addn(pfor_input):
# AddN does not support broadcasting.
pfor_input.stack_inputs()
return wrap(math_ops.add_n([x.t for x in pfor_input.inputs]), True)
@RegisterPFor("Cross")
def _convert_cross(pfor_input):
pfor_input.stack_inputs()
a = pfor_input.stacked_input(0)
b = pfor_input.stacked_input(1)
return wrap(math_ops.cross(a, b), True)
@RegisterPFor("BiasAddGrad")
def _convert_biasaddgrad(pfor_input):
grad = pfor_input.stacked_input(0)
fmt = pfor_input.get_attr("data_format")
if fmt == b"NCHW":
output = math_ops.reduce_sum(grad, axis=[1, 3, 4], keepdims=False)
else:
grad_shape = array_ops.shape(grad)
last_dim_shape = grad_shape[-1]
first_dim_shape = grad_shape[0]
output = array_ops.reshape(grad, [first_dim_shape, -1, last_dim_shape])
output = math_ops.reduce_sum(output, axis=[1], keepdims=False)
return wrap(output, True)
# Some required ops are not exposed under the tf namespace. Hence relying on
# _create_op to create them.
@RegisterPForWithArgs("EluGrad")
@RegisterPForWithArgs("Relu6Grad")
@RegisterPForWithArgs("ReluGrad")
@RegisterPForWithArgs("SeluGrad")
@RegisterPForWithArgs("SigmoidGrad")
@RegisterPForWithArgs("SoftplusGrad")
@RegisterPForWithArgs("SoftsignGrad")
@RegisterPForWithArgs("TanhGrad")
@RegisterPForWithArgs("SqrtGrad")
@RegisterPForWithArgs("RsqrtGrad")
@RegisterPForWithArgs("ReciprocalGrad")
def _convert_grads(pfor_input, op_type, *args, **kw_args):
del args
del kw_args
# TODO(agarwal): Looks like these ops don't support broadcasting. Hence we
# have to use tiling here.
pfor_input.stack_inputs()
outputs = _create_op(
op_type, [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("Select")
def _convert_select(pfor_input):
pfor_input.stack_inputs()
cond = pfor_input.stacked_input(0)
t = pfor_input.stacked_input(1)
e = pfor_input.stacked_input(2)
cond_rank = array_ops.rank(cond)
cond, t, e = control_flow_ops.cond(
cond_rank > 1, lambda: _inputs_with_flattening(pfor_input, [0, 1, 2]),
lambda: [cond, t, e])
outputs = _create_op(
pfor_input.op_type, [cond, t, e], [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
n = pfor_input.pfor.loop_len_vector
out = control_flow_ops.cond(cond_rank > 1,
lambda: _unflatten_first_dim(outputs[0], n),
lambda: outputs[0])
return [wrap(out, True) for x in outputs]
@RegisterPFor("SelectV2")
def _convert_selectv2(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
cond = pfor_input.input(0)[0]
t = pfor_input.input(1)[0]
e = pfor_input.input(2)[0]
out = array_ops.where_v2(cond, t, e)
return wrap(out, True)
# random_ops
def _transpose_dim_to_front(x, dim):
rank = array_ops.rank(x)
return array_ops.transpose(
x,
perm=array_ops.concat([
[dim],
math_ops.range(0, dim),
math_ops.range(dim + 1, rank)], axis=0))
@RegisterPForWithArgs("RandomUniform")
@RegisterPForWithArgs("RandomUniformInt")
@RegisterPForWithArgs("RandomStandardNormal")
@RegisterPForWithArgs("TruncatedNormal")
def _convert_random(pfor_input, op_type, *args, **kw_args):
del args
del kw_args
inputs = [pfor_input.unstacked_input(i) for i in range(pfor_input.num_inputs)]
# inputs[0] is "shape"
inputs[0] = array_ops.concat(
[pfor_input.pfor.loop_len_vector, inputs[0]], axis=0)
logging.warning(
"Note that %s inside pfor op may not give same output as "
"inside a sequential loop.", op_type)
outputs = _create_op(
op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("RandomGamma")
@RegisterPFor("RandomPoissonV2")
def _convert_random_with_param(pfor_input):
shape = pfor_input.unstacked_input(0)
# param is lam (Poisson rate) or alpha (Gamma shape).
param, param_stacked, _ = pfor_input.input(1)
logging.warning(
"Note that %s inside pfor op may not give same output as "
"inside a sequential loop.", pfor_input.op_type)
if param_stacked:
samples = _create_op(
pfor_input.op_type,
inputs=[shape, param],
op_dtypes=[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs[0]
loop_dim = array_ops.shape(shape)[0]
stacked_samples = _transpose_dim_to_front(samples, loop_dim)
else:
shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
stacked_samples = _create_op(
pfor_input.op_type,
inputs=[shape, param],
op_dtypes=[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs[0]
return wrap(stacked_samples, True)
@RegisterPFor("Multinomial")
def _convert_multinomial(pfor_input):
logits, logits_stacked, _ = pfor_input.input(0)
num_samples = pfor_input.unstacked_input(1)
seed = pfor_input.get_attr("seed")
seed2 = pfor_input.get_attr("seed2")
output_dtype = pfor_input.get_attr("output_dtype")
logging.warning(
"Note that Multinomial inside pfor op may not give same output as "
"inside a sequential loop.")
n = pfor_input.pfor.loop_len_vector[0]
if logits_stacked:
flattened_logits = _flatten_first_two_dims(logits)
samples = gen_random_ops.multinomial(
flattened_logits,
num_samples,
seed=seed, seed2=seed2, output_dtype=output_dtype)
stacked_samples = _unflatten_first_dim(samples, [n])
else:
samples = gen_random_ops.multinomial(
logits, num_samples * n,
seed=seed, seed2=seed2, output_dtype=output_dtype)
stacked_samples = array_ops.transpose(
array_ops.reshape(samples, [-1, n, num_samples]), [1, 0, 2])
return wrap(stacked_samples, True)
# linalg_ops
@RegisterPFor("Cholesky")
def _convert_cholesky(pfor_input):
t = pfor_input.stacked_input(0)
return wrap(linalg_ops.cholesky(t), True)
@RegisterPFor("LogMatrixDeterminant")
def _convert_log_matrix_determinant(pfor_input):
# Input must have shape [N, M, M], so we need to flatten.
t = _flatten_first_two_dims(pfor_input.stacked_input(0))
sign, log_abs_det = linalg_ops.log_matrix_determinant(t)
return [wrap(_unflatten_first_dim(x, pfor_input.pfor.loop_len_vector), True)
for x in (sign, log_abs_det)]
@RegisterPFor("MatrixTriangularSolve")
def _convert_matrix_triangular_solve(pfor_input):
pfor_input.stack_inputs()
matrix = pfor_input.stacked_input(0)
rhs = pfor_input.stacked_input(1)
lower = pfor_input.get_attr("lower")
adjoint = pfor_input.get_attr("adjoint")
output = linalg_ops.matrix_triangular_solve(
matrix, rhs, lower=lower, adjoint=adjoint)
return wrap(output, True)
# logging_ops
@RegisterPFor("Assert")
def _convert_assert(pfor_input):
cond, cond_stacked, _ = pfor_input.input(0)
if cond_stacked:
cond = math_ops.reduce_all(cond)
data_list = [x.t for x in pfor_input.inputs][1:]
return _create_op("Assert", [cond] + data_list, [],
attrs=pfor_input.op.node_def.attr)
@RegisterPFor("Print")
def _convert_print(pfor_input):
# Note that we don't stack all the inputs. Hence unstacked values are printed
# once here vs multiple times in a while_loop.
pfor_input.stack_inputs([0])
outputs = _create_op(
"Print", [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
# data_flow_ops
# TensorArray conversion is tricky since we don't support arrays of
# TensorArrays. For converting them, we consider two distinct cases:
#
# 1. The array is constructed outside the pfor call, and read/written inside the
# loop.
# This is an easier case since we don't need to make an array of TensorArrays.
# A correctness requirement is that these parallel iterations shouldn't attempt
# to write to the same location. Hence at conversion time we disallow
# loop-invariant indices, since those would guarantee a collision. Even if the
# indices are not loop invariant, they could still conflict, and that will
# surface as runtime errors.
#
# 2. The array is constructed and used entirely inside each pfor iteration.
# For simplicity, here we require that the indices used for write/scatter are
# "unstacked". Otherwise it becomes hard to merge the TensorArrays created in
# different pfor iterations. We consider two sub_cases:
#
# 2a Elements written to the array are "stacked"
# To simulate multiple TensorArrays, we may increase the dimension of each
# element of the array. i.e. the i_th row of the j_th entry of the converted
# TensorArray corresponds to the j_th entry of the TensorArray in the i_th
# pfor iteration.
#
# 2b Elements written to the array are "unstacked"
# In this case we don't increase the dimensions to avoid redundant tiling. Each
# iteration is trying to write the same value. So we convert that to a single
# write.
#
# Here are some tricks used to implement the above:
# - TensorArrayV3 constructor encodes the element shape as an attr. Instead of
# trying to trace whether future writes are stacked or unstacked in order to set
# this attr, we set it to correspond to unknown shape.
# - We use the "flow" output of the different ops to track whether the array
# elements are stacked or unstacked. If a stacked write/scatter is done, we make
# the flow stacked as well.
# - We use some heuristic traversal of the graph to track whether the
# TensorArray handle was created inside or outside the pfor loop.
@RegisterPFor("TensorArrayV3")
def _convert_tensor_array_v3(pfor_input):
size = pfor_input.unstacked_input(0)
dtype = pfor_input.get_attr("dtype")
dynamic_size = pfor_input.get_attr("dynamic_size")
clear_after_read = pfor_input.get_attr("clear_after_read")
identical_element_shapes = pfor_input.get_attr("identical_element_shapes")
tensor_array_name = pfor_input.get_attr("tensor_array_name")
handle, flow = data_flow_ops.tensor_array_v3(
size,
dtype=dtype,
# We don't set element shape since we don't know if writes are stacked or
# not yet.
element_shape=None,
dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
identical_element_shapes=identical_element_shapes,
tensor_array_name=tensor_array_name)
# Note we keep flow unstacked for now since we don't know if writes will be
# stacked or not.
return wrap(handle, False), wrap(flow, False)
@RegisterPFor("TensorArraySizeV3")
def _convert_tensor_array_size_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
flow, flow_stacked, _ = pfor_input.input(1)
if flow_stacked:
flow = _unstack_flow(flow)
size = data_flow_ops.tensor_array_size_v3(handle, flow)
return wrap(size, False)
def _handle_inside_pfor(pfor_input, handle):
"""Returns True if handle was created inside the pfor loop."""
# We use some heuristic to find the original TensorArray creation op.
# The logic should handle the common cases (except cond based subgraphs).
# In theory the user could perform different operations on the handle (like
# Reshape, stack multiple handles, etc) which could break this logic.
# TODO(agarwal): handle Switch/Merge.
while handle.op.type in ("Enter", "Identity"):
handle = handle.op.inputs[0]
if handle.op.type not in [
"TensorArrayV3", "TensorArrayGradV3", "TensorArrayGradWithShape"]:
raise ValueError("Unable to find source for handle %s" % handle)
else:
return pfor_input.pfor.op_is_inside_loop(handle.op)
def _unstack_flow(value):
  # TODO(agarwal): consider checking whether this is a Tile op and, if so,
  # using its input. This may avoid running the Tile operations.
return array_ops.gather(value, 0)
@RegisterPFor("TensorArrayReadV3")
def _convert_tensor_array_read_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
index, index_stacked, _ = pfor_input.input(1)
dtype = pfor_input.get_attr("dtype")
flow, flow_stacked, _ = pfor_input.input(2)
if flow_stacked:
flow = _unstack_flow(flow)
is_inside_pfor = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside_pfor:
# Note that if we are inside a control flow construct inside the pfor, and
# only some of the iterations are doing the read (i.e.
# `all_indices_partitioned` is True), then the read operation should only
# return values for the currently active pfor iterations (`all_indices`
# below). Hence, whenever the returned value is stacked (i.e. `flow` is
# stacked), we may need to do an extra gather after reading the values. Also
    # note that if `is_inside_pfor` is false, then values in the tensor array
    # are unstacked, so the check is only needed in this branch.
all_indices = pfor_input.pfor.all_indices
all_indices_partitioned = pfor_input.pfor.all_indices_partitioned
# Note: flow_stacked indicates if values in the TensorArray are stacked or
# not.
if index_stacked:
if flow_stacked:
raise ValueError(
"It looks like TensorArrayReadV3 was called on a TensorArray whose"
" values are not loop-invariant, and the read indices were also"
" not loop invariant. This is currently unsupported.")
value = data_flow_ops.tensor_array_gather_v3(
handle, index, flow, dtype=dtype)
return wrap(value, True)
value = data_flow_ops.tensor_array_read_v3(
handle, index, flow, dtype=dtype)
if flow_stacked and all_indices_partitioned:
value = array_ops.gather(value, all_indices)
return wrap(value, flow_stacked)
# Values in the TensorArray should be unstacked (since different iterations
# couldn't write to the same location). So whether output is stacked or not
# depends on index_stacked.
if index_stacked:
value = data_flow_ops.tensor_array_gather_v3(
handle, index, flow, dtype=dtype)
else:
value = data_flow_ops.tensor_array_read_v3(
handle, index, flow, dtype=dtype)
return wrap(value, index_stacked)
@RegisterPFor("TensorArrayWriteV3")
def _convert_tensor_array_write_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
index, index_stacked, _ = pfor_input.input(1)
value, value_stacked, _ = pfor_input.input(2)
flow, flow_stacked, _ = pfor_input.input(3)
if value_stacked and pfor_input.pfor.all_indices_partitioned:
    # Looks like we are inside a control flow construct in the pfor where not
    # all iterations are active now. We don't allow that, since it could lead to
    # different TensorArray entries having different shapes, which would be hard
    # to merge later.
raise ValueError("Writing non loop invariant values to TensorArray from "
"inside a while_loop/cond not supported.")
if flow_stacked:
flow = _unstack_flow(flow)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
if index_stacked:
raise ValueError("Need indices for %s to be loop invariant" % handle)
if not flow_stacked and not value_stacked:
flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
return wrap(flow_out, False)
else:
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
# TODO(agarwal): Note that if flow is unstacked and value is stacked, then
# this may or may not be a safe situation. flow is unstacked both for a
# freshly created TensorArray, as well as after unstacked values are
# written to it. If it is the latter, then we cannot write a stacked value
# now since that may cause runtime errors due to different shapes in the
# array. At the moment we are not able to handle this gracefully and
# distinguish between the two cases. That would require some heuristic
# traversal of the graph to figure out whether all the writes are
# unstacked or not.
flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
else:
if not index_stacked:
raise ValueError("Need indices for %s to be not loop invariant" % handle)
    # Note that even when index_stacked is true, the actual values in index may
    # still not be unique. If they are not, that will cause a runtime error when
    # executing the scatter operation below.
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, index, value, flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
def _transpose_first_two_dims(value):
# TODO(agarwal): optimize if one of the dims == 1.
value_shape = array_ops.shape(value)
v0 = value_shape[0]
v1 = value_shape[1]
value = array_ops.reshape(value, [v0, v1, -1])
value = array_ops.transpose(value, [1, 0, 2])
new_shape = array_ops.concat([[v1, v0], value_shape[2:]], axis=0)
return array_ops.reshape(value, new_shape)
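# Illustrative (hypothetical values): _transpose_first_two_dims swaps the first
# two dimensions of a tensor and leaves the remaining ones unchanged, e.g.
#
#   x = array_ops.ones([2, 5, 3])
#   y = _transpose_first_two_dims(x)  # y has shape [5, 2, 3]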
@RegisterPFor("TensorArrayGatherV3")
def _convert_tensor_array_gather_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
indices, indices_stacked, _ = pfor_input.input(1)
indices = array_ops.reshape(indices, [-1])
flow, flow_stacked, _ = pfor_input.input(2)
if flow_stacked:
flow = _unstack_flow(flow)
dtype = pfor_input.get_attr("dtype")
# TODO(agarwal): support element_shape attr?
n = pfor_input.pfor.loop_len_vector
value = data_flow_ops.tensor_array_gather_v3(
handle, indices, flow, dtype=dtype)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
# flow_stacked indicates if values in the TensorArray are stacked or not.
if indices_stacked:
if flow_stacked:
raise ValueError(
"It looks like TensorArrayGatherV3 was called on a TensorArray "
"whose values are not loop-invariant, and the indices were also "
"not loop invariant. This is currently unsupported.")
else:
value = _unflatten_first_dim(value, n)
return wrap(value, True)
else:
if flow_stacked:
# Since elements in this array are stacked and `value` was produced by
# gather, its first two dims are "gathered elements" and "stack
# dimension". Our semantics require these two to be flipped.
value = _transpose_first_two_dims(value)
return wrap(value, flow_stacked)
else:
# Values in the TensorArray should be unstacked (since different iterations
# couldn't write to the same location). So whether output is stacked or not
# depends on indices_stacked.
if indices_stacked:
value = _unflatten_first_dim(value, n)
return wrap(value, indices_stacked)
@RegisterPFor("TensorArrayScatterV3")
def _convert_tensor_array_scatter_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
indices, indices_stacked, _ = pfor_input.input(1)
indices = array_ops.reshape(indices, [-1])
value, value_stacked, _ = pfor_input.input(2)
flow, flow_stacked, _ = pfor_input.input(3)
if flow_stacked:
flow = _unstack_flow(flow)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
if indices_stacked:
raise ValueError("Need indices for %s to be loop invariant" % handle)
# Note that flow_stacked indicates if existing values in the array are
# stacked or not.
if not flow_stacked and not value_stacked:
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return wrap(flow_out, False)
if not value_stacked:
# TODO(agarwal): tile in the second dimension directly instead of
# transposing below.
value = _stack(value, pfor_input.pfor.loop_len_vector).t
value = _transpose_first_two_dims(value)
# TODO(agarwal): Note that if a previous write was unstacked, flow will be
# unstacked, and a stacked value may be written here which may cause
# runtime error due to different elements having different shape. We do
# not try to prevent that.
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
if not indices_stacked:
raise ValueError("Need indices for %s to be not loop invariant" % handle)
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
value = _flatten_first_two_dims(value)
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
@RegisterPFor("TensorArrayGradV3")
def _convert_tensor_array_grad_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
flow, flow_stacked, _ = pfor_input.input(1)
if flow_stacked:
flow = _unstack_flow(flow)
source = pfor_input.get_attr("source")
# TODO(agarwal): For now, we assume that gradients are stacked if the
# TensorArrayGradV3 call is being done inside the pfor. Getting that wrong
# will give runtime error due to incorrect shape being written to the
# accumulator. It is difficult to know in advance if gradients written will be
# stacked or not. Note that flow being stacked is not indicative of the
# gradient being stacked or not. Revisit this later.
shape_to_prepend = pfor_input.pfor.loop_len_vector
grad_handle, flow_out = data_flow_ops.tensor_array_grad_with_shape(
handle=handle,
flow_in=flow,
shape_to_prepend=shape_to_prepend,
source=source)
flow_out = _stack(flow_out, pfor_input.pfor.loop_len_vector).t
return [wrap(grad_handle, False), wrap(flow_out, True)]
# StackV2 conversion is tricky since we don't have arrays of StackV2. So similar
# to TensorArrays, we convert them by changing the dimension of the elements
# inside the stack.
#
# We consider two cases:
#
# 1. StackV2 is constructed and used entirely inside the pfor loop.
# We keep a single Stack and perform the push/pop operations of all the
# iterations in lock-step. We also assume that all the iterations perform these
# operations. In case of dynamic control flow, if only some of the iterations
# try to perform a push/pop, then the conversion may not work correctly and may
# cause undefined behavior.
# TODO(agarwal): test StackV2 with dynamic control flow.
#
# 2. StackV2 is constructed outside the pfor loop.
# Performing stack push/pop in a parallel fashion is ill-defined. However given
# that reading stacks created externally is a common operation when computing
# jacobians, we provide some special semantics here as follows.
# - disallow push operations to the stack
# - pop operations are performed in lock step by all iterations, similar to the
# case when the stack is created inside. A single value is popped during the
# lock-step operation and broadcast to all the iterations. Values in the stack
# are assumed to be loop-invariant.
#
# Some other implementation details:
# We use some ad hoc logic to determine whether the values in the Stack data
# structure are loop-invariant or not. When converting push/pop operations, we
# keep track of whether the last conversion used a stacked value or not (see
# _stack_cache below). As a result, if an unstacked value is written first,
# subsequent stacked writes are disallowed even though they could have been
# allowed in theory.
# Map from cache key based on StackV2 handle to a bool indicating whether values
# are stacked or not.
# TODO(agarwal): move _stack_cache inside pfor?
_stack_cache = {}
def _stack_cache_key(pfor_input):
"""Create cache key corresponding to a stack handle."""
op_type = pfor_input.op_type
assert op_type in ["StackPushV2", "StackPopV2"], op_type
orig_handle = pfor_input.op.inputs[0]
while orig_handle.op.type in ["Identity", "Enter"]:
orig_handle = orig_handle.op.inputs[0]
assert orig_handle.op.type == "StackV2", orig_handle.op
return ops.get_default_graph(), pfor_input.pfor, orig_handle
def _stack_handle_inside_pfor(handle, pfor_input):
while handle.op.type in ["Identity", "Enter"]:
handle = handle.op.inputs[0]
assert handle.op.type == "StackV2", (
"Unable to find StackV2 op. Got %s" % handle.op)
return pfor_input.pfor.op_is_inside_loop(handle.op)
@RegisterPFor("StackPushV2")
def _convert_stack_push_v2(pfor_input):
handle = pfor_input.unstacked_input(0)
elem, elem_stacked, _ = pfor_input.input(1)
swap_memory = pfor_input.get_attr("swap_memory")
if not _stack_handle_inside_pfor(pfor_input.op.inputs[0], pfor_input):
raise ValueError("StackPushV2 not allowed on stacks created outside pfor")
stack_cache_key = _stack_cache_key(pfor_input)
stacked = _stack_cache.get(stack_cache_key, None)
if stacked is None:
stacked = elem_stacked
_stack_cache[stack_cache_key] = stacked
else:
# If we previously made it unstacked then we can't revert to being stacked.
if not stacked and elem_stacked:
raise ValueError(
"It looks like the stack was previously determined to be loop"
" invariant, but we are now trying to push a loop dependent value"
" to it. This is currently unsupported.")
if stacked and not elem_stacked:
elem = _stack(elem, pfor_input.pfor.loop_len_vector).t
out = data_flow_ops.stack_push_v2(handle, elem, swap_memory=swap_memory)
return wrap(out, stacked)
# Note that the inputs to this converter will be unstacked. However, it should
# still get called since it is a stateful op.
@RegisterPFor("StackPopV2")
def _convert_stack_pop_v2(pfor_input):
handle = pfor_input.unstacked_input(0)
stack_cache_key = _stack_cache_key(pfor_input)
stacked = _stack_cache.get(stack_cache_key, None)
  # If a StackPushV2 has not been converted yet, we default to unstacked, since
  # the push could be outside of pfor, or the converter may not be called if the
  # inputs are unconverted.
if stacked is None:
stacked = False
_stack_cache[stack_cache_key] = False
elem_type = pfor_input.get_attr("elem_type")
out = data_flow_ops.stack_pop_v2(handle, elem_type)
return wrap(out, stacked)
# parsing_ops
@RegisterPFor("DecodeCSV")
def _convert_decode_csv(pfor_input):
lines = pfor_input.stacked_input(0)
record_defaults = [
pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
]
field_delim = pfor_input.get_attr("field_delim")
use_quote_delim = pfor_input.get_attr("use_quote_delim")
select_cols = pfor_input.get_attr("select_cols")
if not select_cols:
select_cols = None
return [
wrap(t, True) for t in parsing_ops.decode_csv(
lines,
record_defaults,
field_delim=field_delim,
use_quote_delim=use_quote_delim,
select_cols=select_cols)
]
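# Illustrative sketch (not part of this module; hypothetical data): the
# converter above lets pfor vectorize per-line CSV parsing by handing the
# stacked `lines` input to a single batched DecodeCSV:
#
#   from tensorflow.python.framework import constant_op
#   from tensorflow.python.ops import array_ops
#   from tensorflow.python.ops import parsing_ops
#   from tensorflow.python.ops.parallel_for import control_flow_ops
#
#   csv_lines = constant_op.constant(["1,2", "3,4", "5,6"])
#
#   def loop_fn(i):
#     line = array_ops.gather(csv_lines, i)
#     return parsing_ops.decode_csv(line, record_defaults=[[0], [0]])
#
#   # Returns two int32 tensors: [1, 3, 5] and [2, 4, 6].
#   out = control_flow_ops.pfor(loop_fn, 3)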
@RegisterPFor("ParseSingleExample")
def _convert_parse_single_example(pfor_input):
serialized = pfor_input.stacked_input(0)
dense_defaults = [
pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
]
sparse_keys = pfor_input.get_attr("sparse_keys")
dense_keys = pfor_input.get_attr("dense_keys")
sparse_types = pfor_input.get_attr("sparse_types")
dense_shapes = pfor_input.get_attr("dense_shapes")
output = gen_parsing_ops.parse_example(
serialized=serialized,
names=[],
dense_defaults=dense_defaults,
sparse_keys=sparse_keys,
dense_keys=dense_keys,
sparse_types=sparse_types,
dense_shapes=dense_shapes)
return [wrap(t, True, True) for t in nest.flatten(output)]
# functional_ops
@RegisterPFor("StatefulPartitionedCall")
@RegisterPFor("PartitionedCall")
def _convert_partitioned_call(pfor_input):
func_name = pfor_input.get_attr("f").name
func = pfor_input.op.graph._get_function(compat.as_bytes(func_name))
assert isinstance(func.graph, func_graph.FuncGraph), (
"Could not find FuncGraph object for %s. Got func %s" % (func_name, func))
pfor = pfor_input.pfor
converter = PFor(loop_var=pfor.loop_var,
loop_len=pfor.loop_len_vector[0],
pfor_ops=func.graph.get_operations(),
all_indices=pfor.all_indices,
all_indices_partitioned=pfor.all_indices_partitioned,
pfor_config=pfor.pfor_config)
# TODO(agarwal): consider caching this function definition.
@def_function.function
def f(*args):
assert all(isinstance(arg, WrappedTensor) for arg in args), args
assert len(args) == len(func.graph.inputs), (args, func.graph.inputs)
# Map inputs to function arguments.
for inp, arg in zip(func.graph.inputs, args):
converter._add_conversion(inp, arg)
# Convert output tensors.
return tuple([converter._convert_helper(x).t
for x in func._func_graph_outputs])
call_outputs = f(*pfor_input.inputs)
assert len(call_outputs) == len(func._func_graph_outputs)
outputs = []
for call_output, output_tensor in zip(call_outputs, func._func_graph_outputs):
func_output = converter._convert_helper(output_tensor)
outputs.append(wrap(call_output,
func_output.is_stacked,
func_output.is_sparse_stacked))
return outputs
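# Illustrative sketch (not part of this module): the PartitionedCall converters
# above are exercised when the pfor body calls a tf.function, since tracing the
# call emits a (Stateful)PartitionedCall op whose function body is then
# converted recursively:
#
#   from tensorflow.python.eager import def_function
#   from tensorflow.python.framework import dtypes
#   from tensorflow.python.ops import math_ops
#   from tensorflow.python.ops.parallel_for import control_flow_ops
#
#   @def_function.function
#   def double(x):
#     return 2.0 * x
#
#   def loop_fn(i):
#     return double(math_ops.cast(i, dtypes.float32))
#
#   out = control_flow_ops.pfor(loop_fn, 4)  # [0., 2., 4., 6.]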
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/parallel_for/pfor.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vectorization of array kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ArrayTest(PForTestCase):
def test_gather(self):
x = random_ops.random_uniform([3, 3, 3])
def loop_fn(i):
outputs = []
x_i = array_ops.gather(x, i)
for y in [x, x_i]:
axes = [0, 2, -1] if y is x else [0]
for axis in axes:
outputs.append(array_ops.gather(y, 2, axis=axis))
outputs.append(array_ops.gather(y, i, axis=axis))
outputs.append(array_ops.gather(y, [i], axis=axis))
outputs.append(array_ops.gather(y, [i, 2], axis=axis))
outputs.append(array_ops.gather(y, [[2, i], [i, 1]], axis=axis))
return outputs
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 20)
def test_shape(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.shape(x_i), array_ops.shape(x_i, out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])
def test_size(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.size(x_i), array_ops.size(x_i, out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])
def test_rank(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.rank(x_i)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
def test_shape_n(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
return array_ops.shape_n([x_i, x, y, y_i]), array_ops.shape_n(
[x_i, x, y, y_i], out_type=dtypes.int64)
self._test_loop_fn(
loop_fn, 3, loop_fn_dtypes=[dtypes.int32] * 4 + [dtypes.int64] * 4)
def test_reshape(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.reshape(x1, [-1]), array_ops.reshape(x1, [1, 3, 1, -1])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_broadcast_to(self):
x = random_ops.random_uniform([3, 2, 1, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.broadcast_to(x1, [2, 2, 3]),
array_ops.broadcast_to(x1, [1, 2, 1, 3]))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_expand_dims(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.expand_dims(
x1, axis=-1), array_ops.expand_dims(
x1, axis=1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_one_hot(self):
indices = random_ops.random_uniform(
[3, 2, 3], minval=0, maxval=4, dtype=dtypes.int32)
def loop_fn(i):
indices_i = array_ops.gather(indices, i)
return (array_ops.one_hot(indices_i, depth=4, on_value=2., off_value=-2.),
array_ops.one_hot(indices_i, depth=4, axis=1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_searchsorted(self):
sorted_inputs = math_ops.cumsum(random_ops.random_uniform([3, 2, 4]),
axis=-1)
values = random_ops.random_uniform([2, 3], minval=-1, maxval=4.5)
def loop_fn(i):
inputs_i = array_ops.gather(sorted_inputs, i)
return [array_ops.searchsorted(inputs_i, values, out_type=dtypes.int32,
side="left"), # creates LowerBound op.
array_ops.searchsorted(inputs_i, values, out_type=dtypes.int64,
side="right")] # creates UpperBound op.
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])
def test_slice(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.slice(x1, begin=(0, 1), size=(2, 1))
self._test_loop_fn(loop_fn, 3)
def test_tile(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.tile(x1, [2, 1])
self._test_loop_fn(loop_fn, 3)
def test_tile_loop_dependent(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.tile(x1, [i, 1])
with self.assertRaisesRegexp(ValueError, "expected to be loop invariant"):
pfor_control_flow_ops.pfor(loop_fn, 2)
def test_pack(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.stack([x1, y], axis=-1)
self._test_loop_fn(loop_fn, 1)
def test_unpack(self):
x = random_ops.random_uniform([3, 2, 3, 4])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.unstack(
x_i, 4, axis=-1), array_ops.unstack(
x_i, 3, axis=1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 7)
def test_pad(self):
x = random_ops.random_uniform([3, 2, 3])
padding = constant_op.constant([[1, 2], [3, 4]])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.pad(x1, padding, mode="CONSTANT")
self._test_loop_fn(loop_fn, 3)
def test_split(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.split(x1, 2, axis=0), array_ops.split(x1, 3, axis=-1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 5)
def test_split_v(self):
x = random_ops.random_uniform([3, 6, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.split(x1, [2, 1, 3], axis=0),
array_ops.split(x1, [3], axis=-1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 4)
def test_squeeze(self):
x = random_ops.random_uniform([5, 1, 2, 1])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.squeeze(x1, axis=0),
array_ops.squeeze(x1, axis=-1),
array_ops.squeeze(x1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_transpose(self):
x = random_ops.random_uniform([3, 2, 3, 4])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.transpose(x1, [2, 1, 0])
self._test_loop_fn(loop_fn, 3)
def test_zeros_like(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
      z = array_ops.zeros_like(x1)
return z, z + x1
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_concat_v2(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.concat(
[x1, x1, y], axis=0), array_ops.concat(
[x1, x1, y], axis=-1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_unary_cwise_ops(self):
for op in [array_ops.identity, array_ops.stop_gradient]:
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 5])
g.watch(x)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
y = op(x1) + x1
loss = nn.l2_loss(y)
return op(x), y, g.gradient(loss, x1)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_identity_n(self):
x = random_ops.random_uniform([3, 4])
def loop_fn(i):
return array_ops.identity_n([x, array_ops.gather(x, i)])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_matrix_band_part(self):
x = random_ops.random_uniform([3, 4, 2, 2])
for num_lower, num_upper in ((0, -1), (-1, 0), (1, 1)):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return array_ops.matrix_band_part(
array_ops.gather(x, i),
num_lower=num_lower,
num_upper=num_upper)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_matrix_diag(self):
x = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
diagonal = array_ops.gather(x, i)
if compat.forward_compatible(2019, 8, 31):
return array_ops.matrix_diag(diagonal, k=(0, 1), num_rows=4, num_cols=5)
return array_ops.matrix_diag(diagonal)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])
def test_matrix_diag_part(self):
x = random_ops.random_uniform([3, 4, 6])
def loop_fn(i):
input = array_ops.gather(x, i) # pylint: disable=redefined-builtin
if compat.forward_compatible(2019, 8, 31):
return array_ops.matrix_diag_part(input, k=(-2, 0), padding_value=3)
return array_ops.matrix_diag_part(input)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])
def test_matrix_set_diag(self):
matrices = random_ops.random_uniform([3, 4, 4])
diags = random_ops.random_uniform([3, 4])
num_outputs = 3
if compat.forward_compatible(2019, 8, 31):
bands = random_ops.random_uniform([3, 3, 4])
num_outputs = 6
def loop_fn(i):
matrix_i = array_ops.gather(matrices, i)
diag_i = array_ops.gather(diags, i)
results = [
array_ops.matrix_set_diag(matrix_i, diag_i),
array_ops.matrix_set_diag(matrices[0, ...], diag_i),
array_ops.matrix_set_diag(matrix_i, diags[0, ...])
]
if compat.forward_compatible(2019, 8, 31):
k = (-1, 1)
band_i = array_ops.gather(bands, i)
results.extend([
array_ops.matrix_set_diag(matrix_i, band_i, k=k),
array_ops.matrix_set_diag(matrices[0, ...], band_i, k=k),
array_ops.matrix_set_diag(matrix_i, bands[0, ...], k=k)
])
return results
self._test_loop_fn(
loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * num_outputs)
def test_strided_slice(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 3, 4, 4, 2, 2, 2])
g.watch(x)
def loop_fn(i):
with g:
x_i = array_ops.gather(x, i)
y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]
loss = nn.l2_loss(y)
return y, g.gradient(loss, x_i)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/parallel_for/array_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vectorization of math kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MathTest(PForTestCase):
def _test_unary_cwise_ops(self, ops, is_complex):
for op in ops:
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 5])
g.watch(x)
if is_complex:
y = random_ops.random_uniform([3, 5])
g.watch(y)
x = math_ops.complex(x, y)
# pylint: disable=cell-var-from-loop
output_dtypes = []
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
y1 = op(x1)
outputs = [op(x), y1]
if y1.dtype == dtypes.float32:
loss = math_ops.reduce_sum(y1 * y1)
else:
loss = None
if loss is not None:
grad = g.gradient(loss, x1)
if grad is not None:
outputs.append(grad)
del output_dtypes[:]
output_dtypes.extend([t.dtype for t in outputs])
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
def test_unary_cwise_complex_ops(self):
complex_ops = [
math_ops.angle,
math_ops.imag,
math_ops.complex_abs,
math_ops.real,
math_ops.conj,
]
self._test_unary_cwise_ops(complex_ops, True)
def test_unary_cwise_real_ops_1(self):
real_ops = [
lambda x: math_ops.acosh(1 + math_ops.square(x)),
math_ops.abs,
math_ops.acos,
math_ops.asin,
math_ops.asinh,
math_ops.atan,
math_ops.atanh,
math_ops.bessel_i0e,
math_ops.bessel_i1e,
math_ops.cos,
math_ops.cosh,
math_ops.digamma,
math_ops.erf,
math_ops.erfc,
math_ops.exp,
math_ops.expm1,
math_ops.inv,
math_ops.is_finite,
math_ops.is_inf,
math_ops.lgamma,
math_ops.log,
math_ops.log1p,
]
self._test_unary_cwise_ops(real_ops, False)
def test_unary_cwise_real_ops_2(self):
real_ops = [
math_ops.neg,
math_ops.negative,
math_ops.reciprocal,
math_ops.rint,
math_ops.round,
math_ops.rsqrt,
math_ops.sigmoid,
math_ops.sign,
math_ops.sin,
math_ops.sinh,
math_ops.sqrt,
math_ops.square,
math_ops.tan,
math_ops.tanh,
nn.elu,
nn.relu,
nn.relu6,
nn.selu,
nn.softplus,
nn.softsign,
]
self._test_unary_cwise_ops(real_ops, False)
def test_unary_cwise_no_grad(self):
for op in [math_ops.ceil,
math_ops.floor,
math_ops.logical_not]:
x = random_ops.random_uniform([3, 5])
if op == math_ops.logical_not:
x = x > 0
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return op(array_ops.gather(x, i))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=x.dtype)
def test_binary_cwise_ops(self):
logical_ops = [
math_ops.logical_and,
math_ops.logical_or,
math_ops.logical_xor
]
# Wrapper functions restricting the range of inputs of zeta and polygamma.
def safe_polygamma(x, y):
return math_ops.polygamma(
math_ops.round(clip_ops.clip_by_value(y, 1, 10)),
x * x + 1)
def safe_zeta(x, y):
return math_ops.zeta(x * x + 1, y * y)
float_ops = [
math_ops.add,
math_ops.add_v2,
math_ops.atan2,
math_ops.complex,
math_ops.div,
math_ops.divide,
math_ops.div_no_nan,
math_ops.equal,
math_ops.floor_mod,
math_ops.greater,
math_ops.greater_equal,
math_ops.igamma,
math_ops.igammac,
math_ops.igamma_grad_a,
math_ops.less,
math_ops.less_equal,
math_ops.maximum,
math_ops.minimum,
math_ops.mod,
math_ops.multiply,
math_ops.not_equal,
math_ops.pow,
math_ops.squared_difference,
math_ops.subtract,
math_ops.truncate_mod,
safe_polygamma,
safe_zeta,
]
    # FloorDiv fails on XLA due to floor's discontinuities exacerbating small
    # division differences.
if not test_util.is_xla_enabled():
float_ops += [math_ops.floor_div]
for op in logical_ops + float_ops:
x = random_ops.random_uniform([7, 3, 5])
y = random_ops.random_uniform([3, 5])
if op in logical_ops:
x = x > 0
y = y > 0
output_dtypes = []
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
del output_dtypes[:]
output_dtypes.extend([t.dtype for t in outputs])
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
def test_approximate_equal(self):
x = random_ops.random_uniform([3, 5])
y = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
return math_ops.approximate_equal(x1, y1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.bool])
def test_addn(self):
x = random_ops.random_uniform([2, 3, 5])
y = random_ops.random_uniform([3, 5])
z = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return math_ops.add_n([x1, y, z])
self._test_loop_fn(loop_fn, 2)
def test_cross(self):
x = random_ops.random_uniform([4, 2, 3])
y = random_ops.random_uniform([4, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
x_0 = array_ops.gather(x, 0)
return math_ops.cross(x_i, y_i), math_ops.cross(x_0, y_i)
self._test_loop_fn(loop_fn, 4, loop_fn_dtypes=[dtypes.float32] * 2)
def test_matmul(self):
for tr_a in (True, False):
for tr_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (5, 3) if tr_a else (3, 5)
if stack_a:
shape_a = (2,) + shape_a
shape_b = (7, 5) if tr_b else (5, 7)
if stack_b:
shape_b = (2,) + shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_batch_matmul(self):
for tr_a in (True, False):
for tr_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (4, 5, 3) if tr_a else (4, 3, 5)
if stack_a:
shape_a = (2,) + shape_a
shape_b = (4, 7, 5) if tr_b else (4, 5, 7)
if stack_b:
shape_b = (2,) + shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_batch_matmul_broadcast(self):
if not compat.forward_compatible(2019, 4, 25):
self.skipTest("Skipping test for future functionality.")
for broadcast_a in (True, False):
for broadcast_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 3, 5) if broadcast_a else (4, 2, 3, 5)
shape_b = (2, 5, 7) if broadcast_b else (4, 2, 5, 7)
shape_a = (2,) + shape_a if stack_a else shape_a
shape_b = (2,) + shape_b if stack_b else shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_reduction(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for op in [
math_ops.reduce_sum, math_ops.reduce_prod, math_ops.reduce_max,
math_ops.reduce_min, math_ops.reduce_mean,
]:
for axis in ([1], None, [0, 2]):
for keepdims in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return op(a, axis=axis, keepdims=keepdims)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_boolean_reduction(self):
x = random_ops.random_uniform([2, 3, 4, 5]) > 0.5
for op in [math_ops.reduce_any, math_ops.reduce_all]:
for axis in ([1], None, [0, 2]):
for keepdims in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return op(a, axis=axis, keepdims=keepdims)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2, loop_fn_dtypes=[dtypes.bool])
def test_cum_sum(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for axis in (1, -2):
for exclusive in (True, False):
for reverse in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.cumsum(
a, axis=axis, exclusive=exclusive, reverse=reverse)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_cum_prod(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for axis in (1, -2):
for exclusive in (True, False):
for reverse in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.cumprod(
a, axis=axis, exclusive=exclusive, reverse=reverse)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_bias_add(self):
for data_format in ("NCHW", "NHWC"):
for stacked_value in (True, False):
x_shape = [3, 4, 5, 6]
if stacked_value:
x_shape = [2] + x_shape
x = random_ops.random_uniform(x_shape)
for stacked_bias in (True, False):
if not (stacked_value or stacked_bias):
continue
with backprop.GradientTape(persistent=True) as g:
bias_dim = -1
if data_format == "NCHW":
bias_dim = 2 if stacked_value else 1
bias_shape = [x_shape[bias_dim]]
if stacked_bias:
bias_shape = [2] + bias_shape
bias = random_ops.random_uniform(bias_shape)
g.watch(bias)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
a = array_ops.gather(x, i) if stacked_value else x
b = array_ops.gather(bias, i) if stacked_bias else bias
y = nn.bias_add(a, b, data_format=data_format)
loss = math_ops.reduce_sum(y * y)
grad = g.gradient(loss, bias)
if stacked_bias:
# If we gather over bias in loop_fn, the gradient will be an
# instance of `IndexedSlices` with attrs `values` and `indices`.
return y, grad.values, grad.indices
else:
return y, grad
# pylint: enable=cell-var-from-loop
out_dtypes = [dtypes.float32, dtypes.float32]
if stacked_bias:
out_dtypes = out_dtypes + [dtypes.int32]
self._test_loop_fn(
loop_fn, 2, loop_fn_dtypes=out_dtypes)
def test_unsorted_segment_sum(self):
t = random_ops.random_uniform([3, 3, 2])
for segment_ids_dtype in (dtypes.int32, dtypes.int64):
for num_segments_dtype in (dtypes.int32, dtypes.int64):
segment_ids = constant_op.constant([[0, 0, 2], [0, 1, 2], [2, 2, 2]],
dtype=segment_ids_dtype)
num_segments = constant_op.constant(3, dtype=num_segments_dtype)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
data = array_ops.gather(t, i)
data_0 = array_ops.gather(t, 0)
seg_ids = array_ops.gather(segment_ids, i)
seg_ids_0 = array_ops.gather(segment_ids, 0)
return (math_ops.unsorted_segment_sum(data, seg_ids, num_segments),
math_ops.unsorted_segment_sum(data_0, seg_ids, num_segments),
math_ops.unsorted_segment_sum(data, seg_ids_0, num_segments))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, [dtypes.float32] * 3)
def test_cast(self):
x = constant_op.constant([[1], [2]])
y = constant_op.constant([[1.0], [2.0]])
def loop_fn(i):
return (math_ops.cast(array_ops.gather(x, i), dtypes.float32),
math_ops.cast(array_ops.gather(y, i), dtypes.int32))
self._test_loop_fn(
loop_fn, 2, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
def test_tanh_axpy(self):
a = constant_op.constant(3.)
x = random_ops.random_uniform([4, 5])
y = random_ops.random_uniform([6, 5])
n = x.shape[0]
def loop_fn(i):
return math_ops.tanh(a * array_ops.gather(x, i) + array_ops.gather(y, i))
self._test_loop_fn(loop_fn, n)
def test_select(self):
a = random_ops.random_uniform([2, 3, 5])
b = random_ops.random_uniform([2, 3, 5])
for cond_shape in [2], [2, 3], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_cond_needs_broadcast(self):
a = random_ops.random_uniform([2, 3, 5])
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for cond_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where_v2(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_args_need_broadcast(self):
a = random_ops.random_uniform([2, 5])
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for cond_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where_v2(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_cond_fixed(self):
cond = random_ops.random_uniform([3, 5]) > 0.5
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for a_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
a = random_ops.random_uniform(a_shape)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
return array_ops.where_v2(cond, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
class LinalgTest(PForTestCase):
def test_cholesky(self):
z = random_ops.random_normal([2, 3, 3])
x = (math_ops.matmul(z, array_ops.matrix_transpose(z)) # Ensure pos. def.
+ linalg_ops.eye(3)) # Ensure well-conditioned.
def loop_fn(i):
return linalg_ops.cholesky(array_ops.gather(x, i))
self._test_loop_fn(loop_fn, 2)
def test_log_matrix_determinant(self):
x = random_ops.random_normal([3, 4, 2, 2])
def loop_fn(i):
return linalg_ops.log_matrix_determinant(array_ops.gather(x, i))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_matrix_triangular_solve(self):
for lower in (True, False):
for adjoint in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 4, 3, 3) if stack_a else (4, 3, 3)
shape_b = (2, 4, 3, 5) if stack_b else (4, 3, 5)
x = array_ops.matrix_band_part(
random_ops.random_uniform(shape_a)
+ linalg_ops.eye(3), # Ensure well-conditioned.
*((-1, 0) if lower else (0, -1))) # Ensure triangular.
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return linalg_ops.matrix_triangular_solve(a, b,
lower=lower,
adjoint=adjoint)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/parallel_for/math_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Signal reconstruction via overlapped addition of frames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("signal.overlap_and_add")
def overlap_and_add(signal, frame_step, name=None):
"""Reconstructs a signal from a framed representation.
Adds potentially overlapping frames of a signal with shape
`[..., frames, frame_length]`, offsetting subsequent frames by `frame_step`.
The resulting tensor has shape `[..., output_size]` where
output_size = (frames - 1) * frame_step + frame_length
Args:
signal: A [..., frames, frame_length] `Tensor`. All dimensions may be
unknown, and rank must be at least 2.
frame_step: An integer or scalar `Tensor` denoting overlap offsets. Must be
less than or equal to `frame_length`.
name: An optional name for the operation.
Returns:
A `Tensor` with shape `[..., output_size]` containing the overlap-added
frames of `signal`'s inner-most two dimensions.
Raises:
ValueError: If `signal`'s rank is less than 2, or `frame_step` is not a
scalar integer.
"""
with ops.name_scope(name, "overlap_and_add", [signal, frame_step]):
signal = ops.convert_to_tensor(signal, name="signal")
signal.shape.with_rank_at_least(2)
frame_step = ops.convert_to_tensor(frame_step, name="frame_step")
frame_step.shape.assert_has_rank(0)
if not frame_step.dtype.is_integer:
raise ValueError("frame_step must be an integer. Got %s" %
frame_step.dtype)
signal_shape = array_ops.shape(signal)
# All dimensions that are not part of the overlap-and-add. Can be empty for
# rank 2 inputs.
outer_dimensions = signal_shape[:-2]
outer_rank = array_ops.size(outer_dimensions)
def full_shape(inner_shape):
return array_ops.concat([outer_dimensions, inner_shape], 0)
frame_length = signal_shape[-1]
frames = signal_shape[-2]
# Compute output length.
output_length = frame_length + frame_step * (frames - 1)
# If frame_length is equal to frame_step, there's no overlap so just
# reshape the tensor.
frame_step_static = tensor_util.constant_value(frame_step)
if (frame_step_static is not None and signal.shape.dims is not None and
frame_step_static == signal.shape.dims[-1].value):
output_shape = full_shape([output_length])
return array_ops.reshape(signal, output_shape, name="fast_path")
# The following code is documented using this example:
#
# frame_step = 2
# signal.shape = (3, 5)
# a b c d e
# f g h i j
# k l m n o
    # Compute the number of segments per frame.
segments = -(-frame_length // frame_step) # Divide and round up.
# Pad the frame_length dimension to a multiple of the frame step.
# Pad the frames dimension by `segments` so that signal.shape = (6, 6)
# a b c d e 0
# f g h i j 0
# k l m n o 0
# 0 0 0 0 0 0
# 0 0 0 0 0 0
# 0 0 0 0 0 0
paddings = [[0, segments], [0, segments * frame_step - frame_length]]
outer_paddings = array_ops.zeros([outer_rank, 2], dtypes.int32)
paddings = array_ops.concat([outer_paddings, paddings], 0)
signal = array_ops.pad(signal, paddings)
    # Reshape so that signal.shape = (6, 3, 2)
# ab cd e0
# fg hi j0
# kl mn o0
# 00 00 00
# 00 00 00
# 00 00 00
shape = full_shape([frames + segments, segments, frame_step])
signal = array_ops.reshape(signal, shape)
# Transpose dimensions so that signal.shape = (3, 6, 2)
# ab fg kl 00 00 00
# cd hi mn 00 00 00
# e0 j0 o0 00 00 00
perm = array_ops.concat(
[math_ops.range(outer_rank), outer_rank + [1, 0, 2]], 0)
signal = array_ops.transpose(signal, perm)
# Reshape so that signal.shape = (18, 2)
# ab fg kl 00 00 00 cd hi mn 00 00 00 e0 j0 o0 00 00 00
shape = full_shape([(frames + segments) * segments, frame_step])
signal = array_ops.reshape(signal, shape)
# Truncate so that signal.shape = (15, 2)
# ab fg kl 00 00 00 cd hi mn 00 00 00 e0 j0 o0
signal = signal[..., :(frames + segments - 1) * segments, :]
# Reshape so that signal.shape = (3, 5, 2)
# ab fg kl 00 00
# 00 cd hi mn 00
# 00 00 e0 j0 o0
shape = full_shape([segments, (frames + segments - 1), frame_step])
signal = array_ops.reshape(signal, shape)
# Now, reduce over the columns, to achieve the desired sum.
signal = math_ops.reduce_sum(signal, -3)
# Flatten the array.
shape = full_shape([(frames + segments - 1) * frame_step])
signal = array_ops.reshape(signal, shape)
# Truncate to final length.
signal = signal[..., :output_length]
return signal
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/signal/reconstruction_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Signal processing operations.
See the [tf.signal](https://tensorflow.org/api_guides/python/contrib.signal)
guide.
@@frame
@@hamming_window
@@hann_window
@@inverse_stft
@@inverse_stft_window_fn
@@mfccs_from_log_mel_spectrograms
@@linear_to_mel_weight_matrix
@@overlap_and_add
@@stft
[hamming]: https://en.wikipedia.org/wiki/Window_function#Hamming_window
[hann]: https://en.wikipedia.org/wiki/Window_function#Hann_window
[mel]: https://en.wikipedia.org/wiki/Mel_scale
[mfcc]: https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops.signal.dct_ops import dct
from tensorflow.python.ops.signal.fft_ops import fft
from tensorflow.python.ops.signal.fft_ops import fft2d
from tensorflow.python.ops.signal.fft_ops import fft3d
from tensorflow.python.ops.signal.fft_ops import fftshift
from tensorflow.python.ops.signal.fft_ops import rfft
from tensorflow.python.ops.signal.fft_ops import rfft2d
from tensorflow.python.ops.signal.fft_ops import rfft3d
from tensorflow.python.ops.signal.dct_ops import idct
from tensorflow.python.ops.signal.fft_ops import ifft
from tensorflow.python.ops.signal.fft_ops import ifft2d
from tensorflow.python.ops.signal.fft_ops import ifft3d
from tensorflow.python.ops.signal.fft_ops import ifftshift
from tensorflow.python.ops.signal.fft_ops import irfft
from tensorflow.python.ops.signal.fft_ops import irfft2d
from tensorflow.python.ops.signal.fft_ops import irfft3d
from tensorflow.python.ops.signal.mel_ops import linear_to_mel_weight_matrix
from tensorflow.python.ops.signal.mfcc_ops import mfccs_from_log_mel_spectrograms
from tensorflow.python.ops.signal.reconstruction_ops import overlap_and_add
from tensorflow.python.ops.signal.shape_ops import frame
from tensorflow.python.ops.signal.spectral_ops import inverse_stft
from tensorflow.python.ops.signal.spectral_ops import inverse_stft_window_fn
from tensorflow.python.ops.signal.spectral_ops import stft
from tensorflow.python.ops.signal.window_ops import hamming_window
from tensorflow.python.ops.signal.window_ops import hann_window
# pylint: enable=unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/signal/signal.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spectral operations (e.g. Short-time Fourier Transform)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.ops.signal import reconstruction_ops
from tensorflow.python.ops.signal import shape_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('signal.stft')
def stft(signals, frame_length, frame_step, fft_length=None,
window_fn=window_ops.hann_window,
pad_end=False, name=None):
"""Computes the [Short-time Fourier Transform][stft] of `signals`.
Implemented with GPU-compatible ops and supports gradients.
Args:
signals: A `[..., samples]` `float32` `Tensor` of real-valued signals.
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT to apply.
If not provided, uses the smallest power of 2 enclosing `frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
pad_end: Whether to pad the end of `signals` with zeros when the provided
frame length and step produces a frame that lies partially past its end.
name: An optional name for the operation.
Returns:
A `[..., frames, fft_unique_bins]` `Tensor` of `complex64` STFT values where
`fft_unique_bins` is `fft_length // 2 + 1` (the unique components of the
FFT).
Raises:
ValueError: If `signals` is not at least rank 1, `frame_length` is
not scalar, or `frame_step` is not scalar.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'stft', [signals, frame_length,
frame_step]):
signals = ops.convert_to_tensor(signals, name='signals')
signals.shape.with_rank_at_least(1)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
framed_signals = shape_ops.frame(
signals, frame_length, frame_step, pad_end=pad_end)
# Optionally window the framed signals.
if window_fn is not None:
window = window_fn(frame_length, dtype=framed_signals.dtype)
framed_signals *= window
# fft_ops.rfft produces the (fft_length/2 + 1) unique components of the
# FFT of the real windowed signals in framed_signals.
return fft_ops.rfft(framed_signals, [fft_length])
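# Example (illustrative, assuming a 1 second 16 kHz waveform): 25 ms frames
# hopped every 10 ms give 98 frames, and the default fft_length of 512 (the
# smallest power of 2 enclosing frame_length=400) yields 257 unique bins:
#
#   waveform = tf.random.uniform([16000])
#   spectrogram = tf.signal.stft(waveform, frame_length=400, frame_step=160)
#   # spectrogram.shape == (98, 257), dtype complex64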
@tf_export('signal.inverse_stft_window_fn')
def inverse_stft_window_fn(frame_step,
forward_window_fn=window_ops.hann_window,
name=None):
"""Generates a window function that can be used in `inverse_stft`.
Constructs a window that is equal to the forward window with a further
pointwise amplitude correction. `inverse_stft_window_fn` is equivalent to
`forward_window_fn` in the case where it would produce an exact inverse.
See examples in `inverse_stft` documentation for usage.
Args:
frame_step: An integer scalar `Tensor`. The number of samples to step.
forward_window_fn: window_fn used in the forward transform, `stft`.
name: An optional name for the operation.
Returns:
A callable that takes a window length and a `dtype` keyword argument and
returns a `[window_length]` `Tensor` of samples in the provided datatype.
The returned window is suitable for reconstructing original waveform in
inverse_stft.
"""
with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
def inverse_stft_window_fn_inner(frame_length, dtype):
"""Computes a window that can be used in `inverse_stft`.
Args:
frame_length: An integer scalar `Tensor`. The window length in samples.
dtype: Data type of waveform passed to `stft`.
Returns:
A window suitable for reconstructing original waveform in `inverse_stft`.
Raises:
        ValueError: If `frame_length` is not scalar, `forward_window_fn` is not a
        callable that takes a window length and a `dtype` keyword argument and
        returns a `[window_length]` `Tensor` of samples in the provided
        datatype, or `frame_step` is not scalar.
"""
with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
# Use equation 7 from Griffin + Lim.
forward_window = forward_window_fn(frame_length, dtype=dtype)
denom = math_ops.square(forward_window)
overlaps = -(-frame_length // frame_step) # Ceiling division.
denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
denom = array_ops.reshape(denom, [overlaps, frame_step])
denom = math_ops.reduce_sum(denom, 0, keepdims=True)
denom = array_ops.tile(denom, [overlaps, 1])
denom = array_ops.reshape(denom, [overlaps * frame_step])
return forward_window / denom[:frame_length]
return inverse_stft_window_fn_inner
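# A minimal NumPy sketch (illustrative only, not part of the API defined here)
# of the normalization computed by `inverse_stft_window_fn_inner` above: the
# forward window is divided by the sum of its squared, `frame_step`-shifted
# copies (Griffin & Lim, eq. 7). The periodic Hann stand-in and the default
# lengths below are hypothetical choices for the sketch.
def _inverse_window_sketch_np(frame_length=8, frame_step=2):
  import numpy as np  # Local import; sketch only.
  n = np.arange(frame_length)
  forward = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / frame_length)
  squared = forward ** 2
  overlaps = -(-frame_length // frame_step)  # Ceiling division.
  padded = np.pad(squared, (0, overlaps * frame_step - frame_length))
  # denom[i] sums the squared window over every sample congruent to i modulo
  # frame_step, i.e. the overlap-add energy that lands on sample i.
  denom = np.tile(padded.reshape(overlaps, frame_step).sum(axis=0), overlaps)
  return forward / denom[:frame_length]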
@tf_export('signal.inverse_stft')
def inverse_stft(stfts,
frame_length,
frame_step,
fft_length=None,
window_fn=window_ops.hann_window,
name=None):
"""Computes the inverse [Short-time Fourier Transform][stft] of `stfts`.
  To reconstruct an original waveform, a complementary window function should
  be used with `inverse_stft`. Such a window function can be constructed with
  `tf.signal.inverse_stft_window_fn`.
Example:
```python
frame_length = 400
frame_step = 160
waveform = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1000])
stft = tf.signal.stft(waveform, frame_length, frame_step)
inverse_stft = tf.signal.inverse_stft(
stft, frame_length, frame_step,
window_fn=tf.signal.inverse_stft_window_fn(frame_step))
```
  If a custom `window_fn` is used in `stft`, it must be passed to
  `inverse_stft_window_fn`:
```python
frame_length = 400
frame_step = 160
  window_fn = functools.partial(tf.signal.hamming_window, periodic=True)
waveform = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1000])
stft = tf.signal.stft(
waveform, frame_length, frame_step, window_fn=window_fn)
inverse_stft = tf.signal.inverse_stft(
stft, frame_length, frame_step,
window_fn=tf.signal.inverse_stft_window_fn(
frame_step, forward_window_fn=window_fn))
```
Implemented with GPU-compatible ops and supports gradients.
Args:
stfts: A `complex64` `[..., frames, fft_unique_bins]` `Tensor` of STFT bins
representing a batch of `fft_length`-point STFTs where `fft_unique_bins`
is `fft_length // 2 + 1`
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT that produced
`stfts`. If not provided, uses the smallest power of 2 enclosing
`frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
name: An optional name for the operation.
Returns:
A `[..., samples]` `Tensor` of `float32` signals representing the inverse
STFT for each input STFT in `stfts`.
Raises:
ValueError: If `stfts` is not at least rank 2, `frame_length` is not scalar,
`frame_step` is not scalar, or `fft_length` is not scalar.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'inverse_stft', [stfts]):
stfts = ops.convert_to_tensor(stfts, name='stfts')
stfts.shape.with_rank_at_least(2)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
fft_length.shape.assert_has_rank(0)
real_frames = fft_ops.irfft(stfts, [fft_length])
# frame_length may be larger or smaller than fft_length, so we pad or
# truncate real_frames to frame_length.
frame_length_static = tensor_util.constant_value(frame_length)
# If we don't know the shape of real_frames's inner dimension, pad and
# truncate to frame_length.
if (frame_length_static is None or real_frames.shape.ndims is None or
real_frames.shape.as_list()[-1] is None):
real_frames = real_frames[..., :frame_length]
real_frames_rank = array_ops.rank(real_frames)
real_frames_shape = array_ops.shape(real_frames)
paddings = array_ops.concat(
[array_ops.zeros([real_frames_rank - 1, 2],
dtype=frame_length.dtype),
[[0, math_ops.maximum(0, frame_length - real_frames_shape[-1])]]], 0)
real_frames = array_ops.pad(real_frames, paddings)
# We know real_frames's last dimension and frame_length statically. If they
# are different, then pad or truncate real_frames to frame_length.
elif real_frames.shape.as_list()[-1] > frame_length_static:
real_frames = real_frames[..., :frame_length_static]
elif real_frames.shape.as_list()[-1] < frame_length_static:
pad_amount = frame_length_static - real_frames.shape.as_list()[-1]
real_frames = array_ops.pad(real_frames,
[[0, 0]] * (real_frames.shape.ndims - 1) +
[[0, pad_amount]])
# The above code pads the inner dimension of real_frames to frame_length,
# but it does so in a way that may not be shape-inference friendly.
# Restore shape information if we are able to.
if frame_length_static is not None and real_frames.shape.ndims is not None:
real_frames.set_shape([None] * (real_frames.shape.ndims - 1) +
[frame_length_static])
# Optionally window and overlap-add the inner 2 dimensions of real_frames
# into a single [samples] dimension.
if window_fn is not None:
window = window_fn(frame_length, dtype=stfts.dtype.real_dtype)
real_frames *= window
return reconstruction_ops.overlap_and_add(real_frames, frame_step)
def _enclosing_power_of_two(value):
"""Return 2**N for integer N such that 2**N >= value."""
value_static = tensor_util.constant_value(value)
if value_static is not None:
return constant_op.constant(
int(2**np.ceil(np.log(value_static) / np.log(2.0))), value.dtype)
return math_ops.cast(
math_ops.pow(
2.0,
math_ops.ceil(
math_ops.log(math_ops.cast(value, dtypes.float32)) /
math_ops.log(2.0))), value.dtype)
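# Minimal end-to-end usage sketch for the two transforms defined above. This is
# illustrative only: it assumes TensorFlow 1.15 with eager execution enabled
# (e.g. tf.compat.v1.enable_eager_execution()); in graph mode the returned
# tensors would need to be evaluated in a session. The 440 Hz sine test signal
# and the frame parameters are hypothetical.
def _stft_round_trip_sketch():
  import numpy as np
  import tensorflow as tf
  frame_length, frame_step = 256, 64
  t = np.arange(8000, dtype=np.float32) / 16000.0
  waveform = np.sin(2.0 * np.pi * 440.0 * t).astype(np.float32)
  stfts = tf.signal.stft(waveform, frame_length, frame_step)
  reconstructed = tf.signal.inverse_stft(
      stfts, frame_length, frame_step,
      window_fn=tf.signal.inverse_stft_window_fn(frame_step))
  # `reconstructed` approximates `waveform` (up to edge effects and a shorter
  # length, since pad_end defaults to False in `stft`).
  return stfts, reconstructed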
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/signal/spectral_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mel-Frequency Cepstral Coefficients (MFCCs) ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import dct_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('signal.mfccs_from_log_mel_spectrograms')
def mfccs_from_log_mel_spectrograms(log_mel_spectrograms, name=None):
"""Computes [MFCCs][mfcc] of `log_mel_spectrograms`.
Implemented with GPU-compatible ops and supports gradients.
[Mel-Frequency Cepstral Coefficient (MFCC)][mfcc] calculation consists of
taking the DCT-II of a log-magnitude mel-scale spectrogram. [HTK][htk]'s MFCCs
  use a particular scaling of the DCT-II that is almost (but not exactly)
  orthogonal normalization. We follow this convention.
All `num_mel_bins` MFCCs are returned and it is up to the caller to select
a subset of the MFCCs based on their application. For example, it is typical
to only use the first few for speech recognition, as this results in
an approximately pitch-invariant representation of the signal.
For example:
```python
sample_rate = 16000.0
# A Tensor of [batch_size, num_samples] mono PCM samples in the range [-1, 1].
pcm = tf.compat.v1.placeholder(tf.float32, [None, None])
# A 1024-point STFT with frames of 64 ms and 75% overlap.
stfts = tf.signal.stft(pcm, frame_length=1024, frame_step=256,
fft_length=1024)
spectrograms = tf.abs(stfts)
# Warp the linear scale spectrograms into the mel-scale.
num_spectrogram_bins = stfts.shape[-1].value
lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, 80
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz)
mel_spectrograms = tf.tensordot(
spectrograms, linear_to_mel_weight_matrix, 1)
mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6)
# Compute MFCCs from log_mel_spectrograms and take the first 13.
mfccs = tf.signal.mfccs_from_log_mel_spectrograms(
log_mel_spectrograms)[..., :13]
```
Args:
log_mel_spectrograms: A `[..., num_mel_bins]` `float32` `Tensor` of
log-magnitude mel-scale spectrograms.
name: An optional name for the operation.
Returns:
A `[..., num_mel_bins]` `float32` `Tensor` of the MFCCs of
`log_mel_spectrograms`.
Raises:
ValueError: If `num_mel_bins` is not positive.
[mfcc]: https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
[htk]: https://en.wikipedia.org/wiki/HTK_(software)
"""
with ops.name_scope(name, 'mfccs_from_log_mel_spectrograms',
[log_mel_spectrograms]):
# Compute the DCT-II of the resulting log-magnitude mel-scale spectrogram.
# The DCT used in HTK scales every basis vector by sqrt(2/N), which is the
# scaling required for an "orthogonal" DCT-II *except* in the 0th bin, where
# the true orthogonal DCT (as implemented by scipy) scales by sqrt(1/N). For
# this reason, we don't apply orthogonal normalization and scale the DCT by
# `0.5 * sqrt(2/N)` manually.
log_mel_spectrograms = ops.convert_to_tensor(log_mel_spectrograms,
dtype=dtypes.float32)
if (log_mel_spectrograms.shape.ndims and
log_mel_spectrograms.shape.dims[-1].value is not None):
num_mel_bins = log_mel_spectrograms.shape.dims[-1].value
if num_mel_bins == 0:
raise ValueError('num_mel_bins must be positive. Got: %s' %
log_mel_spectrograms)
else:
num_mel_bins = array_ops.shape(log_mel_spectrograms)[-1]
dct2 = dct_ops.dct(log_mel_spectrograms, type=2)
return dct2 * math_ops.rsqrt(
math_ops.cast(num_mel_bins, dtypes.float32) * 2.0)
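# A small usage sketch that complements the full pipeline example in the
# docstring above: it applies the op to a random log-mel batch and keeps the
# first 13 coefficients. Illustrative only; assumes eager execution, and the
# batch/frame/bin sizes are hypothetical.
def _mfcc_usage_sketch():
  import numpy as np
  import tensorflow as tf
  log_mel = np.random.randn(2, 10, 80).astype(np.float32)  # [batch, frames, mel]
  mfccs = tf.signal.mfccs_from_log_mel_spectrograms(log_mel)[..., :13]
  return mfccs  # Shape [2, 10, 13].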
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/signal/mfcc_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fast-Fourier Transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.framework import tensor_util as _tensor_util
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.util.tf_export import tf_export
def _infer_fft_length_for_rfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
return _array_ops.shape(input_tensor)[-fft_rank:]
# Otherwise, return a constant.
return _ops.convert_to_tensor(fft_shape.as_list(), _dtypes.int32)
def _infer_fft_length_for_irfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
fft_length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:])
fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))
return _array_ops.stack(fft_length)
# Otherwise, return a constant.
fft_length = fft_shape.as_list()
if fft_length:
fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))
return _ops.convert_to_tensor(fft_length, _dtypes.int32)
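# A tiny NumPy check (illustrative only) of the rule used above: an RFFT of
# length N produces N // 2 + 1 unique bins, so an inner dimension of K complex
# bins maps back to 2 * (K - 1) samples. The inference is exact only for even
# N; an odd original length has to be passed explicitly as `fft_length`.
def _irfft_length_inference_sketch():
  import numpy as np
  n = 16  # Hypothetical even FFT length.
  bins = np.fft.rfft(np.zeros(n)).shape[-1]  # n // 2 + 1 == 9.
  assert 2 * (bins - 1) == n
  return 2 * (bins - 1)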
def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False):
"""Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims."""
fft_shape = _tensor_util.constant_value_as_shape(fft_length)
# Edge case: skip padding empty tensors.
if (input_tensor.shape.ndims is not None and
any(dim.value == 0 for dim in input_tensor.shape.dims)):
return input_tensor
# If we know the shapes ahead of time, we can either skip or pre-compute the
# appropriate paddings. Otherwise, fall back to computing paddings in
# TensorFlow.
if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None:
# Slice the last FFT-rank dimensions from input_tensor's shape.
input_fft_shape = input_tensor.shape[-fft_shape.ndims:]
if input_fft_shape.is_fully_defined():
# In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
if is_reverse:
fft_shape = fft_shape[:-1].concatenate(
fft_shape.dims[-1].value // 2 + 1)
paddings = [[0, max(fft_dim.value - input_dim.value, 0)]
for fft_dim, input_dim in zip(
fft_shape.dims, input_fft_shape.dims)]
if any(pad > 0 for _, pad in paddings):
outer_paddings = [[0, 0]] * max((input_tensor.shape.ndims -
fft_shape.ndims), 0)
return _array_ops.pad(input_tensor, outer_paddings + paddings)
return input_tensor
# If we can't determine the paddings ahead of time, then we have to pad. If
# the paddings end up as zero, tf.pad has a special-case that does no work.
input_rank = _array_ops.rank(input_tensor)
input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:]
outer_dims = _math_ops.maximum(0, input_rank - fft_rank)
outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype)
# In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
if is_reverse:
fft_length = _array_ops.concat([fft_length[:-1],
fft_length[-1:] // 2 + 1], 0)
fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape)
paddings = _array_ops.concat([outer_paddings, fft_paddings], 0)
paddings = _array_ops.stack([_array_ops.zeros_like(paddings), paddings],
axis=1)
return _array_ops.pad(input_tensor, paddings)
def _rfft_wrapper(fft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
def _rfft(input_tensor, fft_length=None, name=None):
"""Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.float32)
input_tensor.shape.with_rank_at_least(fft_rank)
if fft_length is None:
fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)
return fft_fn(input_tensor, fft_length, name)
_rfft.__doc__ = fft_fn.__doc__
return _rfft
def _irfft_wrapper(ifft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.irfft* that infers fft_length argument."""
  def _irfft(input_tensor, fft_length=None, name=None):
    """Wrapper around gen_spectral_ops.irfft* that infers fft_length argument."""
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.complex64)
input_tensor.shape.with_rank_at_least(fft_rank)
if fft_length is None:
fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length,
is_reverse=True)
return ifft_fn(input_tensor, fft_length, name)
_irfft.__doc__ = ifft_fn.__doc__
return _irfft
# FFT/IFFT 1/2/3D are exported via
# third_party/tensorflow/core/api_def/python_api/
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
rfft = _rfft_wrapper(gen_spectral_ops.rfft, 1, "rfft")
tf_export("signal.rfft", v1=["signal.rfft", "spectral.rfft"])(rfft)
irfft = _irfft_wrapper(gen_spectral_ops.irfft, 1, "irfft")
tf_export("signal.irfft", v1=["signal.irfft", "spectral.irfft"])(irfft)
rfft2d = _rfft_wrapper(gen_spectral_ops.rfft2d, 2, "rfft2d")
tf_export("signal.rfft2d", v1=["signal.rfft2d", "spectral.rfft2d"])(rfft2d)
irfft2d = _irfft_wrapper(gen_spectral_ops.irfft2d, 2, "irfft2d")
tf_export("signal.irfft2d", v1=["signal.irfft2d", "spectral.irfft2d"])(irfft2d)
rfft3d = _rfft_wrapper(gen_spectral_ops.rfft3d, 3, "rfft3d")
tf_export("signal.rfft3d", v1=["signal.rfft3d", "spectral.rfft3d"])(rfft3d)
irfft3d = _irfft_wrapper(gen_spectral_ops.irfft3d, 3, "irfft3d")
tf_export("signal.irfft3d", v1=["signal.irfft3d", "spectral.irfft3d"])(irfft3d)
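# Minimal usage sketch (illustrative only; assumes eager execution) of the
# `fft_length` handling added by the wrappers above: a too-short input is
# zero-padded to `fft_length` before the RFFT. The input length and FFT size
# below are hypothetical.
def _rfft_fft_length_sketch():
  import numpy as np
  import tensorflow as tf
  x = np.ones(10, dtype=np.float32)
  # x is padded from 10 to 16 samples, so the output has 16 // 2 + 1 = 9 bins.
  spectrum = tf.signal.rfft(x, fft_length=[16])
  return spectrum  # complex64, shape [9].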
def _fft_size_for_grad(grad, rank):
return _math_ops.reduce_prod(_array_ops.shape(grad)[-rank:])
@_ops.RegisterGradient("FFT")
def _fft_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype)
return ifft(grad) * size
@_ops.RegisterGradient("IFFT")
def _ifft_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype.real_dtype),
grad.dtype)
return fft(grad) * rsize
@_ops.RegisterGradient("FFT2D")
def _fft2d_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype)
return ifft2d(grad) * size
@_ops.RegisterGradient("IFFT2D")
def _ifft2d_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype.real_dtype),
grad.dtype)
return fft2d(grad) * rsize
@_ops.RegisterGradient("FFT3D")
def _fft3d_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype)
return ifft3d(grad) * size
@_ops.RegisterGradient("IFFT3D")
def _ifft3d_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype.real_dtype),
grad.dtype)
return fft3d(grad) * rsize
def _rfft_grad_helper(rank, irfft_fn):
"""Returns a gradient function for an RFFT of the provided rank."""
# Can't happen because we don't register a gradient for RFFT3D.
assert rank in (1, 2), "Gradient for RFFT3D is not implemented."
def _grad(op, grad):
"""A gradient function for RFFT with the provided `rank` and `irfft_fn`."""
fft_length = op.inputs[1]
input_shape = _array_ops.shape(op.inputs[0])
is_even = _math_ops.cast(1 - (fft_length[-1] % 2), _dtypes.complex64)
def _tile_for_broadcasting(matrix, t):
expanded = _array_ops.reshape(
matrix,
_array_ops.concat([
_array_ops.ones([_array_ops.rank(t) - 2], _dtypes.int32),
_array_ops.shape(matrix)
], 0))
return _array_ops.tile(
expanded, _array_ops.concat([_array_ops.shape(t)[:-2], [1, 1]], 0))
    def _mask_matrix(length):
      """Computes the `length` x `length` DFT matrix exp(-2j*pi*a*b / length)."""
# TODO(rjryan): Speed up computation of twiddle factors using the
# following recurrence relation and cache them across invocations of RFFT.
#
# t_n = exp(sqrt(-1) * pi * n^2 / line_len)
# for n = 0, 1,..., line_len-1.
# For n > 2, use t_n = t_{n-1}^2 / t_{n-2} * t_1^2
a = _array_ops.tile(
_array_ops.expand_dims(_math_ops.range(length), 0), (length, 1))
b = _array_ops.transpose(a, [1, 0])
return _math_ops.exp(
-2j * np.pi * _math_ops.cast(a * b, _dtypes.complex64) /
_math_ops.cast(length, _dtypes.complex64))
def _ymask(length):
"""A sequence of [1+0j, -1+0j, 1+0j, -1+0j, ...] with length `length`."""
return _math_ops.cast(1 - 2 * (_math_ops.range(length) % 2),
_dtypes.complex64)
y0 = grad[..., 0:1]
if rank == 1:
ym = grad[..., -1:]
extra_terms = y0 + is_even * ym * _ymask(input_shape[-1])
elif rank == 2:
# Create a mask matrix for y0 and ym.
base_mask = _mask_matrix(input_shape[-2])
# Tile base_mask to match y0 in shape so that we can batch-matmul the
# inner 2 dimensions.
tiled_mask = _tile_for_broadcasting(base_mask, y0)
y0_term = _math_ops.matmul(tiled_mask, _math_ops.conj(y0))
extra_terms = y0_term
ym = grad[..., -1:]
ym_term = _math_ops.matmul(tiled_mask, _math_ops.conj(ym))
inner_dim = input_shape[-1]
ym_term = _array_ops.tile(
ym_term,
_array_ops.concat([
_array_ops.ones([_array_ops.rank(grad) - 1], _dtypes.int32),
[inner_dim]
], 0)) * _ymask(inner_dim)
extra_terms += is_even * ym_term
# The gradient of RFFT is the IRFFT of the incoming gradient times a scaling
# factor, plus some additional terms to make up for the components dropped
# due to Hermitian symmetry.
input_size = _math_ops.cast(
_fft_size_for_grad(op.inputs[0], rank), _dtypes.float32)
the_irfft = irfft_fn(grad, fft_length)
return 0.5 * (the_irfft * input_size + _math_ops.real(extra_terms)), None
return _grad
def _irfft_grad_helper(rank, rfft_fn):
"""Returns a gradient function for an IRFFT of the provided rank."""
# Can't happen because we don't register a gradient for IRFFT3D.
assert rank in (1, 2), "Gradient for IRFFT3D is not implemented."
def _grad(op, grad):
"""A gradient function for IRFFT with the provided `rank` and `rfft_fn`."""
# Generate a simple mask like [1.0, 2.0, ..., 2.0, 1.0] for even-length FFTs
# and [1.0, 2.0, ..., 2.0] for odd-length FFTs. To reduce extra ops in the
# graph we special-case the situation where the FFT length and last
# dimension of the input are known at graph construction time.
fft_length = op.inputs[1]
is_odd = _math_ops.mod(fft_length[-1], 2)
input_last_dimension = _array_ops.shape(op.inputs[0])[-1]
mask = _array_ops.concat(
[[1.0], 2.0 * _array_ops.ones([input_last_dimension - 2 + is_odd]),
_array_ops.ones([1 - is_odd])], 0)
rsize = _math_ops.reciprocal(_math_ops.cast(
_fft_size_for_grad(grad, rank), _dtypes.float32))
# The gradient of IRFFT is the RFFT of the incoming gradient times a scaling
# factor and a mask. The mask scales the gradient for the Hermitian
# symmetric components of the RFFT by a factor of two, since these
# components are de-duplicated in the RFFT.
the_rfft = rfft_fn(grad, fft_length)
return the_rfft * _math_ops.cast(rsize * mask, _dtypes.complex64), None
return _grad
@tf_export("signal.fftshift")
def fftshift(x, axes=None, name=None):
"""Shift the zero-frequency component to the center of the spectrum.
This function swaps half-spaces for all axes listed (defaults to all).
Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
@compatibility(numpy)
Equivalent to numpy.fft.fftshift.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fftshift.html
@end_compatibility
For example:
```python
x = tf.signal.fftshift([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.])
x.numpy() # array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
```
Args:
x: `Tensor`, input tensor.
axes: `int` or shape `tuple`, optional Axes over which to shift. Default is
None, which shifts all axes.
name: An optional name for the operation.
Returns:
A `Tensor`, The shifted tensor.
"""
with _ops.name_scope(name, "fftshift") as name:
x = _ops.convert_to_tensor(x)
if axes is None:
axes = tuple(range(x.shape.ndims))
shift = [int(dim // 2) for dim in x.shape]
elif isinstance(axes, int):
shift = int(x.shape[axes] // 2)
else:
shift = [int((x.shape[ax]) // 2) for ax in axes]
return manip_ops.roll(x, shift, axes, name)
@tf_export("signal.ifftshift")
def ifftshift(x, axes=None, name=None):
"""The inverse of fftshift.
Although identical for even-length x,
the functions differ by one sample for odd-length x.
@compatibility(numpy)
Equivalent to numpy.fft.ifftshift.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifftshift.html
@end_compatibility
For example:
```python
x = tf.signal.ifftshift([[ 0., 1., 2.],[ 3., 4., -4.],[-3., -2., -1.]])
x.numpy() # array([[ 4., -4., 3.],[-2., -1., -3.],[ 1., 2., 0.]])
```
Args:
x: `Tensor`, input tensor.
axes: `int` or shape `tuple` Axes over which to calculate. Defaults to None,
which shifts all axes.
name: An optional name for the operation.
Returns:
A `Tensor`, The shifted tensor.
"""
with _ops.name_scope(name, "ifftshift") as name:
x = _ops.convert_to_tensor(x)
if axes is None:
axes = tuple(range(x.shape.ndims))
shift = [-int(dim // 2) for dim in x.shape]
elif isinstance(axes, int):
shift = -int(x.shape[axes] // 2)
else:
shift = [-int(x.shape[ax] // 2) for ax in axes]
return manip_ops.roll(x, shift, axes, name)
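# Minimal usage sketch (illustrative only; assumes eager execution) showing
# that the two shifts above mirror their NumPy counterparts and round-trip.
def _fftshift_sketch():
  import numpy as np
  import tensorflow as tf
  x = np.arange(10.0)
  shifted = tf.signal.fftshift(x)          # Matches np.fft.fftshift(x).
  restored = tf.signal.ifftshift(shifted)  # Recovers x.
  return shifted, restored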
_ops.RegisterGradient("RFFT")(_rfft_grad_helper(1, irfft))
_ops.RegisterGradient("IRFFT")(_irfft_grad_helper(1, rfft))
_ops.RegisterGradient("RFFT2D")(_rfft_grad_helper(2, irfft2d))
_ops.RegisterGradient("IRFFT2D")(_irfft_grad_helper(2, rfft2d))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/signal/fft_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Discrete Cosine Transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math as _math
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.util.tf_export import tf_export
def _validate_dct_arguments(input_tensor, dct_type, n, axis, norm):
"""Checks that DCT/IDCT arguments are compatible and well formed."""
if axis != -1:
raise NotImplementedError("axis must be -1. Got: %s" % axis)
if n is not None and n < 1:
raise ValueError("n should be a positive integer or None")
if dct_type not in (1, 2, 3):
raise ValueError("Only Types I, II and III (I)DCT are supported.")
if dct_type == 1:
if norm == "ortho":
raise ValueError("Normalization is not supported for the Type-I DCT.")
if input_tensor.shape[-1] is not None and input_tensor.shape[-1] < 2:
raise ValueError(
"Type-I DCT requires the dimension to be greater than one.")
if norm not in (None, "ortho"):
raise ValueError(
"Unknown normalization. Expected None or 'ortho', got: %s" % norm)
# TODO(rjryan): Implement `axis` parameter.
@tf_export("signal.dct", v1=["signal.dct", "spectral.dct"])
def dct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin
"""Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.
Currently only Types I, II and III are supported.
Type I is implemented using a length `2N` padded `tf.signal.rfft`.
Type II is implemented using a length `2N` padded `tf.signal.rfft`, as
described here: [Type 2 DCT using 2N FFT padded (Makhoul)](https://dsp.stackexchange.com/a/10606).
Type III is a fairly straightforward inverse of Type II
(i.e. using a length `2N` padded `tf.signal.irfft`).
@compatibility(scipy)
Equivalent to [scipy.fftpack.dct](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html)
for Type-I, Type-II and Type-III DCT.
@end_compatibility
Args:
input: A `[..., samples]` `float32` `Tensor` containing the signals to
take the DCT of.
type: The DCT type to perform. Must be 1, 2 or 3.
    n: The length of the transform. If `n` is less than the sequence length,
      only the first `n` elements of the sequence are used for the DCT; if `n`
      is greater than the sequence length, the sequence is zero-padded before
      the DCT is computed.
axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
norm: The normalization to apply. `None` for no normalization or `'ortho'`
for orthonormal normalization.
name: An optional name for the operation.
Returns:
A `[..., samples]` `float32` `Tensor` containing the DCT of `input`.
Raises:
    ValueError: If `type` is not `1`, `2` or `3`, `axis` is not `-1`, `n` is
      neither `None` nor a positive integer, or `norm` is not `None` or
      `'ortho'`.
    ValueError: If `type` is `1` and `norm` is `'ortho'`.
[dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform
"""
_validate_dct_arguments(input, type, n, axis, norm)
with _ops.name_scope(name, "dct", [input]):
# We use the RFFT to compute the DCT and TensorFlow only supports float32
# for FFTs at the moment.
input = _ops.convert_to_tensor(input, dtype=_dtypes.float32)
seq_len = (
tensor_shape.dimension_value(input.shape[-1]) or
_array_ops.shape(input)[-1])
if n is not None:
if n <= seq_len:
input = input[..., 0:n]
else:
rank = len(input.shape)
padding = [[0, 0] for i in range(rank)]
padding[rank - 1][1] = n - seq_len
padding = _ops.convert_to_tensor(padding, dtype=_dtypes.int32)
input = _array_ops.pad(input, paddings=padding)
axis_dim = (tensor_shape.dimension_value(input.shape[-1])
or _array_ops.shape(input)[-1])
axis_dim_float = _math_ops.cast(axis_dim, _dtypes.float32)
if type == 1:
dct1_input = _array_ops.concat([input, input[..., -2:0:-1]], axis=-1)
dct1 = _math_ops.real(fft_ops.rfft(dct1_input))
return dct1
if type == 2:
scale = 2.0 * _math_ops.exp(
_math_ops.complex(
0.0, -_math_ops.range(axis_dim_float) * _math.pi * 0.5 /
axis_dim_float))
# TODO(rjryan): Benchmark performance and memory usage of the various
# approaches to computing a DCT via the RFFT.
dct2 = _math_ops.real(
fft_ops.rfft(
input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)
if norm == "ortho":
n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)
n2 = n1 * _math_ops.sqrt(2.0)
# Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
weights = _array_ops.pad(
_array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
constant_values=n2)
dct2 *= weights
return dct2
elif type == 3:
if norm == "ortho":
n1 = _math_ops.sqrt(axis_dim_float)
n2 = n1 * _math_ops.sqrt(0.5)
# Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
weights = _array_ops.pad(
_array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
constant_values=n2)
input *= weights
else:
input *= axis_dim_float
scale = 2.0 * _math_ops.exp(
_math_ops.complex(
0.0,
_math_ops.range(axis_dim_float) * _math.pi * 0.5 /
axis_dim_float))
dct3 = _math_ops.real(
fft_ops.irfft(
scale * _math_ops.complex(input, 0.0),
fft_length=[2 * axis_dim]))[..., :axis_dim]
return dct3
# TODO(rjryan): Implement `n` and `axis` parameters.
@tf_export("signal.idct", v1=["signal.idct", "spectral.idct"])
def idct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin
"""Computes the 1D [Inverse Discrete Cosine Transform (DCT)][idct] of `input`.
Currently only Types I, II and III are supported. Type III is the inverse of
Type II, and vice versa.
Note that you must re-normalize by 1/(2n) to obtain an inverse if `norm` is
not `'ortho'`. That is:
`signal == idct(dct(signal)) * 0.5 / signal.shape[-1]`.
When `norm='ortho'`, we have:
`signal == idct(dct(signal, norm='ortho'), norm='ortho')`.
@compatibility(scipy)
Equivalent to [scipy.fftpack.idct](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.idct.html)
for Type-I, Type-II and Type-III DCT.
@end_compatibility
Args:
input: A `[..., samples]` `float32` `Tensor` containing the signals to take
the DCT of.
type: The IDCT type to perform. Must be 1, 2 or 3.
n: For future expansion. The length of the transform. Must be `None`.
axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
norm: The normalization to apply. `None` for no normalization or `'ortho'`
for orthonormal normalization.
name: An optional name for the operation.
Returns:
A `[..., samples]` `float32` `Tensor` containing the IDCT of `input`.
Raises:
    ValueError: If `type` is not `1`, `2` or `3`, `n` is not `None`, `axis` is
      not `-1`, or `norm` is not `None` or `'ortho'`.
[idct]:
https://en.wikipedia.org/wiki/Discrete_cosine_transform#Inverse_transforms
"""
_validate_dct_arguments(input, type, n, axis, norm)
inverse_type = {1: 1, 2: 3, 3: 2}[type]
return dct(input, type=inverse_type, n=n, axis=axis, norm=norm, name=name)
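# A small sketch (illustrative only; assumes eager execution) of the round-trip
# normalization described in the `idct` docstring: without `norm`, the inverse
# needs an extra 0.5 / N factor; with `norm='ortho'` the round trip is exact up
# to float error. The random input shape is hypothetical.
def _dct_round_trip_sketch():
  import numpy as np
  import tensorflow as tf
  signal = np.random.randn(4, 8).astype(np.float32)
  n = signal.shape[-1]
  approx = tf.signal.idct(tf.signal.dct(signal, type=2), type=2) * 0.5 / n
  exact = tf.signal.idct(tf.signal.dct(signal, type=2, norm='ortho'),
                         type=2, norm='ortho')
  return approx, exact  # Both approximate `signal`.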
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/signal/dct_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility ops shared across tf.contrib.signal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fractions
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def gcd(a, b, name=None):
"""Returns the greatest common divisor via Euclid's algorithm.
Args:
a: The dividend. A scalar integer `Tensor`.
b: The divisor. A scalar integer `Tensor`.
name: An optional name for the operation.
Returns:
A scalar `Tensor` representing the greatest common divisor between `a` and
`b`.
Raises:
ValueError: If `a` or `b` are not scalar integers.
"""
with ops.name_scope(name, 'gcd', [a, b]):
a = ops.convert_to_tensor(a)
b = ops.convert_to_tensor(b)
a.shape.assert_has_rank(0)
b.shape.assert_has_rank(0)
if not a.dtype.is_integer:
raise ValueError('a must be an integer type. Got: %s' % a.dtype)
if not b.dtype.is_integer:
raise ValueError('b must be an integer type. Got: %s' % b.dtype)
# TPU requires static shape inference. GCD is used for subframe size
# computation, so we should prefer static computation where possible.
const_a = tensor_util.constant_value(a)
const_b = tensor_util.constant_value(b)
if const_a is not None and const_b is not None:
return ops.convert_to_tensor(fractions.gcd(const_a, const_b))
cond = lambda _, b: math_ops.greater(b, array_ops.zeros_like(b))
body = lambda a, b: [b, math_ops.mod(a, b)]
a, b = control_flow_ops.while_loop(cond, body, [a, b], back_prop=False)
return a
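# Usage sketch (illustrative only) for `gcd` above, exercising both code paths:
# constant inputs fold to a constant via the static branch, while a
# non-constant input falls back to the while_loop. The values are hypothetical.
def _gcd_usage_sketch():
  import tensorflow as tf  # Sketch-local import.
  static_gcd = gcd(tf.constant(12), tf.constant(18))  # Folds to 6.
  dynamic_gcd = gcd(
      tf.compat.v1.placeholder_with_default(12, []), tf.constant(18))
  return static_gcd, dynamic_gcd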
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/signal/util_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""mel conversion ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import shape_ops
from tensorflow.python.util.tf_export import tf_export
# mel spectrum constants.
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
def _mel_to_hertz(mel_values, name=None):
"""Converts frequencies in `mel_values` from the mel scale to linear scale.
Args:
mel_values: A `Tensor` of frequencies in the mel scale.
name: An optional name for the operation.
Returns:
A `Tensor` of the same shape and type as `mel_values` containing linear
scale frequencies in Hertz.
"""
with ops.name_scope(name, 'mel_to_hertz', [mel_values]):
mel_values = ops.convert_to_tensor(mel_values)
return _MEL_BREAK_FREQUENCY_HERTZ * (
math_ops.exp(mel_values / _MEL_HIGH_FREQUENCY_Q) - 1.0
)
def _hertz_to_mel(frequencies_hertz, name=None):
  """Converts frequencies in `frequencies_hertz` from Hertz to the mel scale.
Args:
frequencies_hertz: A `Tensor` of frequencies in Hertz.
name: An optional name for the operation.
Returns:
    A `Tensor` of the same shape and type as `frequencies_hertz` containing
    frequencies in the mel scale.
"""
with ops.name_scope(name, 'hertz_to_mel', [frequencies_hertz]):
frequencies_hertz = ops.convert_to_tensor(frequencies_hertz)
return _MEL_HIGH_FREQUENCY_Q * math_ops.log(
1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ))
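# A tiny NumPy sketch (illustrative only) checking that the two conversions
# above invert each other for a few hypothetical frequencies, using the same
# break-frequency and Q constants defined at the top of this file.
def _mel_round_trip_sketch_np():
  import numpy as np
  hz = np.array([125.0, 1000.0, 3800.0])
  mel = _MEL_HIGH_FREQUENCY_Q * np.log1p(hz / _MEL_BREAK_FREQUENCY_HERTZ)
  back = _MEL_BREAK_FREQUENCY_HERTZ * np.expm1(mel / _MEL_HIGH_FREQUENCY_Q)
  return np.allclose(back, hz)  # True.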
def _validate_arguments(num_mel_bins, sample_rate,
lower_edge_hertz, upper_edge_hertz, dtype):
"""Checks the inputs to linear_to_mel_weight_matrix."""
if num_mel_bins <= 0:
raise ValueError('num_mel_bins must be positive. Got: %s' % num_mel_bins)
if sample_rate <= 0.0:
raise ValueError('sample_rate must be positive. Got: %s' % sample_rate)
if lower_edge_hertz < 0.0:
raise ValueError('lower_edge_hertz must be non-negative. Got: %s' %
lower_edge_hertz)
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError('lower_edge_hertz %.1f >= upper_edge_hertz %.1f' %
(lower_edge_hertz, upper_edge_hertz))
if upper_edge_hertz > sample_rate / 2:
raise ValueError('upper_edge_hertz must not be larger than the Nyquist '
'frequency (sample_rate / 2). Got: %s for sample_rate: %s'
% (upper_edge_hertz, sample_rate))
if not dtype.is_floating:
raise ValueError('dtype must be a floating point type. Got: %s' % dtype)
@tf_export('signal.linear_to_mel_weight_matrix')
def linear_to_mel_weight_matrix(num_mel_bins=20,
num_spectrogram_bins=129,
sample_rate=8000,
lower_edge_hertz=125.0,
upper_edge_hertz=3800.0,
dtype=dtypes.float32,
name=None):
"""Returns a matrix to warp linear scale spectrograms to the [mel scale][mel].
Returns a weight matrix that can be used to re-weight a `Tensor` containing
`num_spectrogram_bins` linearly sampled frequency information from
`[0, sample_rate / 2]` into `num_mel_bins` frequency information from
`[lower_edge_hertz, upper_edge_hertz]` on the [mel scale][mel].
For example, the returned matrix `A` can be used to right-multiply a
spectrogram `S` of shape `[frames, num_spectrogram_bins]` of linear
scale spectrum values (e.g. STFT magnitudes) to generate a "mel spectrogram"
`M` of shape `[frames, num_mel_bins]`.
# `S` has shape [frames, num_spectrogram_bins]
# `M` has shape [frames, num_mel_bins]
M = tf.matmul(S, A)
The matrix can be used with `tf.tensordot` to convert an arbitrary rank
`Tensor` of linear-scale spectral bins into the mel scale.
# S has shape [..., num_spectrogram_bins].
# M has shape [..., num_mel_bins].
M = tf.tensordot(S, A, 1)
# tf.tensordot does not support shape inference for this case yet.
M.set_shape(S.shape[:-1].concatenate(A.shape[-1:]))
Args:
num_mel_bins: Python int. How many bands in the resulting mel spectrum.
num_spectrogram_bins: An integer `Tensor`. How many bins there are in the
source spectrogram data, which is understood to be `fft_size // 2 + 1`,
i.e. the spectrogram only contains the nonredundant FFT bins.
sample_rate: Python float. Samples per second of the input signal used to
create the spectrogram. We need this to figure out the actual frequencies
for each spectrogram bin, which dictates how they are mapped into the mel
scale.
lower_edge_hertz: Python float. Lower bound on the frequencies to be
included in the mel spectrum. This corresponds to the lower edge of the
lowest triangular band.
upper_edge_hertz: Python float. The desired top edge of the highest
frequency band.
dtype: The `DType` of the result matrix. Must be a floating point type.
name: An optional name for the operation.
Returns:
A `Tensor` of shape `[num_spectrogram_bins, num_mel_bins]`.
Raises:
ValueError: If `num_mel_bins`/`num_spectrogram_bins`/`sample_rate` are not
positive, `lower_edge_hertz` is negative, frequency edges are incorrectly
ordered, `upper_edge_hertz` is larger than the Nyquist frequency, or
`sample_rate` is neither a Python float nor a constant Tensor.
[mel]: https://en.wikipedia.org/wiki/Mel_scale
"""
with ops.name_scope(name, 'linear_to_mel_weight_matrix') as name:
# Convert Tensor `sample_rate` to float, if possible.
if isinstance(sample_rate, ops.Tensor):
maybe_const_val = tensor_util.constant_value(sample_rate)
if maybe_const_val is not None:
sample_rate = maybe_const_val
else:
raise ValueError('`sample_rate` was a non-constant Tensor. Must be a '
'Python float or a constant Tensor.')
# Note: As num_spectrogram_bins is passed to `math_ops.linspace`
# and the validation is already done in linspace (both in shape function
# and in kernel), there is no need to validate num_spectrogram_bins here.
_validate_arguments(num_mel_bins, sample_rate,
lower_edge_hertz, upper_edge_hertz, dtype)
# This function can be constant folded by graph optimization since there are
# no Tensor inputs.
sample_rate = ops.convert_to_tensor(
sample_rate, dtype, name='sample_rate')
lower_edge_hertz = ops.convert_to_tensor(
lower_edge_hertz, dtype, name='lower_edge_hertz')
upper_edge_hertz = ops.convert_to_tensor(
upper_edge_hertz, dtype, name='upper_edge_hertz')
zero = ops.convert_to_tensor(0.0, dtype)
# HTK excludes the spectrogram DC bin.
bands_to_zero = 1
nyquist_hertz = sample_rate / 2.0
linear_frequencies = math_ops.linspace(
zero, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:]
spectrogram_bins_mel = array_ops.expand_dims(
_hertz_to_mel(linear_frequencies), 1)
# Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
# center of each band is the lower and upper edge of the adjacent bands.
# Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
# num_mel_bins + 2 pieces.
band_edges_mel = shape_ops.frame(
math_ops.linspace(_hertz_to_mel(lower_edge_hertz),
_hertz_to_mel(upper_edge_hertz),
num_mel_bins + 2), frame_length=3, frame_step=1)
# Split the triples up and reshape them into [1, num_mel_bins] tensors.
lower_edge_mel, center_mel, upper_edge_mel = tuple(array_ops.reshape(
t, [1, num_mel_bins]) for t in array_ops.split(
band_edges_mel, 3, axis=1))
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the mel domain, not Hertz.
lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
center_mel - lower_edge_mel)
upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
upper_edge_mel - center_mel)
# Intersect the line segments with each other and zero.
mel_weights_matrix = math_ops.maximum(
zero, math_ops.minimum(lower_slopes, upper_slopes))
# Re-add the zeroed lower bins we sliced out above.
return array_ops.pad(
mel_weights_matrix, [[bands_to_zero, 0], [0, 0]], name=name)
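# Minimal usage sketch (illustrative only; assumes eager execution) that
# complements the docstring example above: build the [num_spectrogram_bins,
# num_mel_bins] matrix and apply it to a random magnitude spectrogram. All
# shapes and band edges below are hypothetical.
def _mel_matrix_usage_sketch():
  import numpy as np
  import tensorflow as tf
  num_spectrogram_bins, num_mel_bins = 513, 64
  mel_matrix = tf.signal.linear_to_mel_weight_matrix(
      num_mel_bins=num_mel_bins,
      num_spectrogram_bins=num_spectrogram_bins,
      sample_rate=16000.0,
      lower_edge_hertz=80.0,
      upper_edge_hertz=7600.0)  # Shape [513, 64].
  spectrogram = np.abs(
      np.random.randn(100, num_spectrogram_bins)).astype(np.float32)
  return tf.matmul(spectrogram, mel_matrix)  # Shape [100, 64].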
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/signal/mel_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""General shape ops for frames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import util_ops
from tensorflow.python.util.tf_export import tf_export
def _infer_frame_shape(signal, frame_length, frame_step, pad_end, axis):
"""Infers the shape of the return value of `frame`."""
frame_length = tensor_util.constant_value(frame_length)
frame_step = tensor_util.constant_value(frame_step)
axis = tensor_util.constant_value(axis)
if signal.shape.ndims is None:
return None
if axis is None:
return [None] * (signal.shape.ndims + 1)
signal_shape = signal.shape.as_list()
num_frames = None
frame_axis = signal_shape[axis]
outer_dimensions = signal_shape[:axis]
inner_dimensions = signal_shape[axis:][1:]
if signal_shape and frame_axis is not None:
if frame_step is not None and pad_end:
# Double negative is so that we round up.
num_frames = max(0, -(-frame_axis // frame_step))
elif frame_step is not None and frame_length is not None:
assert not pad_end
num_frames = max(
0, (frame_axis - frame_length + frame_step) // frame_step)
return outer_dimensions + [num_frames, frame_length] + inner_dimensions
@tf_export("signal.frame")
def frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1,
name=None):
"""Expands `signal`'s `axis` dimension into frames of `frame_length`.
Slides a window of size `frame_length` over `signal`'s `axis` dimension
with a stride of `frame_step`, replacing the `axis` dimension with
`[frames, frame_length]` frames.
If `pad_end` is True, window positions that are past the end of the `axis`
dimension are padded with `pad_value` until the window moves fully past the
end of the dimension. Otherwise, only window positions that fully overlap the
`axis` dimension are produced.
For example:
```python
pcm = tf.compat.v1.placeholder(tf.float32, [None, 9152])
frames = tf.signal.frame(pcm, 512, 180)
magspec = tf.abs(tf.signal.rfft(frames, [512]))
image = tf.expand_dims(magspec, 3)
```
Args:
signal: A `[..., samples, ...]` `Tensor`. The rank and dimensions
may be unknown. Rank must be at least 1.
frame_length: The frame length in samples. An integer or scalar `Tensor`.
frame_step: The frame hop size in samples. An integer or scalar `Tensor`.
pad_end: Whether to pad the end of `signal` with `pad_value`.
pad_value: An optional scalar `Tensor` to use where the input signal
does not exist when `pad_end` is True.
axis: A scalar integer `Tensor` indicating the axis to frame. Defaults to
the last axis. Supports negative values for indexing from the end.
name: An optional name for the operation.
Returns:
A `Tensor` of frames with shape `[..., frames, frame_length, ...]`.
Raises:
ValueError: If `frame_length`, `frame_step`, `pad_value`, or `axis` are not
scalar.
"""
with ops.name_scope(name, "frame", [signal, frame_length, frame_step,
pad_value]):
signal = ops.convert_to_tensor(signal, name="signal")
frame_length = ops.convert_to_tensor(frame_length, name="frame_length")
frame_step = ops.convert_to_tensor(frame_step, name="frame_step")
axis = ops.convert_to_tensor(axis, name="axis")
signal.shape.with_rank_at_least(1)
frame_length.shape.assert_has_rank(0)
frame_step.shape.assert_has_rank(0)
axis.shape.assert_has_rank(0)
result_shape = _infer_frame_shape(signal, frame_length, frame_step, pad_end,
axis)
# Axis can be negative. Convert it to positive.
signal_rank = array_ops.rank(signal)
axis = math_ops.range(signal_rank)[axis]
signal_shape = array_ops.shape(signal)
outer_dimensions, length_samples, inner_dimensions = array_ops.split(
signal_shape, [axis, 1, signal_rank - 1 - axis])
length_samples = array_ops.reshape(length_samples, [])
num_outer_dimensions = array_ops.size(outer_dimensions)
num_inner_dimensions = array_ops.size(inner_dimensions)
# If padding is requested, pad the input signal tensor with pad_value.
if pad_end:
pad_value = ops.convert_to_tensor(pad_value, signal.dtype)
pad_value.shape.assert_has_rank(0)
# Calculate number of frames, using double negatives to round up.
num_frames = -(-length_samples // frame_step)
# Pad the signal by up to frame_length samples based on how many samples
# are remaining starting from last_frame_position.
pad_samples = math_ops.maximum(
0, frame_length + frame_step * (num_frames - 1) - length_samples)
# Pad the inner dimension of signal by pad_samples.
paddings = array_ops.concat(
[array_ops.zeros([num_outer_dimensions, 2], dtype=pad_samples.dtype),
[[0, pad_samples]],
array_ops.zeros([num_inner_dimensions, 2], dtype=pad_samples.dtype)],
0)
signal = array_ops.pad(signal, paddings, constant_values=pad_value)
signal_shape = array_ops.shape(signal)
length_samples = signal_shape[axis]
else:
num_frames = math_ops.maximum(
0, 1 + (length_samples - frame_length) // frame_step)
subframe_length = util_ops.gcd(frame_length, frame_step)
subframes_per_frame = frame_length // subframe_length
subframes_per_hop = frame_step // subframe_length
num_subframes = length_samples // subframe_length
slice_shape = array_ops.concat([outer_dimensions,
[num_subframes * subframe_length],
inner_dimensions], 0)
subframe_shape = array_ops.concat([outer_dimensions,
[num_subframes, subframe_length],
inner_dimensions], 0)
subframes = array_ops.reshape(array_ops.strided_slice(
signal, array_ops.zeros_like(signal_shape),
slice_shape), subframe_shape)
# frame_selector is a [num_frames, subframes_per_frame] tensor
# that indexes into the appropriate frame in subframes. For example:
# [[0, 0, 0, 0], [2, 2, 2, 2], [4, 4, 4, 4]]
frame_selector = array_ops.reshape(
math_ops.range(num_frames) * subframes_per_hop, [num_frames, 1])
# subframe_selector is a [num_frames, subframes_per_frame] tensor
# that indexes into the appropriate subframe within a frame. For example:
# [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
subframe_selector = array_ops.reshape(
math_ops.range(subframes_per_frame), [1, subframes_per_frame])
# Adding the 2 selector tensors together produces a [num_frames,
# subframes_per_frame] tensor of indices to use with tf.gather to select
# subframes from subframes. We then reshape the inner-most
# subframes_per_frame dimension to stitch the subframes together into
# frames. For example: [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]].
selector = frame_selector + subframe_selector
frames = array_ops.reshape(
array_ops.gather(subframes, selector, axis=axis),
array_ops.concat([outer_dimensions, [num_frames, frame_length],
inner_dimensions], 0))
if result_shape:
frames.set_shape(result_shape)
return frames
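# Minimal usage sketch (illustrative only; assumes eager execution) showing the
# two frame counts computed above for a hypothetical 10-sample signal:
# 1 + (10 - 4) // 3 = 3 full frames without padding, ceil(10 / 3) = 4 with it.
def _frame_usage_sketch():
  import numpy as np
  import tensorflow as tf
  signal = np.arange(10, dtype=np.float32)
  full = tf.signal.frame(signal, frame_length=4, frame_step=3)  # Shape [3, 4].
  padded = tf.signal.frame(signal, frame_length=4, frame_step=3,
                           pad_end=True)                        # Shape [4, 4].
  return full, padded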
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/signal/shape_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for computing common window functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('signal.hann_window')
def hann_window(window_length, periodic=True, dtype=dtypes.float32, name=None):
"""Generate a [Hann window][hann].
Args:
window_length: A scalar `Tensor` indicating the window length to generate.
periodic: A bool `Tensor` indicating whether to generate a periodic or
symmetric window. Periodic windows are typically used for spectral
analysis while symmetric windows are typically used for digital
filter design.
dtype: The data type to produce. Must be a floating point type.
name: An optional name for the operation.
Returns:
A `Tensor` of shape `[window_length]` of type `dtype`.
Raises:
ValueError: If `dtype` is not a floating point type.
[hann]: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
"""
return _raised_cosine_window(name, 'hann_window', window_length, periodic,
dtype, 0.5, 0.5)
@tf_export('signal.hamming_window')
def hamming_window(window_length, periodic=True, dtype=dtypes.float32,
name=None):
"""Generate a [Hamming][hamming] window.
Args:
window_length: A scalar `Tensor` indicating the window length to generate.
periodic: A bool `Tensor` indicating whether to generate a periodic or
symmetric window. Periodic windows are typically used for spectral
analysis while symmetric windows are typically used for digital
filter design.
dtype: The data type to produce. Must be a floating point type.
name: An optional name for the operation.
Returns:
A `Tensor` of shape `[window_length]` of type `dtype`.
Raises:
ValueError: If `dtype` is not a floating point type.
[hamming]: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
"""
return _raised_cosine_window(name, 'hamming_window', window_length, periodic,
dtype, 0.54, 0.46)
def _raised_cosine_window(name, default_name, window_length, periodic,
dtype, a, b):
"""Helper function for computing a raised cosine window.
Args:
name: Name to use for the scope.
default_name: Default name to use for the scope.
window_length: A scalar `Tensor` or integer indicating the window length.
periodic: A bool `Tensor` indicating whether to generate a periodic or
symmetric window.
dtype: A floating point `DType`.
a: The alpha parameter to the raised cosine window.
b: The beta parameter to the raised cosine window.
Returns:
A `Tensor` of shape `[window_length]` of type `dtype`.
Raises:
ValueError: If `dtype` is not a floating point type or `window_length` is
not scalar or `periodic` is not scalar.
"""
if not dtype.is_floating:
raise ValueError('dtype must be a floating point type. Found %s' % dtype)
with ops.name_scope(name, default_name, [window_length, periodic]):
window_length = ops.convert_to_tensor(window_length, dtype=dtypes.int32,
name='window_length')
window_length.shape.assert_has_rank(0)
window_length_const = tensor_util.constant_value(window_length)
if window_length_const == 1:
return array_ops.ones([1], dtype=dtype)
periodic = math_ops.cast(
ops.convert_to_tensor(periodic, dtype=dtypes.bool, name='periodic'),
dtypes.int32)
periodic.shape.assert_has_rank(0)
even = 1 - math_ops.mod(window_length, 2)
n = math_ops.cast(window_length + periodic * even - 1, dtype=dtype)
count = math_ops.cast(math_ops.range(window_length), dtype)
cos_arg = constant_op.constant(2 * np.pi, dtype=dtype) * count / n
if window_length_const is not None:
return math_ops.cast(a - b * math_ops.cos(cos_arg), dtype=dtype)
return control_flow_ops.cond(
math_ops.equal(window_length, 1),
lambda: array_ops.ones([1], dtype=dtype),
lambda: math_ops.cast(a - b * math_ops.cos(cos_arg), dtype=dtype))
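# A NumPy sketch (illustrative only) of the raised-cosine formula used above
# for the Hann coefficients a = b = 0.5: the symmetric window divides by
# N - 1 (matching np.hanning), while the periodic window for even N divides by
# N. The window length below is a hypothetical even value.
def _raised_cosine_sketch_np(window_length=16):
  import numpy as np
  k = np.arange(window_length)
  hann_symmetric = 0.5 - 0.5 * np.cos(2.0 * np.pi * k / (window_length - 1))
  assert np.allclose(hann_symmetric, np.hanning(window_length))
  hann_periodic = 0.5 - 0.5 * np.cos(2.0 * np.pi * k / window_length)
  return hann_symmetric, hann_periodic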
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/signal/window_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Identity bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Identity",
]
class Identity(bijector.Bijector):
"""Compute Y = g(X) = X.
Example Use:
```python
# Create the Y=g(X)=X transform which is intended for Tensors with 1 batch
# ndim and 1 event ndim (i.e., vector of vectors).
identity = Identity()
x = [[1., 2],
[3, 4]]
x == identity.forward(x) == identity.inverse(x)
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self, validate_args=False, name="identity"):
super(Identity, self).__init__(
forward_min_event_ndims=0,
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
def _forward(self, x):
return x
def _inverse(self, y):
return y
def _inverse_log_det_jacobian(self, y):
return constant_op.constant(0., dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(0., dtype=x.dtype)
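# A minimal plain-Python sketch (not part of the original module) of the math
# behind the Identity bijector above. Because y = g(x) = x has Jacobian equal
# to the identity matrix and log|det I| = 0, both log-det Jacobian terms
# vanish, so transforming a distribution with this bijector leaves its
# log_prob unchanged. The class name below is hypothetical.
class _IdentityBijectorSketch(object):
  """Minimal stand-in mirroring the math of `Identity`."""

  def forward(self, x):
    return x

  def inverse(self, y):
    return y

  def inverse_log_det_jacobian(self, y):
    return 0.  # log|det d(inverse)/dy| = log|det I| = 0

  def forward_log_det_jacobian(self, x):
    return 0.  # log|det d(forward)/dx| = log|det I| = 0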
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/identity_bijector.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Laplace distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Laplace",
"LaplaceWithSoftplusScale",
]
@tf_export(v1=["distributions.Laplace"])
class Laplace(distribution.Distribution):
"""The Laplace distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) of this distribution is,
```none
pdf(x; mu, sigma) = exp(-|x - mu| / sigma) / Z
Z = 2 sigma
```
where `loc = mu`, `scale = sigma`, and `Z` is the normalization constant.
  Note that the Laplace distribution can be thought of as two exponential
  distributions spliced together "back-to-back."
  The Laplace distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Laplace(loc=0, scale=1)
Y = loc + scale * X
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Laplace"):
"""Construct Laplace distribution with parameters `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g., `loc / scale` is a valid operation).
Args:
loc: Floating point tensor which characterizes the location (center)
of the distribution.
scale: Positive floating point tensor which characterizes the spread of
the distribution.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` are of different dtype.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Laplace, self).__init__(
dtype=self._loc.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
# Uniform variates must be sampled from the open-interval `(-1, 1)` rather
# than `[-1, 1)`. In the case of `(0, 1)` we'd use
# `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
# positive, "normal" number. However, the concept of subnormality exists
# only at zero; here we need the smallest usable number larger than -1,
# i.e., `-1 + eps/2`.
uniform_samples = random_ops.random_uniform(
shape=shape,
minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
self.dtype.as_numpy_dtype(0.)),
maxval=1.,
dtype=self.dtype,
seed=seed)
return (self.loc - self.scale * math_ops.sign(uniform_samples) *
math_ops.log1p(-math_ops.abs(uniform_samples)))
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
return special_math.log_cdf_laplace(self._z(x))
def _log_survival_function(self, x):
return special_math.log_cdf_laplace(-self._z(x))
def _cdf(self, x):
z = self._z(x)
return (0.5 + 0.5 * math_ops.sign(z) *
(1. - math_ops.exp(-math_ops.abs(z))))
def _log_unnormalized_prob(self, x):
return -math_ops.abs(self._z(x))
def _log_normalization(self):
return math.log(2.) + math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast scale.
scale = self.scale + array_ops.zeros_like(self.loc)
return math.log(2.) + 1. + math_ops.log(scale)
def _mean(self):
return self.loc + array_ops.zeros_like(self.scale)
def _stddev(self):
return math.sqrt(2.) * self.scale + array_ops.zeros_like(self.loc)
def _median(self):
return self._mean()
def _mode(self):
return self._mean()
def _z(self, x):
return (x - self.loc) / self.scale
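# A minimal NumPy-only sketch (not part of the original module) of the
# inverse-CDF sampler used by `Laplace._sample_n` above. With u ~ Uniform(-1, 1)
# written as u = 2p - 1, the Laplace quantile function
#   Q(p) = loc - scale * sign(p - 0.5) * log(1 - 2 * |p - 0.5|)
# becomes loc - scale * sign(u) * log1p(-|u|), which is exactly the expression
# returned by `_sample_n`. The helper name below is hypothetical.
import numpy as np


def _sample_laplace_sketch(loc, scale, n, seed=None):
  """Draws `n` Laplace(loc, scale) samples via the inverse CDF."""
  rng = np.random.RandomState(seed)
  # Sample from the open interval (-1, 1); `np.nextafter` mirrors the `minval`
  # trick used in `_sample_n`.
  u = rng.uniform(np.nextafter(-1., 0.), 1., size=n)
  return loc - scale * np.sign(u) * np.log1p(-np.abs(u))


# Quick check of the first two moments (mean ~ loc, stddev ~ sqrt(2) * scale):
#   s = _sample_laplace_sketch(loc=1., scale=2., n=100000, seed=0)
#   s.mean(), s.std()   # approximately (1.0, 2.83)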
class LaplaceWithSoftplusScale(Laplace):
"""Laplace with softplus applied to `scale`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Laplace(loc, tf.nn.softplus(scale)) "
"instead.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="LaplaceWithSoftplusScale"):
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
super(LaplaceWithSoftplusScale, self).__init__(
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/laplace.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Categorical distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _broadcast_cat_event_and_params(event, params, base_dtype):
"""Broadcasts the event or distribution parameters."""
if event.dtype.is_integer:
pass
elif event.dtype.is_floating:
# When `validate_args=True` we've already ensured int/float casting
# is closed.
event = math_ops.cast(event, dtype=dtypes.int32)
else:
raise TypeError("`value` should have integer `dtype` or "
"`self.dtype` ({})".format(base_dtype))
shape_known_statically = (
params.shape.ndims is not None and
params.shape[:-1].is_fully_defined() and
event.shape.is_fully_defined())
if not shape_known_statically or params.shape[:-1] != event.shape:
params *= array_ops.ones_like(event[..., array_ops.newaxis],
dtype=params.dtype)
params_shape = array_ops.shape(params)[:-1]
event *= array_ops.ones(params_shape, dtype=event.dtype)
if params.shape.ndims is not None:
event.set_shape(tensor_shape.TensorShape(params.shape[:-1]))
return event, params
@tf_export(v1=["distributions.Categorical"])
class Categorical(distribution.Distribution):
"""Categorical distribution.
The Categorical distribution is parameterized by either probabilities or
log-probabilities of a set of `K` classes. It is defined over the integers
  `{0, 1, ..., K-1}`.
The Categorical distribution is closely related to the `OneHotCategorical` and
  `Multinomial` distributions. The Categorical distribution can be intuited as
  generating samples according to `argmax{ OneHotCategorical(probs) }`, which is
  itself identical to `argmax{ Multinomial(probs, total_count=1) }`.
#### Mathematical Details
The probability mass function (pmf) is,
```none
pmf(k; pi) = prod_j pi_j**[k == j]
```
#### Pitfalls
The number of classes, `K`, must not exceed:
- the largest integer representable by `self.dtype`, i.e.,
`2**(mantissa_bits+1)` (IEEE 754),
- the maximum `Tensor` index, i.e., `2**31-1`.
In other words,
```python
K <= min(2**31-1, {
tf.float16: 2**11,
tf.float32: 2**24,
tf.float64: 2**53 }[param.dtype])
```
Note: This condition is validated only when `self.validate_args = True`.
#### Examples
Creates a 3-class distribution with the 2nd class being most likely.
```python
dist = Categorical(probs=[0.1, 0.5, 0.4])
n = 1e4
empirical_prob = tf.cast(
tf.histogram_fixed_width(
dist.sample(int(n)),
[0., 2],
nbins=3),
dtype=tf.float32) / n
# ==> array([ 0.1005, 0.5037, 0.3958], dtype=float32)
```
Creates a 3-class distribution with the 2nd class being most likely.
Parameterized by [logits](https://en.wikipedia.org/wiki/Logit) rather than
probabilities.
```python
  dist = Categorical(logits=np.log([0.1, 0.5, 0.4]))
n = 1e4
empirical_prob = tf.cast(
tf.histogram_fixed_width(
dist.sample(int(n)),
[0., 2],
nbins=3),
dtype=tf.float32) / n
# ==> array([0.1045, 0.5047, 0.3908], dtype=float32)
```
Creates a 3-class distribution with the 3rd class being most likely.
The distribution functions can be evaluated on counts.
```python
# counts is a scalar.
p = [0.1, 0.4, 0.5]
dist = Categorical(probs=p)
dist.prob(0) # Shape []
# p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match counts.
counts = [1, 0]
dist.prob(counts) # Shape [2]
  # p will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7, 3]
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(
self,
logits=None,
probs=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="Categorical"):
"""Initialize Categorical distributions using class log-probabilities.
Args:
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of logits for each class. Only one of `logits` or
`probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of probabilities for each class. Only one of
`logits` or `probs` should be passed in.
dtype: The type of the event samples (default: int32).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
validate_args=validate_args,
multidimensional=True,
name=name)
if validate_args:
self._logits = distribution_util.embed_check_categorical_event_shape(
self._logits)
logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
if logits_shape_static.ndims is not None:
self._batch_rank = ops.convert_to_tensor(
logits_shape_static.ndims - 1,
dtype=dtypes.int32,
name="batch_rank")
else:
with ops.name_scope(name="batch_rank"):
self._batch_rank = array_ops.rank(self._logits) - 1
logits_shape = array_ops.shape(self._logits, name="logits_shape")
if tensor_shape.dimension_value(logits_shape_static[-1]) is not None:
self._event_size = ops.convert_to_tensor(
logits_shape_static.dims[-1].value,
dtype=dtypes.int32,
name="event_size")
else:
with ops.name_scope(name="event_size"):
self._event_size = logits_shape[self._batch_rank]
if logits_shape_static[:-1].is_fully_defined():
self._batch_shape_val = constant_op.constant(
logits_shape_static[:-1].as_list(),
dtype=dtypes.int32,
name="batch_shape")
else:
with ops.name_scope(name="batch_shape"):
self._batch_shape_val = logits_shape[:-1]
super(Categorical, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits,
self._probs],
name=name)
@property
def event_size(self):
"""Scalar `int32` tensor: the number of classes."""
return self._event_size
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Vector of coordinatewise probabilities."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.identity(self._batch_shape_val)
def _batch_shape(self):
return self.logits.get_shape()[:-1]
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
if self.logits.get_shape().ndims == 2:
logits_2d = self.logits
else:
logits_2d = array_ops.reshape(self.logits, [-1, self.event_size])
sample_dtype = dtypes.int64 if self.dtype.size > 4 else dtypes.int32
draws = random_ops.multinomial(
logits_2d, n, seed=seed, output_dtype=sample_dtype)
draws = array_ops.reshape(
array_ops.transpose(draws),
array_ops.concat([[n], self.batch_shape_tensor()], 0))
return math_ops.cast(draws, self.dtype)
def _cdf(self, k):
k = ops.convert_to_tensor(k, name="k")
if self.validate_args:
k = distribution_util.embed_check_integer_casting_closed(
k, target_dtype=dtypes.int32)
k, probs = _broadcast_cat_event_and_params(
k, self.probs, base_dtype=self.dtype.base_dtype)
# batch-flatten everything in order to use `sequence_mask()`.
batch_flattened_probs = array_ops.reshape(probs,
(-1, self._event_size))
batch_flattened_k = array_ops.reshape(k, [-1])
to_sum_over = array_ops.where(
array_ops.sequence_mask(batch_flattened_k, self._event_size),
batch_flattened_probs,
array_ops.zeros_like(batch_flattened_probs))
batch_flattened_cdf = math_ops.reduce_sum(to_sum_over, axis=-1)
# Reshape back to the shape of the argument.
return array_ops.reshape(batch_flattened_cdf, array_ops.shape(k))
def _log_prob(self, k):
k = ops.convert_to_tensor(k, name="k")
if self.validate_args:
k = distribution_util.embed_check_integer_casting_closed(
k, target_dtype=dtypes.int32)
k, logits = _broadcast_cat_event_and_params(
k, self.logits, base_dtype=self.dtype.base_dtype)
return -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k,
logits=logits)
def _entropy(self):
return -math_ops.reduce_sum(
nn_ops.log_softmax(self.logits) * self.probs, axis=-1)
def _mode(self):
ret = math_ops.argmax(self.logits, axis=self._batch_rank)
ret = math_ops.cast(ret, self.dtype)
ret.set_shape(self.batch_shape)
return ret
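# A minimal NumPy-only sketch (not part of the original module) of what
# `Categorical._cdf` above computes. The batch-flatten / `sequence_mask` /
# `where` / `reduce_sum` sequence amounts to summing the probabilities of all
# classes strictly below `k`, i.e. cdf(k) = sum_{j < k} probs[..., j]. The
# helper name below is hypothetical.
import numpy as np


def _categorical_cdf_sketch(probs, k):
  """Sums the probabilities of classes below `k`, broadcasting over batches."""
  probs = np.asarray(probs, dtype=np.float64)
  k = np.asarray(k)
  classes = np.arange(probs.shape[-1])
  # Mirrors `tf.sequence_mask(k, event_size)`: True for class indices < k.
  mask = classes < k[..., np.newaxis]
  return np.where(mask, probs, 0.).sum(axis=-1)


# Example with probs = [0.1, 0.4, 0.5]:
#   _categorical_cdf_sketch([0.1, 0.4, 0.5], 2)   # ==> 0.5 (= 0.1 + 0.4)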
@kullback_leibler.RegisterKL(Categorical, Categorical)
def _kl_categorical_categorical(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Categorical.
Args:
a: instance of a Categorical distribution object.
b: instance of a Categorical distribution object.
name: (optional) Name to use for created operations.
default is "kl_categorical_categorical".
Returns:
Batchwise KL(a || b)
"""
with ops.name_scope(name, "kl_categorical_categorical",
values=[a.logits, b.logits]):
# sum(probs log(probs / (1 - probs)))
delta_log_probs1 = (nn_ops.log_softmax(a.logits) -
nn_ops.log_softmax(b.logits))
return math_ops.reduce_sum(nn_ops.softmax(a.logits) * delta_log_probs1,
axis=-1)
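# A minimal NumPy-only sketch (not part of the original module) of the
# categorical KL computed above:
#   KL(a || b) = sum_k p_a[k] * (log p_a[k] - log p_b[k]),
# evaluated from logits via log-softmax exactly as
# `_kl_categorical_categorical` does. The helper names below are hypothetical.
import numpy as np


def _log_softmax_sketch(logits):
  """Numerically stable log-softmax over the last axis."""
  logits = np.asarray(logits, dtype=np.float64)
  shifted = logits - logits.max(axis=-1, keepdims=True)
  return shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))


def _kl_categorical_sketch(logits_a, logits_b):
  log_pa = _log_softmax_sketch(logits_a)
  log_pb = _log_softmax_sketch(logits_b)
  return (np.exp(log_pa) * (log_pa - log_pb)).sum(axis=-1)


# Sanity check: the KL is zero when the two sets of logits coincide.
#   _kl_categorical_sketch([1., 2., 3.], [1., 2., 3.])   # ==> 0.0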
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/categorical.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Dirichlet distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Dirichlet",
]
_dirichlet_sample_note = """Note: `value` must be a non-negative tensor with
dtype `self.dtype` and be in the `(self.event_shape() - 1)`-simplex, i.e.,
`tf.reduce_sum(value, -1) = 1`. It must have a shape compatible with
`self.batch_shape() + self.event_shape()`."""
@tf_export(v1=["distributions.Dirichlet"])
class Dirichlet(distribution.Distribution):
"""Dirichlet distribution.
The Dirichlet distribution is defined over the
[`(k-1)`-simplex](https://en.wikipedia.org/wiki/Simplex) using a positive,
length-`k` vector `concentration` (`k > 1`). The Dirichlet is identically the
Beta distribution when `k = 2`.
#### Mathematical Details
The Dirichlet is a distribution over the open `(k-1)`-simplex, i.e.,
```none
S^{k-1} = { (x_0, ..., x_{k-1}) in R^k : sum_j x_j = 1 and all_j x_j > 0 }.
```
The probability density function (pdf) is,
```none
pdf(x; alpha) = prod_j x_j**(alpha_j - 1) / Z
Z = prod_j Gamma(alpha_j) / Gamma(sum_j alpha_j)
```
where:
* `x in S^{k-1}`, i.e., the `(k-1)`-simplex,
* `concentration = alpha = [alpha_0, ..., alpha_{k-1}]`, `alpha_j > 0`,
* `Z` is the normalization constant aka the [multivariate beta function](
https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),
and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The `concentration` represents mean total counts of class occurrence, i.e.,
```none
concentration = alpha = mean * total_concentration
```
where `mean` in `S^{k-1}` and `total_concentration` is a positive real number
representing a mean total count.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: Some components of the samples can be zero due to finite precision.
This happens more often when some of the concentrations are very small.
Make sure to round the samples to `np.finfo(dtype).tiny` before computing the
density.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create a single trivariate Dirichlet, with the 3rd class being three times
# more frequent than the first. I.e., batch_shape=[], event_shape=[3].
alpha = [1., 2, 3]
dist = tfd.Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 3]
# x has one sample, one batch, three classes:
x = [.2, .3, .5] # shape: [3]
dist.prob(x) # shape: []
# x has two samples from one batch:
x = [[.1, .4, .5],
[.2, .3, .5]]
dist.prob(x) # shape: [2]
# alpha will be broadcast to shape [5, 7, 3] to match x.
x = [[...]] # shape: [5, 7, 3]
dist.prob(x) # shape: [5, 7]
```
```python
# Create batch_shape=[2], event_shape=[3]:
alpha = [[1., 2, 3],
[4, 5, 6]] # shape: [2, 3]
dist = tfd.Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # shape: [2]
```
Compute the gradients of samples w.r.t. the parameters:
```python
alpha = tf.constant([1.0, 2.0, 3.0])
dist = tfd.Dirichlet(alpha)
samples = dist.sample(5) # Shape [5, 3]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, alpha)
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
concentration,
validate_args=False,
allow_nan_stats=True,
name="Dirichlet"):
"""Initialize a batch of Dirichlet distributions.
Args:
concentration: Positive floating-point `Tensor` indicating mean number
of class occurrences; aka "alpha". Implies `self.dtype`, and
`self.batch_shape`, `self.event_shape`, i.e., if
`concentration.shape = [N1, N2, ..., Nm, k]` then
`batch_shape = [N1, N2, ..., Nm]` and
`event_shape = [k]`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration]) as name:
self._concentration = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration, name="concentration"),
validate_args)
self._total_concentration = math_ops.reduce_sum(self._concentration, -1)
super(Dirichlet, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._total_concentration],
name=name)
@property
def concentration(self):
"""Concentration parameter; expected counts for that coordinate."""
return self._concentration
@property
def total_concentration(self):
"""Sum of last dim of concentration parameter."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return array_ops.shape(self.concentration)[-1:]
def _event_shape(self):
return self.concentration.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
gamma_sample = random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
dtype=self.dtype,
seed=seed)
return gamma_sample / math_ops.reduce_sum(gamma_sample, -1, keepdims=True)
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return math_ops.reduce_sum(math_ops.xlogy(self.concentration - 1., x), -1)
def _log_normalization(self):
return special_math_ops.lbeta(self.concentration)
def _entropy(self):
k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
return (
self._log_normalization()
+ ((self.total_concentration - k)
* math_ops.digamma(self.total_concentration))
- math_ops.reduce_sum(
(self.concentration - 1.) * math_ops.digamma(self.concentration),
axis=-1))
def _mean(self):
return self.concentration / self.total_concentration[..., array_ops.newaxis]
def _covariance(self):
x = self._variance_scale_term() * self._mean()
return array_ops.matrix_set_diag(
-math_ops.matmul(x[..., array_ops.newaxis],
x[..., array_ops.newaxis, :]), # outer prod
self._variance())
def _variance(self):
scale = self._variance_scale_term()
x = scale * self._mean()
return x * (scale - x)
def _variance_scale_term(self):
"""Helper to `_covariance` and `_variance` which computes a shared scale."""
return math_ops.rsqrt(1. + self.total_concentration[..., array_ops.newaxis])
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when any `concentration <= 1`. If
`self.allow_nan_stats` is `True`, `NaN` is used for undefined modes. If
`self.allow_nan_stats` is `False` an exception is raised when one or more
modes are undefined.""")
def _mode(self):
k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
mode = (self.concentration - 1.) / (
self.total_concentration[..., array_ops.newaxis] - k)
if self.allow_nan_stats:
nan = array_ops.fill(
array_ops.shape(mode),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where_v2(
math_ops.reduce_all(self.concentration > 1., axis=-1), mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype),
self.concentration,
message="Mode undefined when any concentration <= 1"),
], mode)
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
check_ops.assert_rank_at_least(
concentration, 1,
message="Concentration parameter must have >=1 dimensions."),
check_ops.assert_less(
1, array_ops.shape(concentration)[-1],
message="Concentration parameter must have event_size >= 2."),
], concentration)
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x, message="samples must be positive"),
check_ops.assert_near(
array_ops.ones([], dtype=self.dtype),
math_ops.reduce_sum(x, -1),
message="sample last-dimension must sum to `1`"),
], x)
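# A minimal NumPy-only sketch (not part of the original module) of
# `Dirichlet._sample_n` above: drawing independent Gamma(concentration_j, 1)
# variates and normalizing by their sum produces a Dirichlet(concentration)
# sample on the simplex. The helper name below is hypothetical.
import numpy as np


def _sample_dirichlet_sketch(concentration, n, seed=None):
  """Draws `n` Dirichlet samples by normalizing independent Gamma draws."""
  rng = np.random.RandomState(seed)
  concentration = np.asarray(concentration, dtype=np.float64)
  gamma = rng.standard_gamma(concentration, size=(n,) + concentration.shape)
  return gamma / gamma.sum(axis=-1, keepdims=True)


# Example: samples have shape [4, 3] and each row sums to 1.
#   s = _sample_dirichlet_sketch([1., 2., 3.], n=4, seed=0)
#   s.sum(axis=-1)   # ==> approximately [1., 1., 1., 1.]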
@kullback_leibler.RegisterKL(Dirichlet, Dirichlet)
def _kl_dirichlet_dirichlet(d1, d2, name=None):
"""Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.
Args:
d1: instance of a Dirichlet distribution object.
d2: instance of a Dirichlet distribution object.
name: (optional) Name to use for created operations.
default is "kl_dirichlet_dirichlet".
Returns:
Batchwise KL(d1 || d2)
"""
with ops.name_scope(name, "kl_dirichlet_dirichlet", values=[
d1.concentration, d2.concentration]):
# The KL between Dirichlet distributions can be derived as follows. We have
#
# Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)]
#
# where B(a) is the multivariate Beta function:
#
# B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n])
#
# The KL is
#
# KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
#
# so we'll need to know the log density of the Dirichlet. This is
#
# log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a)
#
# The only term that matters for the expectations is the log(x[i]). To
# compute the expectation of this term over the Dirichlet density, we can
# use the following facts about the Dirichlet in exponential family form:
# 1. log(x[i]) is a sufficient statistic
# 2. expected sufficient statistics (of any exp family distribution) are
# equal to derivatives of the log normalizer with respect to
# corresponding natural parameters: E{T[i](x)} = dA/d(eta[i])
#
# To proceed, we can rewrite the Dirichlet density in exponential family
# form as follows:
#
# Dir(x; a) = exp{eta(a) . T(x) - A(a)}
#
# where '.' is the dot product of vectors eta and T, and A is a scalar:
#
# eta[i](a) = a[i] - 1
# T[i](x) = log(x[i])
# A(a) = log B(a)
#
# Now, we can use fact (2) above to write
#
# E_Dir(x; a)[log(x[i])]
# = dA(a) / da[i]
# = d/da[i] log B(a)
# = d/da[i] (sum_j lgamma(a[j])) - lgamma(sum_j a[j])
    #       = digamma(a[i]) - digamma(sum_j a[j])
#
# Putting it all together, we have
#
# KL[Dir(x; a) || Dir(x; b)]
    #     = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
    #     = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])]} - (lbeta(a) - lbeta(b))
    #     = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b)
    #     = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))]
    #          - lbeta(a) + lbeta(b)
digamma_sum_d1 = math_ops.digamma(
math_ops.reduce_sum(d1.concentration, axis=-1, keepdims=True))
digamma_diff = math_ops.digamma(d1.concentration) - digamma_sum_d1
concentration_diff = d1.concentration - d2.concentration
return (math_ops.reduce_sum(concentration_diff * digamma_diff, axis=-1) -
special_math_ops.lbeta(d1.concentration) +
special_math_ops.lbeta(d2.concentration))
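# A minimal NumPy/SciPy sketch (not part of the original module; SciPy is
# assumed to be available purely for illustration) of the KL derived in the
# comments above, with lbeta(a) written as sum_j lgamma(a_j) - lgamma(sum_j a_j).
# The helper name below is hypothetical.
import numpy as np
from scipy.special import digamma, gammaln


def _kl_dirichlet_sketch(conc1, conc2):
  """Computes KL(Dirichlet(conc1) || Dirichlet(conc2)) over the last axis."""
  conc1 = np.asarray(conc1, dtype=np.float64)
  conc2 = np.asarray(conc2, dtype=np.float64)
  lbeta1 = gammaln(conc1).sum(axis=-1) - gammaln(conc1.sum(axis=-1))
  lbeta2 = gammaln(conc2).sum(axis=-1) - gammaln(conc2.sum(axis=-1))
  digamma_diff = digamma(conc1) - digamma(conc1.sum(axis=-1, keepdims=True))
  return ((conc1 - conc2) * digamma_diff).sum(axis=-1) - lbeta1 + lbeta2


# Sanity check: KL(d, d) == 0.
#   _kl_dirichlet_sketch([1., 2., 3.], [1., 2., 3.])   # ==> 0.0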
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/dirichlet.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import identity_bijector
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"TransformedDistribution",
]
# The following helper functions attempt to statically perform a TF operation.
# These functions make debugging easier since we can do more validation during
# graph construction.
def _static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def _logical_and(*args):
"""Convenience function which attempts to statically `reduce_all`."""
args_ = [_static_value(x) for x in args]
if any(x is not None and not bool(x) for x in args_):
return constant_op.constant(False)
if all(x is not None and bool(x) for x in args_):
return constant_op.constant(True)
if len(args) == 2:
return math_ops.logical_and(*args)
return math_ops.reduce_all(args)
def _logical_equal(x, y):
"""Convenience function which attempts to statically compute `x == y`."""
x_ = _static_value(x)
y_ = _static_value(y)
if x_ is None or y_ is None:
return math_ops.equal(x, y)
return constant_op.constant(np.array_equal(x_, y_))
def _logical_not(x):
"""Convenience function which attempts to statically apply `logical_not`."""
x_ = _static_value(x)
if x_ is None:
return math_ops.logical_not(x)
return constant_op.constant(np.logical_not(x_))
def _concat_vectors(*args):
"""Convenience function which concatenates input vectors."""
args_ = [_static_value(x) for x in args]
if any(x_ is None for x_ in args_):
return array_ops.concat(args, 0)
return constant_op.constant([x_ for vec_ in args_ for x_ in vec_])
def _pick_scalar_condition(pred, cond_true, cond_false):
"""Convenience function which chooses the condition based on the predicate."""
# Note: This function is only valid if all of pred, cond_true, and cond_false
# are scalars. This means its semantics are arguably more like tf.cond than
# tf.select even though we use tf.select to implement it.
pred_ = _static_value(pred)
if pred_ is None:
return array_ops.where_v2(pred, cond_true, cond_false)
return cond_true if pred_ else cond_false
def _ones_like(x):
"""Convenience function attempts to statically construct `ones_like`."""
# Should only be used for small vectors.
if x.get_shape().is_fully_defined():
return array_ops.ones(x.get_shape().as_list(), dtype=x.dtype)
return array_ops.ones_like(x)
def _ndims_from_shape(shape):
"""Returns `Tensor`'s `rank` implied by a `Tensor` shape."""
if shape.get_shape().ndims not in (None, 1):
raise ValueError("input is not a valid shape: not 1D")
if not shape.dtype.is_integer:
raise TypeError("input is not a valid shape: wrong dtype")
if shape.get_shape().is_fully_defined():
return constant_op.constant(shape.get_shape().as_list()[0])
return array_ops.shape(shape)[0]
def _is_scalar_from_shape(shape):
"""Returns `True` `Tensor` if `Tensor` shape implies a scalar."""
return _logical_equal(_ndims_from_shape(shape), 0)
class TransformedDistribution(distribution_lib.Distribution):
"""A Transformed Distribution.
A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,
and a deterministic, invertible, differentiable transform, `Y = g(X)`. The
transform is typically an instance of the `Bijector` class and the base
distribution is typically an instance of the `Distribution` class.
A `Bijector` is expected to implement the following functions:
- `forward`,
- `inverse`,
- `inverse_log_det_jacobian`.
The semantics of these functions are outlined in the `Bijector` documentation.
We now describe how a `TransformedDistribution` alters the input/outputs of a
`Distribution` associated with a random variable (rv) `X`.
Write `cdf(Y=y)` for an absolutely continuous cumulative distribution function
of random variable `Y`; write the probability density function `pdf(Y=y) :=
  d^k / (dy_1,...,dy_k) cdf(Y=y)` for its derivative with respect to `Y` evaluated at
`y`. Assume that `Y = g(X)` where `g` is a deterministic diffeomorphism,
i.e., a non-random, continuous, differentiable, and invertible function.
Write the inverse of `g` as `X = g^{-1}(Y)` and `(J o g)(x)` for the Jacobian
of `g` evaluated at `x`.
A `TransformedDistribution` implements the following operations:
* `sample`
Mathematically: `Y = g(X)`
Programmatically: `bijector.forward(distribution.sample(...))`
* `log_prob`
Mathematically: `(log o pdf)(Y=y) = (log o pdf o g^{-1})(y)
+ (log o abs o det o J o g^{-1})(y)`
Programmatically: `(distribution.log_prob(bijector.inverse(y))
+ bijector.inverse_log_det_jacobian(y))`
* `log_cdf`
Mathematically: `(log o cdf)(Y=y) = (log o cdf o g^{-1})(y)`
Programmatically: `distribution.log_cdf(bijector.inverse(x))`
* and similarly for: `cdf`, `prob`, `log_survival_function`,
`survival_function`.
A simple example constructing a Log-Normal distribution from a Normal
distribution:
```python
ds = tfp.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Exp(),
name="LogNormalTransformedDistribution")
```
A `LogNormal` made from callables:
```python
ds = tfp.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Inline(
forward_fn=tf.exp,
inverse_fn=tf.math.log,
inverse_log_det_jacobian_fn=(
        lambda y: -tf.reduce_sum(tf.math.log(y), axis=-1))),
    name="LogNormalTransformedDistribution")
```
Another example constructing a Normal from a StandardNormal:
```python
ds = tfp.distributions
normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Affine(
shift=-1.,
      scale_identity_multiplier=2.),
name="NormalTransformedDistribution")
```
A `TransformedDistribution`'s batch- and event-shape are implied by the base
distribution unless explicitly overridden by `batch_shape` or `event_shape`
arguments. Specifying an overriding `batch_shape` (`event_shape`) is
permitted only if the base distribution has scalar batch-shape (event-shape).
The bijector is applied to the distribution as if the distribution possessed
the overridden shape(s). The following example demonstrates how to construct a
multivariate Normal as a `TransformedDistribution`.
```python
ds = tfp.distributions
# We will create two MVNs with batch_shape = event_shape = 2.
mean = [[-1., 0], # batch:0
[0., 1]] # batch:1
chol_cov = [[[1., 0],
[0, 1]], # batch:0
[[1, 0],
[2, 2]]] # batch:1
mvn1 = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Affine(shift=mean, scale_tril=chol_cov),
batch_shape=[2], # Valid because base_distribution.batch_shape == [].
event_shape=[2]) # Valid because base_distribution.event_shape == [].
mvn2 = ds.MultivariateNormalTriL(loc=mean, scale_tril=chol_cov)
# mvn1.log_prob(x) == mvn2.log_prob(x)
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
distribution,
bijector=None,
batch_shape=None,
event_shape=None,
validate_args=False,
name=None):
"""Construct a Transformed Distribution.
Args:
distribution: The base distribution instance to transform. Typically an
instance of `Distribution`.
bijector: The object responsible for calculating the transformation.
Typically an instance of `Bijector`. `None` means `Identity()`.
batch_shape: `integer` vector `Tensor` which overrides `distribution`
`batch_shape`; valid only if `distribution.is_scalar_batch()`.
event_shape: `integer` vector `Tensor` which overrides `distribution`
`event_shape`; valid only if `distribution.is_scalar_event()`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class. Default:
`bijector.name + distribution.name`.
"""
parameters = dict(locals())
name = name or (("" if bijector is None else bijector.name) +
distribution.name)
with ops.name_scope(name, values=[event_shape, batch_shape]) as name:
# For convenience we define some handy constants.
self._zero = constant_op.constant(0, dtype=dtypes.int32, name="zero")
self._empty = constant_op.constant([], dtype=dtypes.int32, name="empty")
if bijector is None:
bijector = identity_bijector.Identity(validate_args=validate_args)
# We will keep track of a static and dynamic version of
# self._is_{batch,event}_override. This way we can do more prior to graph
# execution, including possibly raising Python exceptions.
self._override_batch_shape = self._maybe_validate_shape_override(
batch_shape, distribution.is_scalar_batch(), validate_args,
"batch_shape")
self._is_batch_override = _logical_not(_logical_equal(
_ndims_from_shape(self._override_batch_shape), self._zero))
self._is_maybe_batch_override = bool(
tensor_util.constant_value(self._override_batch_shape) is None or
tensor_util.constant_value(self._override_batch_shape).size != 0)
self._override_event_shape = self._maybe_validate_shape_override(
event_shape, distribution.is_scalar_event(), validate_args,
"event_shape")
self._is_event_override = _logical_not(_logical_equal(
_ndims_from_shape(self._override_event_shape), self._zero))
self._is_maybe_event_override = bool(
tensor_util.constant_value(self._override_event_shape) is None or
tensor_util.constant_value(self._override_event_shape).size != 0)
# To convert a scalar distribution into a multivariate distribution we
# will draw dims from the sample dims, which are otherwise iid. This is
# easy to do except in the case that the base distribution has batch dims
# and we're overriding event shape. When that case happens the event dims
# will incorrectly be to the left of the batch dims. In this case we'll
# cyclically permute left the new dims.
self._needs_rotation = _logical_and(
self._is_event_override,
_logical_not(self._is_batch_override),
_logical_not(distribution.is_scalar_batch()))
override_event_ndims = _ndims_from_shape(self._override_event_shape)
self._rotate_ndims = _pick_scalar_condition(
self._needs_rotation, override_event_ndims, 0)
# We'll be reducing the head dims (if at all), i.e., this will be []
# if we don't need to reduce.
self._reduce_event_indices = math_ops.range(
self._rotate_ndims - override_event_ndims, self._rotate_ndims)
self._distribution = distribution
self._bijector = bijector
super(TransformedDistribution, self).__init__(
dtype=self._distribution.dtype,
reparameterization_type=self._distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
# We let TransformedDistribution access _graph_parents since this class
# is more like a baseclass than derived.
graph_parents=(distribution._graph_parents + # pylint: disable=protected-access
bijector.graph_parents),
name=name)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._distribution
@property
def bijector(self):
"""Function transforming x => y."""
return self._bijector
def _event_shape_tensor(self):
return self.bijector.forward_event_shape_tensor(
distribution_util.pick_vector(
self._is_event_override,
self._override_event_shape,
self.distribution.event_shape_tensor()))
def _event_shape(self):
# If there's a chance that the event_shape has been overridden, we return
# what we statically know about the `event_shape_override`. This works
# because: `_is_maybe_event_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `event_shape` or we do.
#
# Since the `bijector` may change the `event_shape`, we then forward what we
# know to the bijector. This allows the `bijector` to have final say in the
# `event_shape`.
static_override = tensor_util.constant_value_as_shape(
self._override_event_shape)
return self.bijector.forward_event_shape(
static_override
if self._is_maybe_event_override
else self.distribution.event_shape)
def _batch_shape_tensor(self):
return distribution_util.pick_vector(
self._is_batch_override,
self._override_batch_shape,
self.distribution.batch_shape_tensor())
def _batch_shape(self):
# If there's a chance that the batch_shape has been overridden, we return
# what we statically know about the `batch_shape_override`. This works
# because: `_is_maybe_batch_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `batch_shape` or we do.
#
# Notice that this implementation parallels the `_event_shape` except that
# the `bijector` doesn't get to alter the `batch_shape`. Recall that
# `batch_shape` is a property of a distribution while `event_shape` is
# shared between both the `distribution` instance and the `bijector`.
static_override = tensor_util.constant_value_as_shape(
self._override_batch_shape)
return (static_override
if self._is_maybe_batch_override
else self.distribution.batch_shape)
def _sample_n(self, n, seed=None):
sample_shape = _concat_vectors(
distribution_util.pick_vector(self._needs_rotation, self._empty, [n]),
self._override_batch_shape,
self._override_event_shape,
distribution_util.pick_vector(self._needs_rotation, [n], self._empty))
x = self.distribution.sample(sample_shape=sample_shape, seed=seed)
x = self._maybe_rotate_dims(x)
# We'll apply the bijector in the `_call_sample_n` function.
return x
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
# We override `_call_sample_n` rather than `_sample_n` so we can ensure that
# the result of `self.bijector.forward` is not modified (and thus caching
# works).
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
# First, generate samples. We will possibly generate extra samples in the
# event that we need to reinterpret the samples as part of the
# event_shape.
x = self._sample_n(n, seed, **kwargs)
# Next, we reshape `x` into its final form. We do this prior to the call
# to the bijector to ensure that the bijector caching works.
batch_event_shape = array_ops.shape(x)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
x = array_ops.reshape(x, final_shape)
# Finally, we apply the bijector's forward transformation. For caching to
# work, it is imperative that this is the last modification to the
# returned result.
y = self.bijector.forward(x, **kwargs)
y = self._set_sample_static_shape(y, sample_shape)
return y
def _log_prob(self, y):
# For caching to work, it is imperative that the bijector is the first to
# modify the input.
x = self.bijector.inverse(y)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_log_prob_for_one_fiber(y, x, ildj, event_ndims)
lp_on_fibers = [
self._finish_log_prob_for_one_fiber(y, x_i, ildj_i, event_ndims)
for x_i, ildj_i in zip(x, ildj)]
return math_ops.reduce_logsumexp(array_ops.stack(lp_on_fibers), axis=0)
def _finish_log_prob_for_one_fiber(self, y, x, ildj, event_ndims):
"""Finish computation of log_prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x)
if self._is_maybe_event_override:
log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)
log_prob += math_ops.cast(ildj, log_prob.dtype)
if self._is_maybe_event_override and isinstance(event_ndims, int):
log_prob.set_shape(
array_ops.broadcast_static_shape(
y.get_shape().with_rank_at_least(1)[:-event_ndims],
self.batch_shape))
return log_prob
def _prob(self, y):
x = self.bijector.inverse(y)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_prob_for_one_fiber(y, x, ildj, event_ndims)
prob_on_fibers = [
self._finish_prob_for_one_fiber(y, x_i, ildj_i, event_ndims)
for x_i, ildj_i in zip(x, ildj)]
return sum(prob_on_fibers)
def _finish_prob_for_one_fiber(self, y, x, ildj, event_ndims):
"""Finish computation of prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
prob = self.distribution.prob(x)
if self._is_maybe_event_override:
prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
prob *= math_ops.exp(math_ops.cast(ildj, prob.dtype))
if self._is_maybe_event_override and isinstance(event_ndims, int):
prob.set_shape(
array_ops.broadcast_static_shape(
y.get_shape().with_rank_at_least(1)[:-event_ndims],
self.batch_shape))
return prob
def _log_cdf(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("log_cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_cdf is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.log_cdf(x)
def _cdf(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("cdf is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.cdf(x)
def _log_survival_function(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("log_survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_survival_function is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.log_survival_function(x)
def _survival_function(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("survival_function is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.survival_function(x)
def _quantile(self, value):
if self._is_maybe_event_override:
raise NotImplementedError("quantile is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("quantile is not implemented when "
"bijector is not injective.")
# x_q is the "qth quantile" of X iff q = P[X <= x_q]. Now, since X =
# g^{-1}(Y), q = P[X <= x_q] = P[g^{-1}(Y) <= x_q] = P[Y <= g(x_q)],
# implies the qth quantile of Y is g(x_q).
inv_cdf = self.distribution.quantile(value)
return self.bijector.forward(inv_cdf)
def _entropy(self):
if not self.bijector.is_constant_jacobian:
raise NotImplementedError("entropy is not implemented")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("entropy is not implemented when "
"bijector is not injective.")
# Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
# can be shown that:
# H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
# If is_constant_jacobian then:
# E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
    # where c can be anything.
entropy = self.distribution.entropy()
if self._is_maybe_event_override:
# H[X] = sum_i H[X_i] if X_i are mutually independent.
# This means that a reduce_sum is a simple rescaling.
entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
dtype=entropy.dtype.base_dtype)
if self._is_maybe_batch_override:
new_shape = array_ops.concat([
_ones_like(self._override_batch_shape),
self.distribution.batch_shape_tensor()
], 0)
entropy = array_ops.reshape(entropy, new_shape)
multiples = array_ops.concat([
self._override_batch_shape,
_ones_like(self.distribution.batch_shape_tensor())
], 0)
entropy = array_ops.tile(entropy, multiples)
dummy = array_ops.zeros(
shape=array_ops.concat(
[self.batch_shape_tensor(), self.event_shape_tensor()],
0),
dtype=self.dtype)
event_ndims = (self.event_shape.ndims if self.event_shape.ndims is not None
else array_ops.size(self.event_shape_tensor()))
ildj = self.bijector.inverse_log_det_jacobian(
dummy, event_ndims=event_ndims)
entropy -= math_ops.cast(ildj, entropy.dtype)
entropy.set_shape(self.batch_shape)
return entropy
def _maybe_validate_shape_override(self, override_shape, base_is_scalar,
validate_args, name):
"""Helper to __init__ which ensures override batch/event_shape are valid."""
if override_shape is None:
override_shape = []
override_shape = ops.convert_to_tensor(override_shape, dtype=dtypes.int32,
name=name)
if not override_shape.dtype.is_integer:
raise TypeError("shape override must be an integer")
override_is_scalar = _is_scalar_from_shape(override_shape)
if tensor_util.constant_value(override_is_scalar):
return self._empty
dynamic_assertions = []
if override_shape.get_shape().ndims is not None:
if override_shape.get_shape().ndims != 1:
raise ValueError("shape override must be a vector")
elif validate_args:
dynamic_assertions += [check_ops.assert_rank(
override_shape, 1,
message="shape override must be a vector")]
if tensor_util.constant_value(override_shape) is not None:
if any(s <= 0 for s in tensor_util.constant_value(override_shape)):
raise ValueError("shape override must have positive elements")
elif validate_args:
dynamic_assertions += [check_ops.assert_positive(
override_shape,
message="shape override must have positive elements")]
is_both_nonscalar = _logical_and(_logical_not(base_is_scalar),
_logical_not(override_is_scalar))
if tensor_util.constant_value(is_both_nonscalar) is not None:
if tensor_util.constant_value(is_both_nonscalar):
raise ValueError("base distribution not scalar")
elif validate_args:
dynamic_assertions += [check_ops.assert_equal(
is_both_nonscalar, False,
message="base distribution not scalar")]
if not dynamic_assertions:
return override_shape
return control_flow_ops.with_dependencies(
dynamic_assertions, override_shape)
  def _maybe_rotate_dims(self, x, rotate_right=False):
    """Helper which rolls event dims left (or right if `rotate_right`)."""
needs_rotation_const = tensor_util.constant_value(self._needs_rotation)
if needs_rotation_const is not None and not needs_rotation_const:
return x
ndims = array_ops.rank(x)
n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims
return array_ops.transpose(
x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))
def _maybe_get_static_event_ndims(self):
if self.event_shape.ndims is not None:
return self.event_shape.ndims
event_ndims = array_ops.size(self.event_shape_tensor())
event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
if event_ndims_ is not None:
return event_ndims_
return event_ndims
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/transformed_distribution.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Student's t distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"StudentT",
"StudentTWithAbsDfSoftplusScale",
]
@tf_export(v1=["distributions.StudentT"])
class StudentT(distribution.Distribution):
"""Student's t-distribution.
  This distribution has parameters: degrees of freedom `df`, location `loc`,
  and scale `scale`.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; df, mu, sigma) = (1 + y**2 / df)**(-0.5 (df + 1)) / Z
where,
y = (x - mu) / sigma
Z = abs(sigma) sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1))
```
where:
* `loc = mu`,
* `scale = sigma`, and,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The StudentT distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ StudentT(df, loc=0, scale=1)
Y = loc + scale * X
```
Notice that `scale` has semantics more similar to standard deviation than
variance. However it is not actually the std. deviation; the Student's
t-distribution std. dev. is `scale sqrt(df / (df - 2))` when `df > 2`.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Student t distribution.
  single_dist = tfd.StudentT(df=3., loc=0., scale=1.)
# Evaluate the pdf at 1, returning a scalar Tensor.
single_dist.prob(1.)
# Define a batch of two scalar valued Student t's.
# The first has degrees of freedom 2, mean 1, and scale 11.
# The second 3, 2 and 22.
multi_dist = tfd.StudentT(df=[2, 3], loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
multi_dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
multi_dist.sample(3)
```
Arguments are broadcast when possible.
```python
# Define a batch of two Student's t distributions.
# Both have df 2 and mean 1, but different scales.
dist = tfd.StudentT(df=2, loc=1, scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
Compute the gradients of samples w.r.t. the parameters:
```python
df = tf.constant(2.0)
loc = tf.constant(2.0)
scale = tf.constant(11.0)
dist = tfd.StudentT(df=df, loc=loc, scale=scale)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [df, loc, scale])
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
df,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="StudentT"):
"""Construct Student's t distributions.
The distributions have degree of freedom `df`, mean `loc`, and scale
`scale`.
The parameters `df`, `loc`, and `scale` must be shaped in a way that
supports broadcasting (e.g. `df + loc + scale` is a valid operation).
Args:
df: Floating-point `Tensor`. The degrees of freedom of the
distribution(s). `df` must contain only positive values.
loc: Floating-point `Tensor`. The mean(s) of the distribution(s).
scale: Floating-point `Tensor`. The scaling factor(s) for the
distribution(s). Note that `scale` is not technically the standard
deviation of this distribution but has semantics more similar to
standard deviation than variance.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
      TypeError: if `loc` and `scale` have different `dtype`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[df, loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(df)]
if validate_args else []):
self._df = array_ops.identity(df, name="df")
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype(
(self._df, self._loc, self._scale))
super(StudentT, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._df, self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("df", "loc", "scale"), (
[ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 3)))
@property
def df(self):
"""Degrees of freedom in these Student's t distribution(s)."""
return self._df
@property
def loc(self):
"""Locations of these Student's t distribution(s)."""
return self._loc
@property
def scale(self):
"""Scaling factors of these Student's t distribution(s)."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.df),
array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale)))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
array_ops.broadcast_static_shape(self.df.get_shape(),
self.loc.get_shape()),
self.scale.get_shape())
def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
# The sampling method comes from the fact that if:
# X ~ Normal(0, 1)
# Z ~ Chi2(df)
# Y = X / sqrt(Z / df)
# then:
# Y ~ StudentT(df).
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
gamma_sample = random_ops.random_gamma(
[n],
0.5 * df,
beta=0.5,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, salt="student_t"))
samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
return samples * self.scale + self.loc # Abs(scale) not wanted.
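# Illustrative, self-contained numpy sketch of the sampling construction in the
# comment above: if X ~ Normal(0, 1) and Z ~ Chi2(df) then X / sqrt(Z / df) is
# StudentT(df) distributed. Assumes scipy is available; df, sample size, seed
# and tolerance are arbitrary choices for the sanity check.
import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
df = 5.
x = rng.standard_normal(100000)
z = rng.chisquare(df, size=100000)
y = x / np.sqrt(z / df)
# Empirical quantiles should be close to the exact Student t quantiles.
q = np.array([0.1, 0.5, 0.9])
np.testing.assert_allclose(np.quantile(y, q), stats.t(df).ppf(q), atol=0.05)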
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_unnormalized_prob(self, x):
y = (x - self.loc) / self.scale # Abs(scale) superfluous.
return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df)
def _log_normalization(self):
return (math_ops.log(math_ops.abs(self.scale)) +
0.5 * math_ops.log(self.df) +
0.5 * np.log(np.pi) +
math_ops.lgamma(0.5 * self.df) -
math_ops.lgamma(0.5 * (self.df + 1.)))
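# Illustrative, self-contained sketch checking the density formula above
# (unnormalized log-prob minus the log normalizer) against scipy's Student t
# log-pdf. Assumes scipy is available; the parameter values are arbitrary.
import numpy as np
from scipy import special, stats

df, loc, scale = 3., 1., 2.
x = np.array([-1., 0., 2.5])
y = (x - loc) / scale
log_unnormalized = -0.5 * (df + 1.) * np.log1p(y**2. / df)
log_normalization = (np.log(np.abs(scale)) + 0.5 * np.log(df) +
                     0.5 * np.log(np.pi) + special.gammaln(0.5 * df) -
                     special.gammaln(0.5 * (df + 1.)))
np.testing.assert_allclose(log_unnormalized - log_normalization,
                           stats.t(df, loc=loc, scale=scale).logpdf(x))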
def _cdf(self, x):
# Take Abs(scale) to make subsequent where work correctly.
y = (x - self.loc) / math_ops.abs(self.scale)
x_t = self.df / (y**2. + self.df)
neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
return array_ops.where_v2(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf)
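# Illustrative, self-contained sketch of the incomplete-beta CDF identity used
# above, cross-checked against scipy's Student t CDF. Assumes scipy is
# available; df and the evaluation points are arbitrary.
import numpy as np
from scipy import special, stats

df = 4.
y = np.array([-2., -0.5, 0., 1.5])
x_t = df / (y**2. + df)
neg_cdf = 0.5 * special.betainc(0.5 * df, 0.5, x_t)
cdf = np.where(y < 0., neg_cdf, 1. - neg_cdf)
np.testing.assert_allclose(cdf, stats.t(df).cdf(y))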
def _entropy(self):
v = array_ops.ones(self.batch_shape_tensor(),
dtype=self.dtype)[..., array_ops.newaxis]
u = v * self.df[..., array_ops.newaxis]
beta_arg = array_ops.concat([u, v], -1) / 2.
return (math_ops.log(math_ops.abs(self.scale)) +
0.5 * math_ops.log(self.df) +
special_math_ops.lbeta(beta_arg) +
0.5 * (self.df + 1.) *
(math_ops.digamma(0.5 * (self.df + 1.)) -
math_ops.digamma(0.5 * self.df)))
@distribution_util.AppendDocstring(
"""The mean of Student's T equals `loc` if `df > 1`, otherwise it is
      `NaN`. If `self.allow_nan_stats=False`, then an exception will be raised
rather than returning `NaN`.""")
def _mean(self):
mean = self.loc * array_ops.ones(self.batch_shape_tensor(),
dtype=self.dtype)
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return array_ops.where_v2(
math_ops.greater(
self.df,
array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
mean, array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies(
[
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.df,
message="mean not defined for components of df <= 1"),
],
mean)
@distribution_util.AppendDocstring("""
The variance for Student's T equals
```
df / (df - 2), when df > 2
infinity, when 1 < df <= 2
NaN, when df <= 1
```
""")
def _variance(self):
# We need to put the tf.where inside the outer tf.where to ensure we never
# hit a NaN in the gradient.
denom = array_ops.where_v2(
math_ops.greater(self.df, 2.), self.df - 2.,
array_ops.ones_like(self.df))
# Abs(scale) superfluous.
var = (array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) *
math_ops.square(self.scale) * self.df / denom)
# When 1 < df <= 2, variance is infinite.
inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
result_where_defined = array_ops.where_v2(
self.df > array_ops.fill(self.batch_shape_tensor(), 2.), var,
array_ops.fill(self.batch_shape_tensor(), inf, name="inf"))
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return array_ops.where_v2(
math_ops.greater(
self.df,
array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
result_where_defined,
array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies(
[
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.df,
message="variance not defined for components of df <= 1"),
],
result_where_defined)
def _mode(self):
return array_ops.identity(self.loc)
class StudentTWithAbsDfSoftplusScale(StudentT):
"""StudentT with `df = floor(abs(df))` and `scale = softplus(scale)`."""
  @deprecation.deprecated(
      "2019-01-01",
      "Use `tfd.StudentT(tf.floor(tf.abs(df)), loc, "
      "tf.nn.softplus(scale))` instead.",
warn_once=True)
def __init__(self,
df,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="StudentTWithAbsDfSoftplusScale"):
parameters = dict(locals())
with ops.name_scope(name, values=[df, scale]) as name:
super(StudentTWithAbsDfSoftplusScale, self).__init__(
df=math_ops.floor(math_ops.abs(df)),
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/student_t.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops.distributions.bijector_impl import Bijector
# pylint: enable=wildcard-import,unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/bijector.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Normal",
"NormalWithSoftplusScale",
]
@tf_export(v1=["distributions.Normal"])
class Normal(distribution.Distribution):
"""The Normal distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z
Z = (2 pi sigma**2)**0.5
```
where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and, `Z`
is the normalization constant.
The Normal distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Normal(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Normal distribution.
dist = tfd.Normal(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Normals.
# The first has mean 1 and standard deviation 11, the second 2 and 22.
dist = tfd.Normal(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Normals.
# Both have mean 1, but different standard deviations.
dist = tfd.Normal(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Normal"):
"""Construct Normal distributions with mean and stddev `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor; the means of the distribution(s).
scale: Floating point tensor; the stddevs of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` have different `dtype`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Normal, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the mean."""
return self._loc
@property
def scale(self):
"""Distribution parameter for standard deviation."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc),
array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(),
self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
sampled = random_ops.random_normal(
shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed)
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_cdf(self, x):
return special_math.log_ndtr(self._z(x))
def _cdf(self, x):
return special_math.ndtr(self._z(x))
def _log_survival_function(self, x):
return special_math.log_ndtr(-self._z(x))
def _survival_function(self, x):
return special_math.ndtr(-self._z(x))
def _log_unnormalized_prob(self, x):
return -0.5 * math_ops.square(self._z(x))
def _log_normalization(self):
return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast scale.
scale = self.scale * array_ops.ones_like(self.loc)
return 0.5 * math.log(2. * math.pi * math.e) + math_ops.log(scale)
def _mean(self):
return self.loc * array_ops.ones_like(self.scale)
def _quantile(self, p):
return self._inv_z(special_math.ndtri(p))
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc)
def _mode(self):
return self._mean()
def _z(self, x):
"""Standardize input `x` to a unit normal."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
  def _inv_z(self, z):
    """Reconstruct input `x` from its normalized version."""
with ops.name_scope("reconstruct", values=[z]):
return z * self.scale + self.loc
class NormalWithSoftplusScale(Normal):
"""Normal with softplus applied to `scale`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Normal(loc, tf.nn.softplus(scale)) "
"instead.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="NormalWithSoftplusScale"):
parameters = dict(locals())
with ops.name_scope(name, values=[scale]) as name:
super(NormalWithSoftplusScale, self).__init__(
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
"""Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
"""
with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]):
one = constant_op.constant(1, dtype=n_a.dtype)
two = constant_op.constant(2, dtype=n_a.dtype)
half = constant_op.constant(0.5, dtype=n_a.dtype)
s_a_squared = math_ops.square(n_a.scale)
s_b_squared = math_ops.square(n_b.scale)
ratio = s_a_squared / s_b_squared
return (math_ops.squared_difference(n_a.loc, n_b.loc) / (two * s_b_squared)
+ half * (ratio - one - math_ops.log(ratio)))
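# Illustrative, self-contained sketch cross-checking the closed-form
# KL(n_a || n_b) above against a Monte Carlo estimate of
# E_a[log p_a(x) - log p_b(x)]. Assumes scipy is available; the parameters,
# sample size and tolerance are arbitrary.
import numpy as np
from scipy import stats

loc_a, scale_a = 1.0, 0.5
loc_b, scale_b = -0.5, 1.5
ratio = scale_a**2 / scale_b**2
kl_closed_form = ((loc_a - loc_b)**2 / (2. * scale_b**2) +
                  0.5 * (ratio - 1. - np.log(ratio)))

rng = np.random.RandomState(0)
x = rng.normal(loc_a, scale_a, size=200000)
kl_monte_carlo = np.mean(stats.norm(loc_a, scale_a).logpdf(x) -
                         stats.norm(loc_b, scale_b).logpdf(x))
np.testing.assert_allclose(kl_closed_form, kl_monte_carlo, atol=0.01)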
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/normal.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import hashlib
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util import tf_inspect
def assert_integer_form(x,
data=None,
summarize=None,
message=None,
int_dtype=None,
name="assert_integer_form"):
"""Assert that x has integer components (or floats equal to integers).
Args:
x: Floating-point `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
implies the smallest possible signed int will be used for casting.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
"""
with ops.name_scope(name, values=[x, data]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return control_flow_ops.no_op()
message = message or "{} has non-integer components".format(x)
if int_dtype is None:
try:
int_dtype = {
dtypes.float16: dtypes.int16,
dtypes.float32: dtypes.int32,
dtypes.float64: dtypes.int64,
}[x.dtype.base_dtype]
except KeyError:
raise TypeError("Unrecognized type {}".format(x.dtype.name))
return check_ops.assert_equal(
x,
math_ops.cast(math_ops.cast(x, int_dtype), x.dtype),
data=data,
summarize=summarize,
message=message,
name=name)
def assert_symmetric(matrix):
matrix_t = array_ops.matrix_transpose(matrix)
return control_flow_ops.with_dependencies(
[check_ops.assert_equal(matrix, matrix_t)], matrix)
def embed_check_nonnegative_integer_form(
x, name="embed_check_nonnegative_integer_form"):
"""Assert x is a non-negative tensor, and optionally of integers."""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
assertions = [
check_ops.assert_non_negative(
x, message="'{}' must be non-negative.".format(x)),
]
if not x.dtype.is_integer:
assertions += [
assert_integer_form(
x,
message="'{}' cannot contain fractional components.".format(x)),
]
return control_flow_ops.with_dependencies(assertions, x)
def same_dynamic_shape(a, b):
"""Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape.
"""
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
# Here we can't just do math_ops.equal(a.shape, b.shape), since
# static shape inference may break the equality comparison between
# shape(a) and shape(b) in math_ops.equal.
def all_shapes_equal():
return math_ops.reduce_all(
math_ops.equal(
array_ops.concat(
[array_ops.shape(a), array_ops.shape(b)], 0),
array_ops.concat(
[array_ops.shape(b), array_ops.shape(a)], 0)))
# One of the shapes isn't fully defined, so we need to use the dynamic
# shape.
return control_flow_ops.cond(
math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
all_shapes_equal, lambda: constant_op.constant(False))
def maybe_get_static_value(x, dtype=None):
"""Helper which tries to return a static value.
  Given `x`, extract its value statically, optionally casting to a specific
dtype. If this is not possible, None is returned.
Args:
x: `Tensor` for which to extract a value statically.
dtype: Optional dtype to cast to.
Returns:
Statically inferred value if possible, otherwise None.
"""
if x is None:
return x
try:
# This returns an np.ndarray.
x_ = tensor_util.constant_value(x)
except TypeError:
x_ = x
if x_ is None or dtype is None:
return x_
return np.array(x_, dtype)
def get_logits_and_probs(logits=None,
probs=None,
multidimensional=False,
validate_args=False,
name="get_logits_and_probs",
dtype=None):
"""Converts logit to probabilities (or vice-versa), and returns both.
Args:
logits: Floating-point `Tensor` representing log-odds.
probs: Floating-point `Tensor` representing probabilities.
    multidimensional: Python `bool`, default `False`. If `True`, the last
      dimension of `logits` or `probs`, a `[N1, N2, ..., k]`-shaped tensor,
      represents the logit or probability of each of the `k` classes.
validate_args: Python `bool`, default `False`. When `True`, either assert `0
<= probs <= 1` (if not `multidimensional`) or that the last dimension of
`probs` sums to one.
name: A name for this operation (optional).
dtype: `tf.DType` to prefer when converting args to `Tensor`s.
Returns:
logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
`1`, then the corresponding entry in the returned logit will be `-Inf` and
`Inf` respectively.
Raises:
ValueError: if neither `probs` nor `logits` were passed in, or both were.
"""
with ops.name_scope(name, values=[probs, logits]):
if (probs is None) == (logits is None):
raise ValueError("Must pass probs or logits, but not both.")
if probs is None:
logits = ops.convert_to_tensor(logits, name="logits", dtype=dtype)
if not logits.dtype.is_floating:
        raise TypeError("logits must have floating type.")
# We can early return since we constructed probs and therefore know
# they're valid.
if multidimensional:
if validate_args:
logits = embed_check_categorical_event_shape(logits)
return logits, nn.softmax(logits, name="probs")
return logits, math_ops.sigmoid(logits, name="probs")
probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype)
if not probs.dtype.is_floating:
      raise TypeError("probs must have floating type.")
if validate_args:
with ops.name_scope("validate_probs"):
one = constant_op.constant(1., probs.dtype)
dependencies = [check_ops.assert_non_negative(probs)]
if multidimensional:
probs = embed_check_categorical_event_shape(probs)
dependencies += [
check_ops.assert_near(
math_ops.reduce_sum(probs, -1),
one,
message="probs does not sum to 1.")
]
else:
dependencies += [
check_ops.assert_less_equal(
probs, one, message="probs has components greater than 1.")
]
probs = control_flow_ops.with_dependencies(dependencies, probs)
with ops.name_scope("logits"):
if multidimensional:
        # Here we don't compute the multidimensional case in a manner
        # consistent with the unidimensional case; instead we follow the TF
        # convention. Typically you might expect to see
        # logits = log(probs) - log(probs[pivot]). A side effect of following
        # the TF convention is that the unidimensional case implicitly handles
        # the second class (the complement) while the multidimensional case
        # explicitly keeps the pivot dimension.
return math_ops.log(probs), probs
return math_ops.log(probs) - math_ops.log1p(-1. * probs), probs
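# Illustrative, self-contained numpy sketch of the scalar logit convention used
# above: logits = log(p) - log1p(-p), with the sigmoid as its inverse. The
# probabilities are arbitrary; no TF required.
import numpy as np

p = np.array([0.1, 0.5, 0.9])
logits = np.log(p) - np.log1p(-p)            # same formula as the return above
np.testing.assert_allclose(1. / (1. + np.exp(-logits)), p)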
def _is_known_unsigned_by_dtype(dt):
"""Helper returning True if dtype is known to be unsigned."""
return {
dtypes.bool: True,
dtypes.uint8: True,
dtypes.uint16: True,
}.get(dt.base_dtype, False)
def _is_known_signed_by_dtype(dt):
"""Helper returning True if dtype is known to be signed."""
return {
dtypes.float16: True,
dtypes.float32: True,
dtypes.float64: True,
dtypes.int8: True,
dtypes.int16: True,
dtypes.int32: True,
dtypes.int64: True,
}.get(dt.base_dtype, False)
def _is_known_dtype(dt):
"""Helper returning True if dtype is known."""
return _is_known_unsigned_by_dtype(dt) or _is_known_signed_by_dtype(dt)
def _largest_integer_by_dtype(dt):
"""Helper returning the largest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if dt.is_floating:
return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1))
if dt.is_integer:
return np.iinfo(dt.as_numpy_dtype).max
if dt.base_dtype == dtypes.bool:
return int(1)
# We actually can't land here but keep the case for completeness.
raise TypeError("Unrecognized dtype: {}".format(dt.name))
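# Illustrative, self-contained sketch of the float threshold above: for float32
# every integer up to 2**(1 + mantissa_bits) = 2**24 is exactly representable,
# while 2**24 + 1 rounds back to 2**24.
import numpy as np

limit = 2**(np.finfo(np.float32).nmant + 1)           # 2**24 == 16777216
assert np.float32(limit) == limit
assert np.float32(limit + 1) == np.float32(limit)     # 16777217 is not exact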
def _smallest_integer_by_dtype(dt):
"""Helper returning the smallest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if _is_known_unsigned_by_dtype(dt):
return 0
return -1 * _largest_integer_by_dtype(dt)
def _is_integer_like_by_dtype(dt):
"""Helper returning True if dtype.is_integer or is `bool`."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
return dt.is_integer or dt.base_dtype == dtypes.bool
def embed_check_categorical_event_shape(
categorical_param, name="embed_check_categorical_event_shape"):
"""Embeds checks that categorical distributions don't have too many classes.
A categorical-type distribution is one which, e.g., returns the class label
rather than a one-hot encoding. E.g., `Categorical(probs)`.
Since distributions output samples in the same dtype as the parameters, we
must ensure that casting doesn't lose precision. That is, the
`parameter.dtype` implies a maximum number of classes. However, since shape is
`int32` and categorical variables are presumed to be indexes into a `Tensor`,
we must also ensure that the number of classes is no larger than the largest
possible `int32` index, i.e., `2**31-1`.
In other words the number of classes, `K`, must satisfy the following
condition:
```python
K <= min(
      int(2**31 - 1),  # Largest int32 index.
{
dtypes.float16: int(2**11), # Largest int as a float16.
dtypes.float32: int(2**24),
dtypes.float64: int(2**53),
}.get(categorical_param.dtype.base_dtype, 0))
```
Args:
categorical_param: Floating-point `Tensor` representing parameters of
distribution over categories. The rightmost shape is presumed to be the
number of categories.
name: A name for this operation (optional).
Returns:
categorical_param: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `categorical_param` has an unknown `dtype`.
ValueError: if we can statically identify `categorical_param` as being too
large (for being closed under int32/float casting).
"""
with ops.name_scope(name, values=[categorical_param]):
x = ops.convert_to_tensor(categorical_param, name="categorical_param")
# The size must not exceed both of:
# - The largest possible int32 (since categorical values are presumed to be
# indexes into a Tensor).
# - The largest possible integer exactly representable under the given
# floating-point dtype (since we need to cast to/from).
#
# The chosen floating-point thresholds are 2**(1 + mantissa_bits).
# For more details, see:
# https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
x_dtype = x.dtype.base_dtype
max_event_size = (
_largest_integer_by_dtype(x_dtype) if x_dtype.is_floating else 0)
if max_event_size == 0:
raise TypeError("Unable to validate size of unrecognized dtype "
"({}).".format(x_dtype.name))
try:
x_shape_static = x.get_shape().with_rank_at_least(1)
except ValueError:
raise ValueError("A categorical-distribution parameter must have "
"at least 1 dimension.")
if tensor_shape.dimension_value(x_shape_static[-1]) is not None:
event_size = x_shape_static.dims[-1].value
if event_size < 2:
raise ValueError("A categorical-distribution parameter must have at "
"least 2 events.")
if event_size > max_event_size:
raise ValueError("Number of classes exceeds `dtype` precision, i.e., "
"{} implies shape ({}) cannot exceed {}.".format(
x_dtype.name, event_size, max_event_size))
return x
else:
event_size = array_ops.shape(x, name="x_shape")[-1]
return control_flow_ops.with_dependencies([
check_ops.assert_rank_at_least(
x,
1,
message=("A categorical-distribution parameter must have "
"at least 1 dimension.")),
check_ops.assert_greater_equal(
array_ops.shape(x)[-1],
2,
message=("A categorical-distribution parameter must have at "
"least 2 events.")),
check_ops.assert_less_equal(
event_size,
max_event_size,
message="Number of classes exceeds `dtype` precision, "
"i.e., {} dtype cannot exceed {} shape.".format(
x_dtype.name, max_event_size)),
], x)
def embed_check_integer_casting_closed(x,
target_dtype,
assert_nonnegative=True,
name="embed_check_casting_closed"):
"""Ensures integers remain unaffected despite casting to/from int/float types.
Example integer-types: `uint8`, `int32`, `bool`.
Example floating-types: `float32`, `float64`.
The largest possible integer representable by an IEEE754 floating-point is
`2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is
`2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have
integer-form values can be cast to some other type without loss of precision.
The smallest representable integer is the negative of the largest
representable integer, except for types: `uint8`, `uint16`, `bool`. For these
types, the smallest representable integer is `0`.
Args:
x: `Tensor` representing integer-form values.
target_dtype: TF `dtype` under which `x` should have identical values.
assert_nonnegative: `bool` indicating `x` should contain nonnegative values.
name: A name for this operation (optional).
Returns:
x: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `x` is neither integer- nor floating-type.
TypeError: if `target_dtype` is neither integer- nor floating-type.
TypeError: if neither `x` nor `target_dtype` are integer-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if (not _is_integer_like_by_dtype(x.dtype) and not x.dtype.is_floating):
raise TypeError("{}.dtype must be floating- or "
"integer-type.".format(x.dtype.name))
if (not _is_integer_like_by_dtype(target_dtype) and
not target_dtype.is_floating):
raise TypeError("target_dtype ({}) must be floating- or "
"integer-type.".format(target_dtype.name))
if (not _is_integer_like_by_dtype(x.dtype) and
not _is_integer_like_by_dtype(target_dtype)):
raise TypeError("At least one of {}.dtype ({}) and target_dtype ({}) "
"must be integer-type.".format(x, x.dtype.name,
target_dtype.name))
assertions = []
if assert_nonnegative:
assertions += [
check_ops.assert_non_negative(
x, message="Elements must be non-negative."),
]
if x.dtype.is_floating:
# Being here means _is_integer_like_by_dtype(target_dtype) = True.
      # Since this check implies the magnitude check below, it's all we need.
assertions += [
assert_integer_form(
x,
int_dtype=target_dtype,
message="Elements must be {}-equivalent.".format(
target_dtype.name)),
]
else:
if (_largest_integer_by_dtype(x.dtype) >
_largest_integer_by_dtype(target_dtype)):
# Cast may lose integer precision.
assertions += [
check_ops.assert_less_equal(
x,
_largest_integer_by_dtype(target_dtype),
message=("Elements cannot exceed {}.".format(
_largest_integer_by_dtype(target_dtype)))),
]
if (not assert_nonnegative and (_smallest_integer_by_dtype(
x.dtype) < _smallest_integer_by_dtype(target_dtype))):
assertions += [
check_ops.assert_greater_equal(
x,
_smallest_integer_by_dtype(target_dtype),
message=("Elements cannot be smaller than {}.".format(
_smallest_integer_by_dtype(target_dtype)))),
]
if not assertions:
return x
return control_flow_ops.with_dependencies(assertions, x)
def log_combinations(n, counts, name="log_combinations"):
"""Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
  ```n! / prod_i n_i!```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
"""
# First a bit about the number of ways counts could have come in:
# E.g. if counts = [1, 2], then this is 3 choose 2.
# In general, this is (sum counts)! / sum(counts!)
# The sum should be along the last dimension of counts. This is the
# "distribution" dimension. Here n a priori represents the sum of counts.
with ops.name_scope(name, values=[n, counts]):
n = ops.convert_to_tensor(n, name="n")
counts = ops.convert_to_tensor(counts, name="counts")
total_permutations = math_ops.lgamma(n + 1)
counts_factorial = math_ops.lgamma(counts + 1)
redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1])
return total_permutations - redundant_permutations
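# Illustrative, self-contained numeric sketch of the lgamma-based multinomial
# coefficient above: for n = 3 and counts = [1, 2] the coefficient is
# 3! / (1! * 2!) = 3, so its log is log(3). Assumes scipy is available.
import numpy as np
from scipy import special

n, counts = 3., np.array([1., 2.])
log_coeff = special.gammaln(n + 1.) - np.sum(special.gammaln(counts + 1.))
np.testing.assert_allclose(log_coeff, np.log(3.))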
def matrix_diag_transform(matrix, transform=None, name=None):
"""Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.
Create a trainable covariance defined by a Cholesky factor:
```python
# Transform network layer into 2 x 2 array.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
# Make the diagonal positive. If the upper triangle was zero, this would be a
# valid Cholesky factor.
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# LinearOperatorLowerTriangular ignores the upper triangle.
operator = LinearOperatorLowerTriangular(chol)
```
Example of heteroskedastic 2-D linear regression.
```python
tfd = tfp.distributions
# Get a trainable Cholesky factor.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# Get a trainable mean.
mu = tf.contrib.layers.fully_connected(activations, 2)
# This is a fully trainable multivariate normal!
dist = tfd.MultivariateNormalTriL(mu, chol)
# Standard log loss. Minimizing this will "train" mu and chol, and then dist
# will be a distribution predicting labels as multivariate Gaussians.
loss = -1 * tf.reduce_mean(dist.log_prob(labels))
```
Args:
matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
equal.
transform: Element-wise function mapping `Tensors` to `Tensors`. To be
applied to the diagonal of `matrix`. If `None`, `matrix` is returned
unchanged. Defaults to `None`.
name: A name to give created ops. Defaults to "matrix_diag_transform".
Returns:
A `Tensor` with same shape and `dtype` as `matrix`.
"""
with ops.name_scope(name, "matrix_diag_transform", [matrix]):
matrix = ops.convert_to_tensor(matrix, name="matrix")
if transform is None:
return matrix
# Replace the diag with transformed diag.
diag = array_ops.matrix_diag_part(matrix)
transformed_diag = transform(diag)
transformed_mat = array_ops.matrix_set_diag(matrix, transformed_diag)
return transformed_mat
def rotate_transpose(x, shift, name="rotate_transpose"):
"""Circularly moves dims left or right.
Effectively identical to:
```python
numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))
```
  Additional graph-runtime checks may be performed; these checks entail moving
  data from GPU to CPU.
Example:
```python
x = tf.random.normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4].
rotate_transpose(x, -1).shape == [2, 3, 4, 1]
rotate_transpose(x, -2).shape == [3, 4, 1, 2]
rotate_transpose(x, 1).shape == [4, 1, 2, 3]
rotate_transpose(x, 2).shape == [3, 4, 1, 2]
rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1]
rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3]
```
Args:
x: `Tensor`.
shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
transpose right (shift>0).
name: Python `str`. The name to give this op.
Returns:
rotated_x: Input `Tensor` with dimensions circularly rotated by shift.
Raises:
TypeError: if shift is not integer type.
"""
with ops.name_scope(name, values=[x, shift]):
x = ops.convert_to_tensor(x, name="x")
shift = ops.convert_to_tensor(shift, name="shift")
# We do not assign back to preserve constant-ness.
check_ops.assert_integer(shift)
shift_value_static = tensor_util.constant_value(shift)
ndims = x.get_shape().ndims
if ndims is not None and shift_value_static is not None:
if ndims < 2:
return x
shift_value_static = np.sign(shift_value_static) * (
abs(shift_value_static) % ndims)
if shift_value_static == 0:
return x
perm = np.roll(np.arange(ndims), shift_value_static)
return array_ops.transpose(x, perm=perm)
else:
# Consider if we always had a positive shift, and some specified
# direction.
# When shifting left we want the new array:
# last(x, n-shift) + first(x, shift)
# and if shifting right then we want:
# last(x, shift) + first(x, n-shift)
# Observe that last(a) == slice(a, n) and first(a) == slice(0, a).
# Also, we can encode direction and shift as one: direction * shift.
# Combining these facts, we have:
# a = cond(shift<0, -shift, n-shift)
# last(x, n-a) + first(x, a) == x[a:n] + x[0:a]
# Finally, we transform shift by modulo length so it can be specified
# independently from the array upon which it operates (like python).
ndims = array_ops.rank(x)
shift = array_ops.where_v2(
math_ops.less(shift, 0), math_ops.mod(-shift, ndims),
ndims - math_ops.mod(shift, ndims))
first = math_ops.range(0, shift)
last = math_ops.range(shift, ndims)
perm = array_ops.concat([last, first], 0)
return array_ops.transpose(x, perm=perm)
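# Illustrative, self-contained numpy sketch of the dynamic-shift branch above:
# building the permutation as concat([range(a, ndims), range(0, a)]) with
# a = (-shift) % ndims matches numpy.transpose(x, numpy.roll(arange(ndims),
# shift)). The input shape and shifts are arbitrary.
import numpy as np

x = np.random.RandomState(0).normal(size=(1, 2, 3, 4))
ndims = x.ndim
for shift in (-7, -2, -1, 0, 1, 2, 7):
  a = (-shift) % ndims                      # same effect as the where_v2 above
  perm = np.concatenate([np.arange(a, ndims), np.arange(0, a)])
  expected = np.transpose(x, np.roll(np.arange(ndims), shift))
  np.testing.assert_array_equal(np.transpose(x, perm), expected)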
def pick_vector(cond, true_vector, false_vector, name="pick_vector"):
"""Picks possibly different length row `Tensor`s based on condition.
Value `Tensor`s should have exactly one dimension.
If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
`false_vector` is immediately returned. I.e., no graph nodes are created and
no validation happens.
Args:
cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
true_vector: `Tensor` of one dimension. Returned when cond is `True`.
false_vector: `Tensor` of one dimension. Returned when cond is `False`.
name: Python `str`. The name to give this op.
  Example:

  ```python
  pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))  # [10, 11]
  pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))  # [15, 16, 17]
  ```
Returns:
true_or_false_vector: `Tensor`.
Raises:
TypeError: if `cond.dtype != tf.bool`
TypeError: if `cond` is not a constant and
`true_vector.dtype != false_vector.dtype`
"""
with ops.name_scope(name, values=(cond, true_vector, false_vector)):
cond = ops.convert_to_tensor(cond, name="cond")
if cond.dtype != dtypes.bool:
raise TypeError("%s.dtype=%s which is not %s" %
(cond, cond.dtype, dtypes.bool))
cond_value_static = tensor_util.constant_value(cond)
if cond_value_static is not None:
return true_vector if cond_value_static else false_vector
true_vector = ops.convert_to_tensor(true_vector, name="true_vector")
false_vector = ops.convert_to_tensor(false_vector, name="false_vector")
if true_vector.dtype != false_vector.dtype:
raise TypeError(
"%s.dtype=%s does not match %s.dtype=%s" %
(true_vector, true_vector.dtype, false_vector, false_vector.dtype))
n = array_ops.shape(true_vector)[0]
return array_ops.slice(
array_ops.concat([true_vector, false_vector], 0),
[array_ops.where_v2(cond, 0, n)], [array_ops.where(cond, n, -1)])
def prefer_static_broadcast_shape(shape1,
shape2,
name="prefer_static_broadcast_shape"):
"""Convenience function which statically broadcasts shape when possible.
Args:
shape1: `1-D` integer `Tensor`. Already converted to tensor!
shape2: `1-D` integer `Tensor`. Already converted to tensor!
name: A string name to prepend to created ops.
Returns:
The broadcast shape, either as `TensorShape` (if broadcast can be done
statically), or as a `Tensor`.
"""
with ops.name_scope(name, values=[shape1, shape2]):
def make_shape_tensor(x):
return ops.convert_to_tensor(x, name="shape", dtype=dtypes.int32)
def get_tensor_shape(s):
if isinstance(s, tensor_shape.TensorShape):
return s
s_ = tensor_util.constant_value(make_shape_tensor(s))
if s_ is not None:
return tensor_shape.TensorShape(s_)
return None
def get_shape_tensor(s):
if not isinstance(s, tensor_shape.TensorShape):
return make_shape_tensor(s)
if s.is_fully_defined():
return make_shape_tensor(s.as_list())
raise ValueError("Cannot broadcast from partially "
"defined `TensorShape`.")
shape1_ = get_tensor_shape(shape1)
shape2_ = get_tensor_shape(shape2)
if shape1_ is not None and shape2_ is not None:
return array_ops.broadcast_static_shape(shape1_, shape2_)
shape1_ = get_shape_tensor(shape1)
shape2_ = get_shape_tensor(shape2)
return array_ops.broadcast_dynamic_shape(shape1_, shape2_)
def prefer_static_rank(x):
"""Return static rank of tensor `x` if available, else `tf.rank(x)`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static rank is obtainable), else `Tensor`.
"""
return prefer_static_value(array_ops.rank(x))
def prefer_static_shape(x):
"""Return static shape of tensor `x` if available, else `tf.shape(x)`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static shape is obtainable), else `Tensor`.
"""
return prefer_static_value(array_ops.shape(x))
def prefer_static_value(x):
"""Return static value of tensor `x` if available, else `x`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static value is obtainable), else `Tensor`.
"""
static_x = tensor_util.constant_value(x)
if static_x is not None:
return static_x
return x
def gen_new_seed(seed, salt):
"""Generate a new seed, from the given seed and salt."""
if seed is None:
return None
string = (str(seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
def fill_triangular(x, upper=False, name=None):
"""Creates a (batch of) triangular matrix from a vector of inputs.
Created matrix can be lower- or upper-triangular. (It is more efficient to
create the matrix as upper or lower, rather than transpose.)
Triangular matrix elements are filled in a clockwise spiral. See example,
below.
If `x.get_shape()` is `[b1, b2, ..., bB, d]` then the output shape is
`[b1, b2, ..., bB, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
  `n = int(np.sqrt(0.25 + 2. * d) - 0.5)`.
Example:
```python
fill_triangular([1, 2, 3, 4, 5, 6])
# ==> [[4, 0, 0],
# [6, 5, 0],
# [3, 2, 1]]
fill_triangular([1, 2, 3, 4, 5, 6], upper=True)
# ==> [[1, 2, 3],
# [0, 5, 6],
# [0, 0, 4]]
```
For comparison, a pure numpy version of this function can be found in
`util_test.py`, function `_fill_triangular`.
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
tril: `Tensor` with lower (or upper) triangular elements filled from `x`.
Raises:
ValueError: if `x` cannot be mapped to a triangular matrix.
"""
with ops.name_scope(name, "fill_triangular", values=[x]):
x = ops.convert_to_tensor(x, name="x")
if tensor_shape.dimension_value(
x.shape.with_rank_at_least(1)[-1]) is not None:
# Formula derived by solving for n: m = n(n+1)/2.
m = np.int32(x.shape.dims[-1].value)
n = np.sqrt(0.25 + 2. * m) - 0.5
if n != np.floor(n):
raise ValueError("Input right-most shape ({}) does not "
"correspond to a triangular matrix.".format(m))
n = np.int32(n)
static_final_shape = x.shape[:-1].concatenate([n, n])
else:
m = array_ops.shape(x)[-1]
# For derivation, see above. Casting automatically lops off the 0.5, so we
# omit it. We don't validate n is an integer because this has
# graph-execution cost; an error will be thrown from the reshape, below.
n = math_ops.cast(
math_ops.sqrt(0.25 + math_ops.cast(2 * m, dtype=dtypes.float32)),
dtype=dtypes.int32)
static_final_shape = x.shape.with_rank_at_least(1)[:-1].concatenate(
[None, None])
# We now concatenate the "tail" of `x` to `x` (and reverse one of them).
#
# We do this based on the insight that the input `x` provides `ceil(n/2)`
# rows of an `n x n` matrix, some of which will get zeroed out being on the
# wrong side of the diagonal. The first row will not get zeroed out at all,
# and we need `floor(n/2)` more rows, so the first is what we omit from
# `x_tail`. If we then stack those `ceil(n/2)` rows with the `floor(n/2)`
# rows provided by a reversed tail, it is exactly the other set of elements
# of the reversed tail which will be zeroed out for being on the wrong side
# of the diagonal further up/down the matrix. And, in doing-so, we've filled
# the triangular matrix in a clock-wise spiral pattern. Neat!
#
# Try it out in numpy:
# n = 3
# x = np.arange(n * (n + 1) / 2)
# m = x.shape[0]
# n = np.int32(np.sqrt(.25 + 2 * m) - .5)
# x_tail = x[(m - (n**2 - m)):]
# np.concatenate([x_tail, x[::-1]], 0).reshape(n, n) # lower
# # ==> array([[3, 4, 5],
# [5, 4, 3],
# [2, 1, 0]])
# np.concatenate([x, x_tail[::-1]], 0).reshape(n, n) # upper
# # ==> array([[0, 1, 2],
# [3, 4, 5],
# [5, 4, 3]])
#
# Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't
# correctly handle `m == n == 1`. Hence, we do nonnegative indexing.
# Furthermore observe that:
# m - (n**2 - m)
# = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2)
# = 2 (n**2 / 2 + n / 2) - n**2
# = n**2 + n - n**2
# = n
ndims = prefer_static_rank(x)
if upper:
x_list = [x, array_ops.reverse(x[..., n:], axis=[ndims - 1])]
else:
x_list = [x[..., n:], array_ops.reverse(x, axis=[ndims - 1])]
new_shape = (
static_final_shape.as_list() if static_final_shape.is_fully_defined()
else array_ops.concat([array_ops.shape(x)[:-1], [n, n]], axis=0))
x = array_ops.reshape(array_ops.concat(x_list, axis=-1), new_shape)
x = array_ops.matrix_band_part(
x, num_lower=(0 if upper else -1), num_upper=(-1 if upper else 0))
x.set_shape(static_final_shape)
return x
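# Illustrative, self-contained numpy sketch mirroring the "try it out in numpy"
# comment above; it reproduces the lower-triangular example from the docstring.
import numpy as np

x = np.array([1, 2, 3, 4, 5, 6])
m = x.shape[0]
n = np.int32(np.sqrt(0.25 + 2. * m) - 0.5)            # here n == 3
x_tail = x[(m - (n**2 - m)):]                          # the last n elements
lower = np.tril(np.concatenate([x_tail, x[::-1]]).reshape(n, n))
np.testing.assert_array_equal(lower, [[4, 0, 0],
                                      [6, 5, 0],
                                      [3, 2, 1]])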
def fill_triangular_inverse(x, upper=False, name=None):
"""Creates a vector from a (batch of) triangular matrix.
The vector is created from the lower-triangular or upper-triangular portion
depending on the value of the parameter `upper`.
If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is
`[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`.
Example:
```python
fill_triangular_inverse(
[[4, 0, 0],
[6, 5, 0],
[3, 2, 1]])
# ==> [1, 2, 3, 4, 5, 6]
fill_triangular_inverse(
[[1, 2, 3],
[0, 5, 6],
[0, 0, 4]], upper=True)
# ==> [1, 2, 3, 4, 5, 6]
```
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower
(or upper) triangular elements from `x`.
"""
with ops.name_scope(name, "fill_triangular_inverse", values=[x]):
x = ops.convert_to_tensor(x, name="x")
if tensor_shape.dimension_value(
x.shape.with_rank_at_least(2)[-1]) is not None:
n = np.int32(x.shape.dims[-1].value)
m = np.int32((n * (n + 1)) // 2)
static_final_shape = x.shape[:-2].concatenate([m])
else:
n = array_ops.shape(x)[-1]
m = (n * (n + 1)) // 2
static_final_shape = x.shape.with_rank_at_least(2)[:-2].concatenate(
[None])
ndims = prefer_static_rank(x)
if upper:
initial_elements = x[..., 0, :]
triangular_portion = x[..., 1:, :]
else:
initial_elements = array_ops.reverse(x[..., -1, :], axis=[ndims - 2])
triangular_portion = x[..., :-1, :]
rotated_triangular_portion = array_ops.reverse(
array_ops.reverse(triangular_portion, axis=[ndims - 1]),
axis=[ndims - 2])
consolidated_matrix = triangular_portion + rotated_triangular_portion
end_sequence = array_ops.reshape(
consolidated_matrix,
array_ops.concat([array_ops.shape(x)[:-2], [n * (n - 1)]], axis=0))
y = array_ops.concat([initial_elements, end_sequence[..., :m - n]], axis=-1)
y.set_shape(static_final_shape)
return y
def tridiag(below=None, diag=None, above=None, name=None):
"""Creates a matrix with values set above, below, and on the diagonal.
Example:
```python
tridiag(below=[1., 2., 3.],
diag=[4., 5., 6., 7.],
above=[8., 9., 10.])
# ==> array([[ 4., 8., 0., 0.],
# [ 1., 5., 9., 0.],
# [ 0., 2., 6., 10.],
# [ 0., 0., 3., 7.]], dtype=float32)
```
Warning: This Op is intended for convenience, not efficiency.
Args:
below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below
diagonal part. `None` is logically equivalent to `below = 0`.
diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal
part. `None` is logically equivalent to `diag = 0`.
above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above
diagonal part. `None` is logically equivalent to `above = 0`.
name: Python `str`. The name to give this op.
Returns:
tridiag: `Tensor` with values set above, below and on the diagonal.
Raises:
ValueError: if all inputs are `None`.
"""
def _pad(x):
"""Prepends and appends a zero to every vector in a batch of vectors."""
shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0)
z = array_ops.zeros(shape, dtype=x.dtype)
return array_ops.concat([z, x, z], axis=-1)
def _add(*x):
"""Adds list of Tensors, ignoring `None`."""
s = None
for y in x:
if y is None:
continue
elif s is None:
s = y
else:
s += y
if s is None:
raise ValueError("Must specify at least one of `below`, `diag`, `above`.")
return s
with ops.name_scope(name, "tridiag", [below, diag, above]):
if below is not None:
below = ops.convert_to_tensor(below, name="below")
below = array_ops.matrix_diag(_pad(below))[..., :-1, 1:]
if diag is not None:
diag = ops.convert_to_tensor(diag, name="diag")
diag = array_ops.matrix_diag(diag)
if above is not None:
above = ops.convert_to_tensor(above, name="above")
above = array_ops.matrix_diag(_pad(above))[..., 1:, :-1]
# TODO(jvdillon): Consider using scatter_nd instead of creating three full
# matrices.
return _add(below, diag, above)
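# Illustrative sketch (a hypothetical helper, never called by the library code
# here): for a single, unbatched case the pad/slice construction above is
# equivalent to summing three shifted NumPy diagonals.
def _sketch_tridiag_numpy():
  """Reproduces the docstring example of `tridiag` with `np.diag`."""
  import numpy as np  # Local import so the sketch is copy-paste runnable.
  below = [1., 2., 3.]
  diag = [4., 5., 6., 7.]
  above = [8., 9., 10.]
  return np.diag(below, k=-1) + np.diag(diag) + np.diag(above, k=1)
  # ==> [[ 4.,  8.,  0.,  0.],
  #      [ 1.,  5.,  9.,  0.],
  #      [ 0.,  2.,  6., 10.],
  #      [ 0.,  0.,  3.,  7.]]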
def reduce_weighted_logsumexp(logx,
w=None,
axis=None,
keep_dims=False,
return_sign=False,
name=None):
"""Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.
If all weights `w` are known to be positive, it is more efficient to directly
use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.math.log(w))` is
more efficient than `du.reduce_weighted_logsumexp(logx, w)`.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(w * exp(input))). It
avoids overflows caused by taking the exp of large inputs and underflows
caused by taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0, 0],
[0, 0, 0]])
w = tf.constant([[-1., 1, 1],
[1, 1, 1]])
du.reduce_weighted_logsumexp(x, w)
# ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)
du.reduce_weighted_logsumexp(x, w, axis=0)
# ==> [log(-1+1), log(1+1), log(1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1)
# ==> [log(-1+1+1), log(1+1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)
# ==> [[log(-1+1+1)], [log(1+1+1)]]
du.reduce_weighted_logsumexp(x, w, axis=[0, 1])
# ==> log(-1+5)
```
Args:
logx: The tensor to reduce. Should have numeric type.
w: The weight tensor. Should have numeric type identical to `logx`.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
return_sign: If `True`, returns the sign of the result.
name: A name for the operation (optional).
Returns:
lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.
sign: (Optional) The sign of `sum(weight * exp(x))`.
"""
with ops.name_scope(name, "reduce_weighted_logsumexp", [logx, w]):
logx = ops.convert_to_tensor(logx, name="logx")
if w is None:
lswe = math_ops.reduce_logsumexp(logx, axis=axis, keepdims=keep_dims)
if return_sign:
sgn = array_ops.ones_like(lswe)
return lswe, sgn
return lswe
w = ops.convert_to_tensor(w, dtype=logx.dtype, name="w")
log_absw_x = logx + math_ops.log(math_ops.abs(w))
max_log_absw_x = math_ops.reduce_max(log_absw_x, axis=axis, keepdims=True)
# If the largest element is `-inf` or `inf` then we don't bother subtracting
# off the max. We do this because otherwise we'd get `inf - inf = NaN`. That
# this is ok follows from the fact that we're actually free to subtract any
# value we like, so long as we add it back after taking the `log(sum(...))`.
max_log_absw_x = array_ops.where_v2(
math_ops.is_inf(max_log_absw_x), array_ops.zeros_like(max_log_absw_x),
max_log_absw_x)
wx_over_max_absw_x = (
math_ops.sign(w) * math_ops.exp(log_absw_x - max_log_absw_x))
sum_wx_over_max_absw_x = math_ops.reduce_sum(
wx_over_max_absw_x, axis=axis, keepdims=keep_dims)
if not keep_dims:
max_log_absw_x = array_ops.squeeze(max_log_absw_x, axis)
sgn = math_ops.sign(sum_wx_over_max_absw_x)
lswe = max_log_absw_x + math_ops.log(sgn * sum_wx_over_max_absw_x)
if return_sign:
return lswe, sgn
return lswe
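# Illustrative sketch (a hypothetical helper, never called by the library code
# here): the max-shifting and sign bookkeeping above, summarized for 1-D NumPy
# inputs. It is a reference for the algebra only, not a replacement for the
# graph op.
def _sketch_weighted_logsumexp_numpy(logx, w):
  """Returns (lswe, sign) for 1-D arrays, mirroring the reduction above."""
  import numpy as np  # Local import so the sketch is copy-paste runnable.
  log_absw_x = logx + np.log(np.abs(w))
  shift = np.max(log_absw_x)
  if np.isinf(shift):  # Avoid inf - inf = nan, as in the graph version.
    shift = 0.
  total = np.sum(np.sign(w) * np.exp(log_absw_x - shift))
  sgn = np.sign(total)
  return shift + np.log(sgn * total), sgn
# E.g. _sketch_weighted_logsumexp_numpy(np.zeros(3), np.array([-1., 1., 1.]))
# returns (0.0, 1.0), i.e. log(-1 + 1 + 1) = log(1) with a positive sign.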
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/ops/softplus_op_test.py
# once TF core is accepting new ops.
def softplus_inverse(x, name=None):
"""Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).
Mathematically this op is equivalent to:
```none
softplus_inverse = log(exp(x) - 1.)
```
Args:
x: `Tensor`. Non-negative (not enforced), floating-point.
name: A name for the operation (optional).
Returns:
`Tensor`. Has the same type/shape as input `x`.
"""
with ops.name_scope(name, "softplus_inverse", values=[x]):
x = ops.convert_to_tensor(x, name="x")
# We begin by deriving a more numerically stable softplus_inverse:
# x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
# ==> exp{x} = 1 + exp{y} (1)
# ==> y = Log[exp{x} - 1] (2)
# = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
# = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
# = Log[1 - exp{-x}] + x (3)
# (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
# For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
# be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.
#
# In addition to the numerically stable derivation above, we clamp
# small/large values to be congruent with the logic in:
# tensorflow/core/kernels/softplus_op.h
#
# Finally, we set the input to one whenever the input is too large or too
# small. This ensures that no unchosen codepath is +/- inf. This is
# necessary to ensure the gradient doesn't get NaNs. Recall that the
# gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
# thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
# to overwrite `x` with ones only when we will never actually use this
# value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
is_too_small = math_ops.less(x, np.exp(threshold))
is_too_large = math_ops.greater(x, -threshold)
too_small_value = math_ops.log(x)
too_large_value = x
# This `where` is ultimately a NOP with respect to the returned value: the
# final `where_v2` below never selects `y` at the positions where we
# substituted the surrogate `ones_like`.
x = array_ops.where_v2(
math_ops.logical_or(is_too_small, is_too_large), array_ops.ones_like(x),
x)
y = x + math_ops.log(-math_ops.expm1(-x)) # == log(expm1(x))
return array_ops.where_v2(
is_too_small, too_small_value,
array_ops.where_v2(is_too_large, too_large_value, y))
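# Illustrative sketch (a hypothetical helper, never called by the library code
# here): a NumPy round-trip check of the identity in the docstring, using the
# same log1p/expm1 form as the graph code above.
def _sketch_softplus_inverse_roundtrip():
  """Returns True if softplus_inverse(softplus(y)) ~= y for a few values."""
  import numpy as np  # Local import so the sketch is copy-paste runnable.
  y = np.array([-10., -1., 0.5, 20.])
  x = np.log1p(np.exp(-np.abs(y))) + np.maximum(y, 0.)  # Stable softplus(y).
  recovered = x + np.log(-np.expm1(-x))                 # Same form as above.
  return np.allclose(recovered, y)                      # ==> True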
# TODO(b/35290280): Add unit-tests.
def dimension_size(x, axis):
"""Returns the size of a specific dimension."""
# Since tf.gather isn't "constant-in, constant-out", we must first check the
# static shape or fallback to dynamic shape.
s = tensor_shape.dimension_value(
x.shape.with_rank_at_least(np.abs(axis))[axis])
if s is not None:
return s
return array_ops.shape(x)[axis]
def process_quadrature_grid_and_probs(quadrature_grid_and_probs,
dtype,
validate_args,
name=None):
"""Validates quadrature grid, probs or computes them as necessary.
Args:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight. When `None`, defaults to:
`np.polynomial.hermite.hermgauss(deg=8)`.
dtype: The expected `dtype` of `grid` and `probs`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight.
Raises:
ValueError: if `quadrature_grid_and_probs is not None` and
`len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`
"""
with ops.name_scope(name, "process_quadrature_grid_and_probs",
[quadrature_grid_and_probs]):
if quadrature_grid_and_probs is None:
grid, probs = np.polynomial.hermite.hermgauss(deg=8)
grid = grid.astype(dtype.as_numpy_dtype)
probs = probs.astype(dtype.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype)
probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype)
return grid, probs
grid, probs = tuple(quadrature_grid_and_probs)
grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype)
probs = ops.convert_to_tensor(probs, name="unnormalized_probs", dtype=dtype)
probs /= linalg_ops.norm(probs, ord=1, axis=-1, keepdims=True, name="probs")
def _static_event_size(x):
"""Returns the static size of a specific dimension or `None`."""
return tensor_shape.dimension_value(x.shape.with_rank_at_least(1)[-1])
m, n = _static_event_size(probs), _static_event_size(grid)
if m is not None and n is not None:
if m != n:
raise ValueError("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s "
"(saw lengths {}, {})".format(m, n))
elif validate_args:
assertions = [
check_ops.assert_equal(
dimension_size(probs, axis=-1),
dimension_size(grid, axis=-1),
message=("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s")),
]
with ops.control_dependencies(assertions):
grid = array_ops.identity(grid)
probs = array_ops.identity(probs)
return grid, probs
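# Illustrative sketch (a hypothetical helper, never called by the library code
# here): the default grid above is simply the degree-8 Gauss-Hermite rule with
# its weights normalized to sum to one.
def _sketch_default_quadrature_numpy():
  """Returns the (grid, probs) pair used when no quadrature is supplied."""
  import numpy as np  # Local import so the sketch is copy-paste runnable.
  grid, probs = np.polynomial.hermite.hermgauss(deg=8)
  probs = probs / np.sum(np.abs(probs))  # Same effect as the ord=1 norm above.
  return grid, probs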
def pad(x, axis, front=False, back=False, value=0, count=1, name=None):
"""Pads `value` to the front and/or back of a `Tensor` dim, `count` times.
Args:
x: `Tensor` input.
axis: Scalar `int`-like `Tensor` representing the single dimension to pad.
(Negative indexing is supported.)
front: Python `bool`; if `True` the beginning of the `axis` dimension is
padded with `value`, `count` times. If `False` no front padding is made.
back: Python `bool`; if `True` the end of the `axis` dimension is padded
with `value`, `count` times. If `False` no end padding is made.
value: Scalar `int`-like `Tensor` representing the actual value added to the
front and/or back of the `axis` dimension of `x`.
count: Scalar `int`-like `Tensor` representing number of elements added to
the front and/or back of the `axis` dimension of `x`. E.g., if `front =
back = True` then `2 * count` elements are added.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pad: The padded version of input `x`.
Raises:
ValueError: if both `front` and `back` are `False`.
TypeError: if `count` is not `int`-like.
"""
with ops.name_scope(name, "pad", [x, value, count]):
x = ops.convert_to_tensor(x, name="x")
value = ops.convert_to_tensor(value, dtype=x.dtype, name="value")
count = ops.convert_to_tensor(count, name="count")
if not count.dtype.is_integer:
raise TypeError("`count.dtype` (`{}`) must be `int`-like.".format(
count.dtype.name))
if not front and not back:
raise ValueError("At least one of `front`, `back` must be `True`.")
ndims = (
x.shape.ndims if x.shape.ndims is not None else array_ops.rank(
x, name="ndims"))
axis = ops.convert_to_tensor(axis, name="axis")
axis_ = tensor_util.constant_value(axis)
if axis_ is not None:
axis = axis_
if axis < 0:
axis = ndims + axis
count_ = tensor_util.constant_value(count)
if axis_ >= 0 or x.shape.ndims is not None:
head = x.shape[:axis]
middle = tensor_shape.TensorShape(None if count_ is None else (
tensor_shape.dimension_at_index(x.shape, axis) + count_ *
(front + back)))
tail = x.shape[axis + 1:]
final_shape = head.concatenate(middle.concatenate(tail))
else:
final_shape = None
else:
axis = array_ops.where_v2(axis < 0, ndims + axis, axis)
final_shape = None
x = array_ops.pad(
x,
paddings=array_ops.one_hot(
indices=array_ops.stack(
[axis if front else -1, axis if back else -1]),
depth=ndims,
axis=0,
on_value=count,
dtype=dtypes.int32),
constant_values=value)
if final_shape is not None:
x.set_shape(final_shape)
return x
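# Illustrative sketch (a hypothetical helper, never called by the library code
# here): the `one_hot` call above only builds the `paddings` matrix consumed
# by `array_ops.pad`. For example, with `ndims=3`, `axis=1`, back-only padding
# and `count=2` it produces the matrix returned below.
def _sketch_pad_paddings_numpy():
  """Recreates the paddings matrix for front=False, back=True."""
  import numpy as np  # Local import so the sketch is copy-paste runnable.
  ndims, axis, count, front, back = 3, 1, 2, False, True
  indices = [axis if front else -1, axis if back else -1]  # ==> [-1, 1]
  paddings = np.zeros([ndims, 2], dtype=np.int32)
  for col, idx in enumerate(indices):
    if idx >= 0:  # A -1 index leaves its column all-zero, as one_hot does.
      paddings[idx, col] = count
  return paddings
  # ==> [[0, 0],
  #      [0, 2],
  #      [0, 0]]   i.e. append two elements at the back of axis 1 only.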
def parent_frame_arguments():
"""Returns parent frame arguments.
When called inside a function, returns a dictionary with the caller's function
arguments. These are positional arguments and keyword arguments (**kwargs),
while variable arguments (*varargs) are excluded.
When called at global scope, this will return an empty dictionary, since there
are no arguments.
WARNING: If the caller's argument variables are reassigned before invoking
this method, the returned values reflect the reassigned values. For this reason,
we recommend calling `parent_frame_arguments` at the beginning of the
function.
"""
# All arguments and the names used for *varargs, and **kwargs
arg_names, variable_arg_name, keyword_arg_name, local_vars = (
tf_inspect._inspect.getargvalues( # pylint: disable=protected-access
# Get the first frame of the caller of this method.
tf_inspect._inspect.stack()[1][0])) # pylint: disable=protected-access
# Remove the *varargs, and flatten the **kwargs. Both are
# nested lists.
local_vars.pop(variable_arg_name, {})
keyword_args = local_vars.pop(keyword_arg_name, {})
final_args = {}
# Copy over arguments and their values. In general, local_vars
# may contain more than just the arguments, since this method
# can be called anywhere in a function.
for arg_name in arg_names:
final_args[arg_name] = local_vars.pop(arg_name)
final_args.update(keyword_args)
return final_args
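# Illustrative usage sketch (a hypothetical wrapper, never called by the
# library code here): calling `_sketch_parent_frame_arguments_usage(1, c=3)`
# returns `{'a': 1, 'b': 2, 'c': 3}`; bound positionals and **kwargs are
# included, *varargs are not.
def _sketch_parent_frame_arguments_usage(a, b=2, *unused_args, **unused_kwargs):
  """Returns this function's own bound arguments via the helper above."""
  return parent_frame_arguments()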
class AppendDocstring(object):
"""Helper class to promote private subclass docstring to public counterpart.
Example:
```python
class TransformedDistribution(Distribution):
@distribution_util.AppendDocstring(
additional_note="A special note!",
kwargs_dict={"foo": "An extra arg."})
def _prob(self, y, foo=None):
pass
```
In this case, the `AppendDocstring` decorator appends the `additional_note` to
the docstring of `prob` (not `_prob`) and adds a new `kwargs`
section with each dictionary item as a bullet-point.
For a more detailed example, see `TransformedDistribution`.
"""
def __init__(self, additional_note="", kwargs_dict=None):
"""Initializes the AppendDocstring object.
Args:
additional_note: Python string added as additional docstring to public
version of function.
kwargs_dict: Python string/string dictionary representing specific kwargs
expanded from the **kwargs input.
Raises:
ValueError: if kwargs_dict.key contains whitespace.
ValueError: if kwargs_dict.value contains newlines.
"""
self._additional_note = additional_note
if kwargs_dict:
bullets = []
for key in sorted(kwargs_dict.keys()):
value = kwargs_dict[key]
if any(x.isspace() for x in key):
raise ValueError("Parameter name \"%s\" contains whitespace." % key)
value = value.lstrip()
if "\n" in value:
raise ValueError(
"Parameter description for \"%s\" contains newlines." % key)
bullets.append("* `%s`: %s" % (key, value))
self._additional_note += ("\n\n##### `kwargs`:\n\n" + "\n".join(bullets))
def __call__(self, fn):
@functools.wraps(fn)
def _fn(*args, **kwargs):
return fn(*args, **kwargs)
if _fn.__doc__ is None:
_fn.__doc__ = self._additional_note
else:
_fn.__doc__ += "\n%s" % self._additional_note
return _fn
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/util.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Beta distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Beta",
"BetaWithSoftplusConcentration",
]
_beta_sample_note = """Note: `x` must have dtype `self.dtype` and be in
`[0, 1]`. It must have a shape compatible with `self.batch_shape()`."""
@tf_export(v1=["distributions.Beta"])
class Beta(distribution.Distribution):
"""Beta distribution.
The Beta distribution is defined over the `(0, 1)` interval using parameters
`concentration1` (aka "alpha") and `concentration0` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
```
where:
* `concentration1 = alpha`,
* `concentration0 = beta`,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The concentration parameters represent mean total counts of a `1` or a `0`,
i.e.,
```none
concentration1 = alpha = mean * total_concentration
concentration0 = beta = (1. - mean) * total_concentration
```
where `mean` in `(0, 1)` and `total_concentration` is a positive real number
representing a mean `total_count = concentration1 + concentration0`.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: The samples can be zero due to finite precision.
This happens more often when some of the concentrations are very small.
Make sure to round the samples to `np.finfo(dtype).tiny` before computing the
density.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create a batch of three Beta distributions.
alpha = [1, 2, 3]
beta = [1, 2, 3]
dist = tfd.Beta(alpha, beta)
dist.sample([4, 5]) # Shape [4, 5, 3]
# `x` has three batch entries, each with two samples.
x = [[.1, .4, .5],
[.2, .3, .5]]
# Calculate the probability of each pair of samples under the corresponding
# distribution in `dist`.
dist.prob(x) # Shape [2, 3]
```
```python
# Create batch_shape=[2, 3] via parameter broadcast:
alpha = [[1.], [2]] # Shape [2, 1]
beta = [3., 4, 5] # Shape [3]
dist = tfd.Beta(alpha, beta)
# alpha broadcast as: [[1., 1, 1,],
# [2, 2, 2]]
# beta broadcast as: [[3., 4, 5],
# [3, 4, 5]]
# batch_shape [2, 3]
dist.sample([4, 5]) # Shape [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # Shape [2, 3]
```
Compute the gradients of samples w.r.t. the parameters:
```python
alpha = tf.constant(1.0)
beta = tf.constant(2.0)
dist = tfd.Beta(alpha, beta)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [alpha, beta])
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
concentration1=None,
concentration0=None,
validate_args=False,
allow_nan_stats=True,
name="Beta"):
"""Initialize a batch of Beta distributions.
Args:
concentration1: Positive floating-point `Tensor` indicating mean
number of successes; aka "alpha". Implies `self.dtype` and
`self.batch_shape`, i.e.,
`concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
concentration0: Positive floating-point `Tensor` indicating mean
number of failures; aka "beta". Otherwise has same semantics as
`concentration1`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration1, concentration0]) as name:
self._concentration1 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration1, name="concentration1"),
validate_args)
self._concentration0 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration0, name="concentration0"),
validate_args)
check_ops.assert_same_float_dtype([
self._concentration1, self._concentration0])
self._total_concentration = self._concentration1 + self._concentration0
super(Beta, self).__init__(
dtype=self._total_concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration1,
self._concentration0,
self._total_concentration],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(zip(
["concentration1", "concentration0"],
[ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2))
@property
def concentration1(self):
"""Concentration parameter associated with a `1` outcome."""
return self._concentration1
@property
def concentration0(self):
"""Concentration parameter associated with a `0` outcome."""
return self._concentration0
@property
def total_concentration(self):
"""Sum of concentration parameters."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
expanded_concentration1 = array_ops.ones_like(
self.total_concentration, dtype=self.dtype) * self.concentration1
expanded_concentration0 = array_ops.ones_like(
self.total_concentration, dtype=self.dtype) * self.concentration0
gamma1_sample = random_ops.random_gamma(
shape=[n],
alpha=expanded_concentration1,
dtype=self.dtype,
seed=seed)
gamma2_sample = random_ops.random_gamma(
shape=[n],
alpha=expanded_concentration0,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, "beta"))
beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
return beta_sample
@distribution_util.AppendDocstring(_beta_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_beta_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
@distribution_util.AppendDocstring(_beta_sample_note)
def _log_cdf(self, x):
return math_ops.log(self._cdf(x))
@distribution_util.AppendDocstring(_beta_sample_note)
def _cdf(self, x):
return math_ops.betainc(self.concentration1, self.concentration0, x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return (math_ops.xlogy(self.concentration1 - 1., x) +
(self.concentration0 - 1.) * math_ops.log1p(-x))
def _log_normalization(self):
return (math_ops.lgamma(self.concentration1)
+ math_ops.lgamma(self.concentration0)
- math_ops.lgamma(self.total_concentration))
def _entropy(self):
return (
self._log_normalization()
- (self.concentration1 - 1.) * math_ops.digamma(self.concentration1)
- (self.concentration0 - 1.) * math_ops.digamma(self.concentration0)
+ ((self.total_concentration - 2.) *
math_ops.digamma(self.total_concentration)))
def _mean(self):
return self._concentration1 / self._total_concentration
def _variance(self):
return self._mean() * (1. - self._mean()) / (1. + self.total_concentration)
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when `concentration1 <= 1` or
`concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`
is used for undefined modes. If `self.allow_nan_stats` is `False` an
exception is raised when one or more modes are undefined.""")
def _mode(self):
mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
is_defined = math_ops.logical_and(self.concentration1 > 1.,
self.concentration0 > 1.)
return array_ops.where_v2(is_defined, mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.concentration1,
message="Mode undefined for concentration1 <= 1."),
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.concentration0,
message="Mode undefined for concentration0 <= 1.")
], mode)
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of a concentration parameter."""
if not validate_args:
return concentration
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
], concentration)
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x, message="sample must be positive"),
check_ops.assert_less(
x,
array_ops.ones([], self.dtype),
message="sample must be less than `1`."),
], x)
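# Illustrative sketch (a hypothetical helper, never called by the library code
# here): `Beta._sample_n` above relies on the classical identity that
# G1 / (G1 + G2) is Beta(a, b) distributed when G1 ~ Gamma(a, 1) and
# G2 ~ Gamma(b, 1). A quick NumPy Monte Carlo check of the resulting mean:
def _sketch_beta_via_gamma_ratio(a=2., b=5., num_samples=100000, seed=0):
  """Compares the Gamma-ratio sample mean against the analytic a / (a + b)."""
  rng = np.random.RandomState(seed)
  g1 = rng.gamma(shape=a, size=num_samples)
  g2 = rng.gamma(shape=b, size=num_samples)
  samples = g1 / (g1 + g2)
  return samples.mean(), a / (a + b)  # The two values should be close.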
class BetaWithSoftplusConcentration(Beta):
"""Beta with softplus transform of `concentration1` and `concentration0`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Beta(tf.nn.softplus(concentration1), "
"tf.nn.softplus(concentration0))` instead.",
warn_once=True)
def __init__(self,
concentration1,
concentration0,
validate_args=False,
allow_nan_stats=True,
name="BetaWithSoftplusConcentration"):
parameters = dict(locals())
with ops.name_scope(name, values=[concentration1,
concentration0]) as name:
super(BetaWithSoftplusConcentration, self).__init__(
concentration1=nn.softplus(concentration1,
name="softplus_concentration1"),
concentration0=nn.softplus(concentration0,
name="softplus_concentration0"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Beta, Beta)
def _kl_beta_beta(d1, d2, name=None):
"""Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.
Args:
d1: instance of a Beta distribution object.
d2: instance of a Beta distribution object.
name: (optional) Name to use for created operations.
Defaults to "kl_beta_beta".
Returns:
Batchwise KL(d1 || d2)
"""
def delta(fn, is_property=True):
fn1 = getattr(d1, fn)
fn2 = getattr(d2, fn)
return (fn2 - fn1) if is_property else (fn2() - fn1())
with ops.name_scope(name, "kl_beta_beta", values=[
d1.concentration1,
d1.concentration0,
d1.total_concentration,
d2.concentration1,
d2.concentration0,
d2.total_concentration,
]):
return (delta("_log_normalization", is_property=False)
- math_ops.digamma(d1.concentration1) * delta("concentration1")
- math_ops.digamma(d1.concentration0) * delta("concentration0")
+ (math_ops.digamma(d1.total_concentration)
* delta("total_concentration")))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/beta.py
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Functions "ndtr" and "ndtri" are derived from calculations made in:
# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html
# In the following email exchange, the author gives his consent to redistribute
# derived works under an Apache 2.0 license.
#
# From: Stephen Moshier <steve@moshier.net>
# Date: Sat, Jun 9, 2018 at 2:36 PM
# Subject: Re: Licensing cephes under Apache (BSD-like) license.
# To: rif <rif@google.com>
#
#
#
# Hello Rif,
#
# Yes, Google may distribute Cephes files under the Apache 2 license.
#
# If clarification is needed, I do not favor BSD over other free licenses.
# I would agree that Apache 2 seems to cover the concern you mentioned
# about sublicensees.
#
# Best wishes for good luck with your projects!
# Steve Moshier
#
#
#
# On Thu, 31 May 2018, rif wrote:
#
# > Hello Steve.
# > My name is Rif. I work on machine learning software at Google.
# >
# > Your cephes software continues to be incredibly useful and widely used. I
# > was wondering whether it would be permissible for us to use the Cephes code
# > under the Apache 2.0 license, which is extremely similar in permissions to
# > the BSD license (Wikipedia comparisons). This would be quite helpful to us
# > in terms of avoiding multiple licenses on software.
# >
# > I'm sorry to bother you with this (I can imagine you're sick of hearing
# > about this by now), but I want to be absolutely clear we're on the level and
# > not misusing your important software. In former conversation with Eugene
# > Brevdo (ebrevdo@google.com), you wrote "If your licensing is similar to BSD,
# > the formal way that has been handled is simply to add a statement to the
# > effect that you are incorporating the Cephes software by permission of the
# > author." I wanted to confirm that (a) we could use the Apache license, (b)
# > that we don't need to (and probably you don't want to) keep getting
# > contacted about individual uses, because your intent is generally to allow
# > this software to be reused under "BSD-like" license, and (c) you're OK
# > letting incorporators decide whether a license is sufficiently BSD-like?
# >
# > Best,
# >
# > rif
# >
# >
# >
"""Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = [
"erfinv",
"ndtr",
"ndtri",
"log_ndtr",
"log_cdf_laplace",
]
# log_ndtr uses different functions over the ranges
# (-infty, lower], (lower, upper], (upper, infty).
# Lower bound values were chosen by examining where the support of ndtr
# appears to be zero, relative to scipy's (which is always 64bit). They were
# then made more conservative just to be safe. (Conservative means use the
# expansion more than we probably need to.) See `NdtrTest` in
# special_math_test.py.
LOGNDTR_FLOAT64_LOWER = np.array(-20, np.float64)
LOGNDTR_FLOAT32_LOWER = np.array(-10, np.float32)
# Upper bound values were chosen by examining for which values of 'x'
# Log[cdf(x)] is 0, after which point we need to use the approximation
# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly
# conservative, meaning we use the approximation earlier than needed.
LOGNDTR_FLOAT64_UPPER = np.array(8, np.float64)
LOGNDTR_FLOAT32_UPPER = np.array(5, np.float32)
def ndtr(x, name="ndtr"):
"""Normal distribution function.
Returns the area under the Gaussian probability density function, integrated
from minus infinity to x:
```
1 / x
ndtr(x) = ---------- | exp(-0.5 t**2) dt
sqrt(2 pi) /-inf
= 0.5 (1 + erf(x / sqrt(2)))
= 0.5 erfc(x / sqrt(2))
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtr").
Returns:
ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return _ndtr(x)
def _ndtr(x):
"""Implements ndtr core logic."""
half_sqrt_2 = constant_op.constant(
0.5 * np.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
w = x * half_sqrt_2
z = math_ops.abs(w)
y = array_ops.where_v2(
math_ops.less(z, half_sqrt_2), 1. + math_ops.erf(w),
array_ops.where_v2(
math_ops.greater(w, 0.), 2. - math_ops.erfc(z), math_ops.erfc(z)))
return 0.5 * y
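# Illustrative sketch (a hypothetical helper, never called by the library code
# here; assumes SciPy is available): the same three-way branch in NumPy. The
# branching keeps the lower tail accurate, where `1 + erf(w)` would cancel
# catastrophically, and it can be checked against `scipy.special.ndtr`.
def _sketch_ndtr_numpy(x):
  """NumPy rendering of `_ndtr`; compare with `scipy.special.ndtr(x)`."""
  from scipy import special  # Local import; SciPy is assumed available.
  half_sqrt_2 = 0.5 * np.sqrt(2.)
  w = x * half_sqrt_2
  z = np.abs(w)
  y = np.where(z < half_sqrt_2,
               1. + special.erf(w),
               np.where(w > 0., 2. - special.erfc(z), special.erfc(z)))
  return 0.5 * y
# Usage idea: np.allclose(_sketch_ndtr_numpy(xs), scipy.special.ndtr(xs))
# should hold for a grid of float64 values xs.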
def ndtri(p, name="ndtri"):
"""The inverse of the CDF of the Normal distribution function.
Returns x such that the area under the pdf from minus infinity to x is equal
to p.
A piece-wise rational approximation is done for the function.
This is a port of the implementation in netlib.
Args:
p: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtri").
Returns:
x: `Tensor` with `dtype=p.dtype`.
Raises:
TypeError: if `p` is not floating-type.
"""
with ops.name_scope(name, values=[p]):
p = ops.convert_to_tensor(p, name="p")
if p.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"p.dtype=%s is not handled, see docstring for supported types."
% p.dtype)
return _ndtri(p)
def _ndtri(p):
"""Implements ndtri core logic."""
# Constants used in piece-wise rational approximations. Taken from the cephes
# library:
# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html
p0 = list(reversed([-5.99633501014107895267E1,
9.80010754185999661536E1,
-5.66762857469070293439E1,
1.39312609387279679503E1,
-1.23916583867381258016E0]))
q0 = list(reversed([1.0,
1.95448858338141759834E0,
4.67627912898881538453E0,
8.63602421390890590575E1,
-2.25462687854119370527E2,
2.00260212380060660359E2,
-8.20372256168333339912E1,
1.59056225126211695515E1,
-1.18331621121330003142E0]))
p1 = list(reversed([4.05544892305962419923E0,
3.15251094599893866154E1,
5.71628192246421288162E1,
4.40805073893200834700E1,
1.46849561928858024014E1,
2.18663306850790267539E0,
-1.40256079171354495875E-1,
-3.50424626827848203418E-2,
-8.57456785154685413611E-4]))
q1 = list(reversed([1.0,
1.57799883256466749731E1,
4.53907635128879210584E1,
4.13172038254672030440E1,
1.50425385692907503408E1,
2.50464946208309415979E0,
-1.42182922854787788574E-1,
-3.80806407691578277194E-2,
-9.33259480895457427372E-4]))
p2 = list(reversed([3.23774891776946035970E0,
6.91522889068984211695E0,
3.93881025292474443415E0,
1.33303460815807542389E0,
2.01485389549179081538E-1,
1.23716634817820021358E-2,
3.01581553508235416007E-4,
2.65806974686737550832E-6,
6.23974539184983293730E-9]))
q2 = list(reversed([1.0,
6.02427039364742014255E0,
3.67983563856160859403E0,
1.37702099489081330271E0,
2.16236993594496635890E-1,
1.34204006088543189037E-2,
3.28014464682127739104E-4,
2.89247864745380683936E-6,
6.79019408009981274425E-9]))
def _create_polynomial(var, coeffs):
"""Compute n_th order polynomial via Horner's method."""
coeffs = np.array(coeffs, var.dtype.as_numpy_dtype)
if not coeffs.size:
return array_ops.zeros_like(var)
return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var
maybe_complement_p = array_ops.where_v2(p > -np.expm1(-2.), 1. - p, p)
# Write in an arbitrary value in place of 0 for p since 0 will cause NaNs
# later on. The result from the computation when p == 0 is not used so any
# number that doesn't result in NaNs is fine.
sanitized_mcp = array_ops.where_v2(
maybe_complement_p <= 0.,
array_ops.fill(array_ops.shape(p), np.array(0.5, p.dtype.as_numpy_dtype)),
maybe_complement_p)
# Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2).
w = sanitized_mcp - 0.5
ww = w ** 2
x_for_big_p = w + w * ww * (_create_polynomial(ww, p0)
/ _create_polynomial(ww, q0))
x_for_big_p *= -np.sqrt(2. * np.pi)
# Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z),
# where z = sqrt(-2. * log(p)), and P/Q are chosen between two different
# arrays based on whether p < exp(-32).
z = math_ops.sqrt(-2. * math_ops.log(sanitized_mcp))
first_term = z - math_ops.log(z) / z
second_term_small_p = (
_create_polynomial(1. / z, p2) /
_create_polynomial(1. / z, q2) / z)
second_term_otherwise = (
_create_polynomial(1. / z, p1) /
_create_polynomial(1. / z, q1) / z)
x_for_small_p = first_term - second_term_small_p
x_otherwise = first_term - second_term_otherwise
x = array_ops.where_v2(
sanitized_mcp > np.exp(-2.), x_for_big_p,
array_ops.where_v2(z >= 8.0, x_for_small_p, x_otherwise))
x = array_ops.where_v2(p > 1. - np.exp(-2.), x, -x)
infinity_scalar = constant_op.constant(np.inf, dtype=p.dtype)
infinity = array_ops.fill(array_ops.shape(p), infinity_scalar)
x_nan_replaced = array_ops.where_v2(p <= 0.0, -infinity,
array_ops.where_v2(p >= 1.0, infinity, x))
return x_nan_replaced
def log_ndtr(x, series_order=3, name="log_ndtr"):
"""Log Normal distribution function.
For details of the Normal distribution function see `ndtr`.
This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or
using an asymptotic series. Specifically:
- For `x > upper_segment`, use the approximation `-ndtr(-x)` based on
`log(1-x) ~= -x, x << 1`.
- For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique
and take a log.
- For `x <= lower_segment`, we use the series approximation of erf to compute
the log CDF directly.
The `lower_segment` is set based on the precision of the input:
```
lower_segment = { -20, x.dtype=float64
{ -10, x.dtype=float32
upper_segment = { 8, x.dtype=float64
{ 5, x.dtype=float32
```
When `x < lower_segment`, the `ndtr` asymptotic series approximation is:
```
ndtr(x) = scale * (1 + sum) + R_N
scale = exp(-0.5 x**2) / (-x sqrt(2 pi))
sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N}
R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3})
```
where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a
[double-factorial](https://en.wikipedia.org/wiki/Double_factorial).
Args:
x: `Tensor` of type `float32`, `float64`.
series_order: Positive Python `integer`. Maximum depth to
evaluate the asymptotic expansion. This is the `N` above.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
log_ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
TypeError: if `series_order` is not a Python `integer`.
ValueError: if `series_order` is not in `[0, 30]`.
"""
if not isinstance(series_order, int):
raise TypeError("series_order must be a Python integer.")
if series_order < 0:
raise ValueError("series_order must be non-negative.")
if series_order > 30:
raise ValueError("series_order must be <= 30.")
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype == np.float64:
lower_segment = LOGNDTR_FLOAT64_LOWER
upper_segment = LOGNDTR_FLOAT64_UPPER
elif x.dtype.as_numpy_dtype == np.float32:
lower_segment = LOGNDTR_FLOAT32_LOWER
upper_segment = LOGNDTR_FLOAT32_UPPER
else:
raise TypeError("x.dtype=%s is not supported." % x.dtype)
# The basic idea here was ported from:
# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html
# We copy the main idea, with a few changes
# * For x >> 1, and X ~ Normal(0, 1),
# Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],
# which extends the range of validity of this function.
# * We use one fixed series_order for all of 'x', rather than adaptive.
# * Our docstring properly reflects that this is an asymptotic series, not a
# Taylor series. We also provided a correct bound on the remainder.
# * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when
# x=0. This happens even though the branch is unchosen because when x=0
# the gradient of a select involves the calculation 1*dy+0*(-inf)=nan
# regardless of whether dy is finite. Note that the minimum is a NOP if
# the branch is chosen.
return array_ops.where_v2(
math_ops.greater(x, upper_segment),
-_ndtr(-x), # log(1-x) ~= -x, x << 1
array_ops.where_v2(
math_ops.greater(x, lower_segment),
math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))),
_log_ndtr_lower(math_ops.minimum(x, lower_segment), series_order)))
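# Illustrative sketch (a hypothetical helper, never called by the library code
# here; assumes SciPy is available): it shows why the asymptotic branch above
# exists, since a naive log(ndtr(x)) underflows to log(0) = -inf for
# moderately negative x while a stable reference stays finite.
def _sketch_log_ndtr_tail():
  """Compares naive log(ndtr(x)) with a stable reference at x = -40."""
  from scipy import special  # Local import; SciPy is assumed available.
  x = -40.
  naive = np.log(special.ndtr(x))  # ==> -inf; ndtr(-40) underflows to 0.0.
  stable = special.log_ndtr(x)     # ==> approximately -804.6, still finite.
  return naive, stable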
def _log_ndtr_lower(x, series_order):
"""Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`."""
x_2 = math_ops.square(x)
# Log of the term multiplying (1 + sum)
log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * np.log(2. * np.pi)
return log_scale + math_ops.log(_log_ndtr_asymptotic_series(x, series_order))
def _log_ndtr_asymptotic_series(x, series_order):
"""Calculates the asymptotic series used in log_ndtr."""
dtype = x.dtype.as_numpy_dtype
if series_order <= 0:
return np.array(1, dtype)
x_2 = math_ops.square(x)
even_sum = array_ops.zeros_like(x)
odd_sum = array_ops.zeros_like(x)
x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.
for n in range(1, series_order + 1):
y = np.array(_double_factorial(2 * n - 1), dtype) / x_2n
if n % 2:
odd_sum += y
else:
even_sum += y
x_2n *= x_2
return 1. + even_sum - odd_sum
def erfinv(x, name="erfinv"):
"""The inverse function for erf, the error function.
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="erfinv").
Returns:
x: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return ndtri((x + 1.0) / 2.0) / np.sqrt(2)
def _double_factorial(n):
"""The double factorial function for small Python integer `n`."""
return np.prod(np.arange(n, 1, -2))
def log_cdf_laplace(x, name="log_cdf_laplace"):
"""Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="log_cdf_laplace").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
lower_solution = -np.log(2.) + x
# safe_exp_neg_x = exp{-x} for x > 0, but is
# bounded above by 1, which avoids
# log[1 - 1] = -inf for x = log(1/2), AND
# exp{-x} --> inf, for x << -1
safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))
# log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
# internally by log1p, rather than being done explicitly here.
upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)
return array_ops.where_v2(x < 0., lower_solution, upper_solution)
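# Illustrative sketch (a hypothetical helper, never called by the library code
# here): the same two-branch formula in NumPy; note that `exp(-abs(x))` keeps
# the unselected branch finite, exactly as in the graph code above.
def _sketch_log_cdf_laplace_numpy(x):
  """NumPy rendering of `log_cdf_laplace` for float arrays."""
  lower = -np.log(2.) + x                       # Exact for x <= 0.
  upper = np.log1p(-0.5 * np.exp(-np.abs(x)))   # Exact form for x > 0.
  return np.where(x < 0., lower, upper)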
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/special_math.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Multinomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Multinomial",
]
_multinomial_sample_note = """For each batch of counts, `value = [n_0, ...,
n_{k-1}]`, `P[value]` is the probability that after sampling `self.total_count`
draws from this Multinomial distribution, the number of draws falling in class
`j` is `n_j`. Since this definition is [exchangeable](
https://en.wikipedia.org/wiki/Exchangeable_random_variables), different
sequences have the same counts, so the probability includes a combinatorial
coefficient.
Note: `value` must be a non-negative tensor with dtype `self.dtype`, have no
fractional components, and such that
`tf.reduce_sum(value, -1) = self.total_count`. Its shape must be broadcastable
with `self.probs` and `self.total_count`."""
@tf_export(v1=["distributions.Multinomial"])
class Multinomial(distribution.Distribution):
"""Multinomial distribution.
This Multinomial distribution is parameterized by `probs`, a (batch of)
length-`K` `prob` (probability) vectors (`K > 1`) such that
`tf.reduce_sum(probs, -1) = 1`, and a `total_count` number of trials, i.e.,
the number of trials per draw from the Multinomial. It is defined over a
(batch of) length-`K` vector `counts` such that
`tf.reduce_sum(counts, -1) = total_count`. The Multinomial is identically the
Binomial distribution when `K = 2`.
#### Mathematical Details
The Multinomial is a distribution over `K`-class counts, i.e., a length-`K`
vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`.
The probability mass function (pmf) is,
```none
pmf(n; pi, N) = prod_j (pi_j)**n_j / Z
Z = (prod_j n_j!) / N!
```
where:
* `probs = pi = [pi_0, ..., pi_{K-1}]`, `pi_j > 0`, `sum_j pi_j = 1`,
* `total_count = N`, `N` a positive integer,
* `Z` is the normalization constant, and,
* `N!` denotes `N` factorial.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Pitfalls
The number of classes, `K`, must not exceed:
- the largest integer representable by `self.dtype`, i.e.,
`2**(mantissa_bits+1)` (IEEE 754),
- the maximum `Tensor` index, i.e., `2**31-1`.
In other words,
```python
K <= min(2**31-1, {
tf.float16: 2**11,
tf.float32: 2**24,
tf.float64: 2**53 }[param.dtype])
```
Note: This condition is validated only when `self.validate_args = True`.
#### Examples
Create a 3-class distribution in which the 3rd class is the most likely to be drawn,
using logits.
```python
logits = [-50., -43, 0]
dist = Multinomial(total_count=4., logits=logits)
```
Create a 3-class distribution in which the 3rd class is the most likely to be drawn.
```python
p = [.2, .3, .5]
dist = Multinomial(total_count=4., probs=p)
```
The distribution functions can be evaluated on counts.
```python
# counts same shape as p.
counts = [1., 0, 3]
dist.prob(counts) # Shape []
# p will be broadcast to [[.2, .3, .5], [.2, .3, .5]] to match counts.
counts = [[1., 2, 1], [2, 2, 0]]
dist.prob(counts) # Shape [2]
# p will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7]
```
Create a 2-batch of 3-class distributions.
```python
p = [[.1, .2, .7], [.3, .3, .4]] # Shape [2, 3]
dist = Multinomial(total_count=[4., 5], probs=p)
counts = [[2., 1, 1], [3, 1, 1]]
dist.prob(counts) # Shape [2]
dist.sample(5) # Shape [5, 2, 3]
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
total_count,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="Multinomial"):
"""Initialize a batch of Multinomial distributions.
Args:
total_count: Non-negative floating point tensor with shape broadcastable
to `[N1,..., Nm]` with `m >= 0`. Defines this as a batch of
`N1 x ... x Nm` different Multinomial distributions. Its components
should be equal to integer values.
logits: Floating point tensor representing unnormalized log-probabilities
of a positive event with shape broadcastable to
`[N1,..., Nm, K]` `m >= 0`, and the same dtype as `total_count`. Defines
this as a batch of `N1 x ... x Nm` different `K` class Multinomial
distributions. Only one of `logits` or `probs` should be passed in.
probs: Positive floating point tensor with shape broadcastable to
`[N1,..., Nm, K]` `m >= 0` and same dtype as `total_count`. Defines
this as a batch of `N1 x ... x Nm` different `K` class Multinomial
distributions. `probs`'s components in the last portion of its shape
should sum to `1`. Only one of `logits` or `probs` should be passed in.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[total_count, logits, probs]) as name:
self._total_count = ops.convert_to_tensor(total_count, name="total_count")
if validate_args:
self._total_count = (
distribution_util.embed_check_nonnegative_integer_form(
self._total_count))
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
multidimensional=True,
validate_args=validate_args,
name=name)
self._mean_val = self._total_count[..., array_ops.newaxis] * self._probs
super(Multinomial, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._total_count,
self._logits,
self._probs],
name=name)
@property
def total_count(self):
"""Number of trials used to construct a sample."""
return self._total_count
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Probability of drawing a `1` in that coordinate."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._mean_val)[:-1]
def _batch_shape(self):
return self._mean_val.get_shape().with_rank_at_least(1)[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self._mean_val)[-1:]
def _event_shape(self):
return self._mean_val.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
k = self.event_shape_tensor()[0]
# broadcast the total_count and logits to same shape
n_draws = array_ops.ones_like(
self.logits[..., 0], dtype=n_draws.dtype) * n_draws
logits = array_ops.ones_like(
n_draws[..., array_ops.newaxis], dtype=self.logits.dtype) * self.logits
# flatten the total_count and logits
flat_logits = array_ops.reshape(logits, [-1, k]) # [B1B2...Bm, k]
flat_ndraws = n * array_ops.reshape(n_draws, [-1]) # [B1B2...Bm]
# Compute the counts for each (total_count, logits) pair using map_fn.
def _sample_single(args):
logits, n_draw = args[0], args[1] # [K], []
x = random_ops.multinomial(logits[array_ops.newaxis, ...], n_draw,
seed) # [1, n*n_draw]
x = array_ops.reshape(x, shape=[n, -1]) # [n, n_draw]
x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k), axis=-2) # [n, k]
return x
x = map_fn.map_fn(
_sample_single, [flat_logits, flat_ndraws],
dtype=self.dtype) # [B1B2...Bm, n, k]
# reshape the results to proper shape
x = array_ops.transpose(x, perm=[1, 0, 2])
final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
x = array_ops.reshape(x, final_shape) # [n, B1, B2,..., Bm, k]
return x
@distribution_util.AppendDocstring(_multinomial_sample_note)
def _log_prob(self, counts):
return self._log_unnormalized_prob(counts) - self._log_normalization(counts)
def _log_unnormalized_prob(self, counts):
counts = self._maybe_assert_valid_sample(counts)
return math_ops.reduce_sum(counts * nn_ops.log_softmax(self.logits), -1)
def _log_normalization(self, counts):
counts = self._maybe_assert_valid_sample(counts)
return -distribution_util.log_combinations(self.total_count, counts)
def _mean(self):
return array_ops.identity(self._mean_val)
def _covariance(self):
p = self.probs * array_ops.ones_like(
self.total_count)[..., array_ops.newaxis]
return array_ops.matrix_set_diag(
-math_ops.matmul(self._mean_val[..., array_ops.newaxis],
p[..., array_ops.newaxis, :]), # outer product
self._variance())
def _variance(self):
p = self.probs * array_ops.ones_like(
self.total_count)[..., array_ops.newaxis]
return self._mean_val - self._mean_val * p
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return control_flow_ops.with_dependencies([
check_ops.assert_equal(
self.total_count, math_ops.reduce_sum(counts, -1),
message="counts must sum to `self.total_count`"),
], counts)
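# Illustrative sketch (a hypothetical helper, never called by the library code
# here): `Multinomial._sample_n` above draws `total_count` categorical indices
# per batch member and converts them to counts with a one-hot sum; the same
# idea for a single batch member in NumPy:
def _sketch_multinomial_counts_numpy(logits, total_count, n, seed=0):
  """Returns an [n, k] array of class counts for one batch member."""
  import numpy as np  # Local import; this module does not import NumPy.
  rng = np.random.RandomState(seed)
  logits = np.asarray(logits, dtype=np.float64)
  probs = np.exp(logits - np.max(logits))
  probs /= probs.sum()                                   # Softmax of logits.
  k = len(probs)
  draws = rng.choice(k, size=[n, total_count], p=probs)  # Categorical ids.
  return np.eye(k)[draws].sum(axis=-2)                   # One-hot sum: [n, k].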
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/multinomial.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Exponential distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import gamma
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Exponential",
"ExponentialWithSoftplusRate",
]
@tf_export(v1=["distributions.Exponential"])
class Exponential(gamma.Gamma):
"""Exponential distribution.
The Exponential distribution is parameterized by an event `rate` parameter.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; lambda, x > 0) = exp(-lambda x) / Z
Z = 1 / lambda
```
where `rate = lambda` and `Z` is the normalizing constant.
The Exponential distribution is a special case of the Gamma distribution,
i.e.,
```python
Exponential(rate) = Gamma(concentration=1., rate)
```
The Exponential distribution uses a `rate` parameter, or "inverse scale",
which can be intuited as,
```none
X ~ Exponential(rate=1)
Y = X / rate
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
rate,
validate_args=False,
allow_nan_stats=True,
name="Exponential"):
"""Construct Exponential distribution with parameter `rate`.
Args:
rate: Floating point tensor, equivalent to `1 / mean`. Must contain only
positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
# Even though all statistics of the Exponential are defined for valid
# inputs, this is not true in the parent class "Gamma." Therefore, passing
# allow_nan_stats=True through to the parent class results in unnecessary
# asserts.
with ops.name_scope(name, values=[rate]) as name:
self._rate = ops.convert_to_tensor(rate, name="rate")
super(Exponential, self).__init__(
concentration=array_ops.ones([], dtype=self._rate.dtype),
rate=self._rate,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
self._parameters = parameters
self._graph_parents += [self._rate]
@staticmethod
def _param_shapes(sample_shape):
return {"rate": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def rate(self):
return self._rate
def _log_survival_function(self, value):
return self._log_prob(value) - math_ops.log(self._rate)
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], array_ops.shape(self._rate)], 0)
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
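# With `U ~ Uniform(0, 1)`, the quantity `-log(U) / rate` follows an
# Exponential(rate) distribution (inverse-CDF sampling), which is what is
# returned below.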
sampled = random_ops.random_uniform(
shape,
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
seed=seed,
dtype=self.dtype)
return -math_ops.log(sampled) / self._rate
class ExponentialWithSoftplusRate(Exponential):
"""Exponential with softplus transform on `rate`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Exponential(tf.nn.softplus(rate)).",
warn_once=True)
def __init__(self,
rate,
validate_args=False,
allow_nan_stats=True,
name="ExponentialWithSoftplusRate"):
parameters = dict(locals())
with ops.name_scope(name, values=[rate]) as name:
super(ExponentialWithSoftplusRate, self).__init__(
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/exponential.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import re
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import object_identity
__all__ = [
"Bijector",
]
class _Mapping(collections.namedtuple(
"_Mapping", ["x", "y", "ildj_map", "kwargs"])):
"""Helper class to make it easier to manage caching in `Bijector`."""
def __new__(cls, x=None, y=None, ildj_map=None, kwargs=None):
"""Custom __new__ so namedtuple items have defaults.
Args:
x: `Tensor`. Forward.
y: `Tensor`. Inverse.
ildj_map: `Dictionary`. This is a mapping from event_ndims to a `Tensor`
representing the inverse log det jacobian.
kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
Returns:
mapping: New instance of _Mapping.
"""
return super(_Mapping, cls).__new__(cls, x, y, ildj_map, kwargs)
@property
def x_key(self):
"""Returns key used for caching Y=g(X)."""
return ((object_identity.Reference(self.x),) +
self._deep_tuple(tuple(sorted(self.kwargs.items()))))
@property
def y_key(self):
"""Returns key used for caching X=g^{-1}(Y)."""
return ((object_identity.Reference(self.y),) +
self._deep_tuple(tuple(sorted(self.kwargs.items()))))
def merge(self, x=None, y=None, ildj_map=None, kwargs=None, mapping=None):
"""Returns new _Mapping with args merged with self.
Args:
x: `Tensor`. Forward.
y: `Tensor`. Inverse.
ildj_map: `Dictionary`. This is a mapping from event_ndims to a `Tensor`
representing the inverse log det jacobian.
kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
mapping: Instance of _Mapping to merge. Can only be specified if no other
arg is specified.
Returns:
mapping: New instance of `_Mapping` which has inputs merged with self.
Raises:
ValueError: if mapping and any other arg is not `None`.
"""
if mapping is None:
mapping = _Mapping(x=x, y=y, ildj_map=ildj_map, kwargs=kwargs)
elif any(arg is not None for arg in [x, y, ildj_map, kwargs]):
raise ValueError("Cannot simultaneously specify mapping and individual "
"arguments.")
return _Mapping(
x=self._merge(self.x, mapping.x),
y=self._merge(self.y, mapping.y),
ildj_map=self._merge_dicts(self.ildj_map, mapping.ildj_map),
kwargs=self._merge(self.kwargs, mapping.kwargs))
def _merge_dicts(self, old=None, new=None):
"""Helper to merge two dictionaries."""
old = {} if old is None else old
new = {} if new is None else new
for k, v in six.iteritems(new):
val = old.get(k, None)
if val is not None and val is not v:
raise ValueError("Found different value for existing key "
"(key:{} old_value:{} new_value:{}".format(
k, old[k], v))
old[k] = v
return old
def _merge(self, old, new):
"""Helper to merge which handles merging one value."""
if old is None:
return new
elif new is not None and old is not new:
raise ValueError("Incompatible values: %s != %s" % (old, new))
return old
def _deep_tuple(self, x):
"""Converts lists of lists to tuples of tuples."""
return (tuple(map(self._deep_tuple, x))
if isinstance(x, (list, tuple)) else x)
@six.add_metaclass(abc.ABCMeta)
class Bijector(object):
r"""Interface for transformations of a `Distribution` sample.
Bijectors can be used to represent any differentiable and injective
(one to one) function defined on an open subset of `R^n`. Some non-injective
transformations are also supported (see "Non Injective Transforms" below).
#### Mathematical Details
A `Bijector` implements a [smooth covering map](
https://en.wikipedia.org/wiki/Local_diffeomorphism), i.e., a local
diffeomorphism such that every point in the target has a neighborhood evenly
covered by a map ([see also](
https://en.wikipedia.org/wiki/Covering_space#Covering_of_a_manifold)).
A `Bijector` is used by `TransformedDistribution` but can be generally used
for transforming a `Distribution` generated `Tensor`. A `Bijector` is
characterized by three operations:
1. Forward
Useful for turning one random outcome into another random outcome from a
different distribution.
2. Inverse
Useful for "reversing" a transformation to compute one probability in
terms of another.
3. `log_det_jacobian(x)`
"The log of the absolute value of the determinant of the matrix of all
first-order partial derivatives of the inverse function."
Useful for inverting a transformation to compute one probability in terms
of another. Geometrically, the Jacobian determinant is the volume of the
transformation and is used to scale the probability.
We take the absolute value of the determinant before log to avoid NaN
values. Geometrically, a negative determinant corresponds to an
orientation-reversing transformation. It is ok for us to discard the sign
of the determinant because we only integrate everywhere-nonnegative
functions (probability densities) and the correct orientation is always the
one that produces a nonnegative integrand.
By convention, transformations of random variables are named in terms of the
forward transformation. The forward transformation creates samples; the
inverse is useful for computing probabilities.
#### Example Uses
- Basic properties:
```python
x = ... # A tensor.
# Evaluate forward transformation.
fwd_x = my_bijector.forward(x)
x == my_bijector.inverse(fwd_x)
x != my_bijector.forward(fwd_x) # Not equal because x != g(g(x)).
```
- Computing a log-likelihood:
```python
def transformed_log_prob(bijector, log_prob, x):
return (bijector.inverse_log_det_jacobian(x, event_ndims=0) +
log_prob(bijector.inverse(x)))
```
- Transforming a random outcome:
```python
def transformed_sample(bijector, x):
return bijector.forward(x)
```
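- Using a bijector with `TransformedDistribution` (a sketch assuming the
TensorFlow Probability API, to which this library has moved):
```python
import tensorflow_probability as tfp
# Exp-transforming a standard Normal yields a LogNormal distribution.
log_normal = tfp.distributions.TransformedDistribution(
distribution=tfp.distributions.Normal(loc=0., scale=1.),
bijector=tfp.bijectors.Exp())
# `sample` uses `forward`; `log_prob` uses `inverse` and
# `inverse_log_det_jacobian`.
```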
#### Example Bijectors
- "Exponential"
```none
Y = g(X) = exp(X)
X ~ Normal(0, 1) # Univariate.
```
Implies:
```none
g^{-1}(Y) = log(Y)
|Jacobian(g^{-1})(y)| = 1 / y
Y ~ LogNormal(0, 1), i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= (1 / y) Normal(log(y); 0, 1)
```
Here is an example of how one might implement the `Exp` bijector:
```python
class Exp(Bijector):
def __init__(self, validate_args=False, name="exp"):
super(Exp, self).__init__(
validate_args=validate_args,
forward_min_event_ndims=0,
name=name)
def _forward(self, x):
return math_ops.exp(x)
def _inverse(self, y):
return math_ops.log(y)
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jacobian(self._inverse(y))
def _forward_log_det_jacobian(self, x):
# Notice that we needn't do any reducing, even when `event_ndims > 0`.
# The base Bijector class will handle reducing for us; it knows how
# to do so because we called `super` `__init__` with
# `forward_min_event_ndims = 0`.
return x
```
- "Affine"
```none
Y = g(X) = sqrtSigma * X + mu
X ~ MultivariateNormal(0, I_d)
```
Implies:
```none
g^{-1}(Y) = inv(sqrtSigma) * (Y - mu)
|Jacobian(g^{-1})(y)| = det(inv(sqrtSigma))
Y ~ MultivariateNormal(mu, sqrtSigma) , i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= det(sqrtSigma)^(-d) *
MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d)
```
#### Min_event_ndims and Naming
Bijectors are named for the dimensionality of data they act on (i.e. without
broadcasting). We can think of bijectors as having an intrinsic
`min_event_ndims`, which is the minimum number of dimensions the bijector acts
on. For
instance, a Cholesky decomposition requires a matrix, and hence
`min_event_ndims=2`.
Some examples:
`AffineScalar: min_event_ndims=0`
`Affine: min_event_ndims=1`
`Cholesky: min_event_ndims=2`
`Exp: min_event_ndims=0`
`Sigmoid: min_event_ndims=0`
`SoftmaxCentered: min_event_ndims=1`
Note the difference between `Affine` and `AffineScalar`. `AffineScalar`
operates on scalar events, whereas `Affine` operates on vector-valued events.
More generally, there is a `forward_min_event_ndims` and an
`inverse_min_event_ndims`. In most cases, these will be the same.
However, for some shape changing bijectors, these will be different
(e.g. a bijector which pads an extra dimension at the end might have
`forward_min_event_ndims=0` and `inverse_min_event_ndims=1`).
#### Jacobian Determinant
The Jacobian determinant is a reduction over `event_ndims - min_event_ndims`
(`forward_min_event_ndims` for `forward_log_det_jacobian` and
`inverse_min_event_ndims` for `inverse_log_det_jacobian`).
To see this, consider the `Exp` `Bijector` applied to a `Tensor` which has
sample, batch, and event (S, B, E) shape semantics. Suppose the `Tensor`'s
partitioned-shape is `(S=[4], B=[2], E=[3, 3])`. The shape of the `Tensor`
returned by `forward` and `inverse` is unchanged, i.e., `[4, 2, 3, 3]`.
However the shape returned by `inverse_log_det_jacobian` is `[4, 2]` because
the Jacobian determinant is a reduction over the event dimensions.
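For instance (a sketch assuming the TensorFlow Probability `Exp` bijector and
the partitioned shape above):
```python
import tensorflow as tf
import tensorflow_probability as tfp
x = tf.zeros([4, 2, 3, 3])  # S=[4], B=[2], E=[3, 3]
exp = tfp.bijectors.Exp()
y = exp.forward(x)  # shape [4, 2, 3, 3]
ildj = exp.inverse_log_det_jacobian(y, event_ndims=2)  # shape [4, 2]
```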
Another example is the `Affine` `Bijector`. Because `min_event_ndims = 1`, the
Jacobian determinant reduction is over `event_ndims - 1`.
It is sometimes useful to implement the inverse Jacobian determinant as the
negative forward Jacobian determinant. For example,
```python
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jac(self._inverse(y)) # Note negation.
```
The correctness of this approach can be seen from the following claim.
- Claim:
Assume `Y = g(X)` is a bijection whose derivative exists and is nonzero
for its domain, i.e., `dY/dX = d/dX g(X) != 0`. Then:
```none
(log o det o jacobian o g^{-1})(Y) = -(log o det o jacobian o g)(X)
```
- Proof:
From the bijective, nonzero differentiability of `g`, the
[inverse function theorem](
https://en.wikipedia.org/wiki/Inverse_function_theorem)
implies `g^{-1}` is differentiable in the image of `g`.
Applying the chain rule to `y = g(x) = g(g^{-1}(y))` yields
`I = g'(g^{-1}(y))*g^{-1}'(y)`.
The same theorem also implies `g^{-1}'` is non-singular therefore:
`inv[ g'(g^{-1}(y)) ] = g^{-1}'(y)`.
The claim follows from [properties of determinant](
https://en.wikipedia.org/wiki/Determinant#Multiplicativity_and_matrix_groups).
Generally it's preferable to directly implement the inverse Jacobian
determinant. This should have superior numerical stability and will often
share subgraphs with the `_inverse` implementation.
#### Is_constant_jacobian
Certain bijectors will have constant jacobian matrices. For instance, the
`Affine` bijector encodes multiplication by a matrix plus a shift, and its
Jacobian matrix is that same matrix.
`is_constant_jacobian` encodes the fact that the jacobian matrix is constant.
The semantics of this argument are the following:
* Repeated calls to "log_det_jacobian" functions with the same
`event_ndims` (but not necessarily the same input) will return the first
computed jacobian (because the matrix is constant, and hence is input
independent).
* `log_det_jacobian` implementations are merely broadcastable to the true
`log_det_jacobian` (because, again, the jacobian matrix is input
independent). Specifically, `log_det_jacobian` is implemented as the
log jacobian determinant for a single input.
```python
class Identity(Bijector):
def __init__(self, validate_args=False, name="identity"):
super(Identity, self).__init__(
is_constant_jacobian=True,
validate_args=validate_args,
forward_min_event_ndims=0,
name=name)
def _forward(self, x):
return x
def _inverse(self, y):
return y
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jacobian(self._inverse(y))
def _forward_log_det_jacobian(self, x):
# The full log jacobian determinant would be array_ops.zeros_like(x).
# However, we circumvent materializing that, since the jacobian
# calculation is input independent, and we specify it for one input.
return constant_op.constant(0., x.dtype.base_dtype)
```
#### Subclass Requirements
- Subclasses typically implement:
- `_forward`,
- `_inverse`,
- `_inverse_log_det_jacobian`,
- `_forward_log_det_jacobian` (optional).
The `_forward_log_det_jacobian` is called when the bijector is inverted via
the `Invert` bijector. If undefined, a slightly less efficient
calculation, `-1 * _inverse_log_det_jacobian`, is used.
If the bijector changes the shape of the input, you must also implement:
- _forward_event_shape_tensor,
- _forward_event_shape (optional),
- _inverse_event_shape_tensor,
- _inverse_event_shape (optional).
By default the event-shape is assumed unchanged from input.
- If the `Bijector`'s use is limited to `TransformedDistribution` (or friends
like `QuantizedDistribution`) then depending on your use, you may not need
to implement all of `_forward` and `_inverse` functions.
Examples:
1. Sampling (e.g., `sample`) only requires `_forward`.
2. Probability functions (e.g., `prob`, `cdf`, `survival`) only require
`_inverse` (and related).
3. Only calling probability functions on the output of `sample` means
`_inverse` can be implemented as a cache lookup.
See "Example Uses" [above] which shows how these functions are used to
transform a distribution. (Note: `_forward` could theoretically be
implemented as a cache lookup but this would require controlling the
underlying sample generation mechanism.)
#### Non Injective Transforms
**WARNING** Handling of non-injective transforms is subject to change.
Non injective maps `g` are supported, provided their domain `D` can be
partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that,
ignoring sets of measure zero, the restriction of `g` to each subset is a
differentiable bijection onto `g(D)`. In particular, this implies that for
`y in g(D)`, the set inverse, i.e. `g^{-1}(y) = {x in D : g(x) = y}`, always
contains exactly `k` distinct points.
The property, `_is_injective` is set to `False` to indicate that the bijector
is not injective, yet satisfies the above condition.
The usual bijector API is modified in the case `_is_injective is False` (see
method docstrings for specifics). Here we show by example the `AbsoluteValue`
bijector. In this case, the domain `D = (-inf, inf)`, can be partitioned
into `D1 = (-inf, 0)`, `D2 = {0}`, and `D3 = (0, inf)`. Let `gi` be the
restriction of `g` to `Di`, then both `g1` and `g3` are bijections onto
`(0, inf)`, with `g1^{-1}(y) = -y`, and `g3^{-1}(y) = y`. We will use
`g1` and `g3` to define bijector methods over `D1` and `D3`. `D2 = {0}` is
an oddball in that `g2` is one to one, and the derivative is not well defined.
Fortunately, when considering transformations of probability densities
(e.g. in `TransformedDistribution`), sets of measure zero have no effect in
theory, and only a small effect in 32 or 64 bit precision. For that reason,
we define `inverse(0)` and `inverse_log_det_jacobian(0)` both as `[0, 0]`,
which is convenient and results in a left-semicontinuous pdf.
```python
abs = tfp.distributions.bijectors.AbsoluteValue()
abs.forward(-1.)
==> 1.
abs.forward(1.)
==> 1.
abs.inverse(1.)
==> (-1., 1.)
# The |dX/dY| is constant, == 1. So Log|dX/dY| == 0.
abs.inverse_log_det_jacobian(1., event_ndims=0)
==> (0., 0.)
# Special case handling of 0.
abs.inverse(0.)
==> (0., 0.)
abs.inverse_log_det_jacobian(0., event_ndims=0)
==> (0., 0.)
```
"""
@abc.abstractmethod
def __init__(self,
graph_parents=None,
is_constant_jacobian=False,
validate_args=False,
dtype=None,
forward_min_event_ndims=None,
inverse_min_event_ndims=None,
name=None):
"""Constructs Bijector.
A `Bijector` transforms random variables into new random variables.
Examples:
```python
# Create the Y = g(X) = X transform.
identity = Identity()
# Create the Y = g(X) = exp(X) transform.
exp = Exp()
```
See `Bijector` subclass docstring for more details and specific examples.
Args:
graph_parents: Python list of graph prerequisites of this `Bijector`.
is_constant_jacobian: Python `bool` indicating that the Jacobian matrix is
not a function of the input.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
enforced.
forward_min_event_ndims: Python `integer` indicating the minimum number of
dimensions `forward` operates on.
inverse_min_event_ndims: Python `integer` indicating the minimum number of
dimensions `inverse` operates on. Will be set to
`forward_min_event_ndims` by default, if no value is provided.
name: The name to give Ops created by the initializer.
Raises:
ValueError: If neither `forward_min_event_ndims` nor
`inverse_min_event_ndims` is specified, or if either of them is
negative.
ValueError: If a member of `graph_parents` is not a `Tensor`.
"""
self._graph_parents = graph_parents or []
if forward_min_event_ndims is None and inverse_min_event_ndims is None:
raise ValueError("Must specify at least one of `forward_min_event_ndims` "
"and `inverse_min_event_ndims`.")
elif inverse_min_event_ndims is None:
inverse_min_event_ndims = forward_min_event_ndims
elif forward_min_event_ndims is None:
forward_min_event_ndims = inverse_min_event_ndims
if not isinstance(forward_min_event_ndims, int):
raise TypeError("Expected forward_min_event_ndims to be of "
"type int, got {}".format(
type(forward_min_event_ndims).__name__))
if not isinstance(inverse_min_event_ndims, int):
raise TypeError("Expected inverse_min_event_ndims to be of "
"type int, got {}".format(
type(inverse_min_event_ndims).__name__))
if forward_min_event_ndims < 0:
raise ValueError("forward_min_event_ndims must be a non-negative "
"integer.")
if inverse_min_event_ndims < 0:
raise ValueError("inverse_min_event_ndims must be a non-negative "
"integer.")
self._forward_min_event_ndims = forward_min_event_ndims
self._inverse_min_event_ndims = inverse_min_event_ndims
self._is_constant_jacobian = is_constant_jacobian
self._constant_ildj_map = {}
self._validate_args = validate_args
self._dtype = dtype
# These dicts can only be accessed using _Mapping.x_key or _Mapping.y_key
self._from_y = {}
self._from_x = {}
if name:
self._name = name
else:
# We want the default convention to be snake_case rather than CamelCase
# since `Chain` uses bijector.name as the kwargs dictionary key.
def camel_to_snake(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
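# For example, camel_to_snake("SoftmaxCentered") returns "softmax_centered".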
self._name = camel_to_snake(type(self).__name__.lstrip("_"))
for i, t in enumerate(self._graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
@property
def graph_parents(self):
"""Returns this `Bijector`'s graph_parents as a Python list."""
return self._graph_parents
@property
def forward_min_event_ndims(self):
"""Returns the minimal number of dimensions bijector.forward operates on."""
return self._forward_min_event_ndims
@property
def inverse_min_event_ndims(self):
"""Returns the minimal number of dimensions bijector.inverse operates on."""
return self._inverse_min_event_ndims
@property
def is_constant_jacobian(self):
"""Returns true iff the Jacobian matrix is not a function of x.
Note: Jacobian matrix is either constant for both forward and inverse or
neither.
Returns:
is_constant_jacobian: Python `bool`.
"""
return self._is_constant_jacobian
@property
def _is_injective(self):
"""Returns true iff the forward map `g` is injective (one-to-one function).
**WARNING** This hidden property and its behavior are subject to change.
Note: Non-injective maps `g` are supported, provided their domain `D` can
be partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that,
ignoring sets of measure zero, the restriction of `g` to each subset is a
differentiable bijection onto `g(D)`.
Returns:
is_injective: Python `bool`.
"""
return True
@property
def validate_args(self):
"""Returns True if Tensor arguments will be validated."""
return self._validate_args
@property
def dtype(self):
"""dtype of `Tensor`s transformable by this distribution."""
return self._dtype
@property
def name(self):
"""Returns the string name of this `Bijector`."""
return self._name
def _forward_event_shape_tensor(self, input_shape):
"""Subclass implementation for `forward_event_shape_tensor` function."""
# By default, we assume event_shape is unchanged.
return input_shape
def forward_event_shape_tensor(self,
input_shape,
name="forward_event_shape_tensor"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
input_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `forward` function.
name: name to give to the op
Returns:
forward_event_shape_tensor: `Tensor`, `int32` vector indicating
event-portion shape after applying `forward`.
"""
with self._name_scope(name, [input_shape]):
input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32,
name="input_shape")
return self._forward_event_shape_tensor(input_shape)
def _forward_event_shape(self, input_shape):
"""Subclass implementation for `forward_event_shape` public function."""
# By default, we assume event_shape is unchanged.
return input_shape
def forward_event_shape(self, input_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `forward_event_shape_tensor`. May be only partially defined.
Args:
input_shape: `TensorShape` indicating event-portion shape passed into
`forward` function.
Returns:
forward_event_shape_tensor: `TensorShape` indicating event-portion shape
after applying `forward`. Possibly unknown.
"""
return self._forward_event_shape(tensor_shape.TensorShape(input_shape))
def _inverse_event_shape_tensor(self, output_shape):
"""Subclass implementation for `inverse_event_shape_tensor` function."""
# By default, we assume event_shape is unchanged.
return output_shape
def inverse_event_shape_tensor(self,
output_shape,
name="inverse_event_shape_tensor"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
output_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `inverse` function.
name: name to give to the op
Returns:
inverse_event_shape_tensor: `Tensor`, `int32` vector indicating
event-portion shape after applying `inverse`.
"""
with self._name_scope(name, [output_shape]):
output_shape = ops.convert_to_tensor(output_shape, dtype=dtypes.int32,
name="output_shape")
return self._inverse_event_shape_tensor(output_shape)
def _inverse_event_shape(self, output_shape):
"""Subclass implementation for `inverse_event_shape` public function."""
# By default, we assume event_shape is unchanged.
return tensor_shape.TensorShape(output_shape)
def inverse_event_shape(self, output_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `inverse_event_shape_tensor`. May be only partially defined.
Args:
output_shape: `TensorShape` indicating event-portion shape passed into
`inverse` function.
Returns:
inverse_event_shape_tensor: `TensorShape` indicating event-portion shape
after applying `inverse`. Possibly unknown.
"""
return self._inverse_event_shape(output_shape)
def _forward(self, x):
"""Subclass implementation for `forward` public function."""
raise NotImplementedError("forward not implemented.")
def _call_forward(self, x, name, **kwargs):
with self._name_scope(name, [x]):
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
if not self._is_injective: # No caching for non-injective
return self._forward(x, **kwargs)
mapping = self._lookup(x=x, kwargs=kwargs)
if mapping.y is not None:
return mapping.y
mapping = mapping.merge(y=self._forward(x, **kwargs))
self._cache(mapping)
return mapping.y
def forward(self, x, name="forward"):
"""Returns the forward `Bijector` evaluation, i.e., X = g(Y).
Args:
x: `Tensor`. The input to the "forward" evaluation.
name: The name to give this op.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
NotImplementedError: if `_forward` is not implemented.
"""
return self._call_forward(x, name)
def _inverse(self, y):
"""Subclass implementation for `inverse` public function."""
raise NotImplementedError("inverse not implemented")
def _call_inverse(self, y, name, **kwargs):
with self._name_scope(name, [y]):
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
if not self._is_injective: # No caching for non-injective
return self._inverse(y, **kwargs)
mapping = self._lookup(y=y, kwargs=kwargs)
if mapping.x is not None:
return mapping.x
mapping = mapping.merge(x=self._inverse(y, **kwargs))
self._cache(mapping)
return mapping.x
def inverse(self, y, name="inverse"):
"""Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).
Args:
y: `Tensor`. The input to the "inverse" evaluation.
name: The name to give this op.
Returns:
`Tensor`, if this bijector is injective.
If not injective, returns the k-tuple containing the unique
`k` points `(x1, ..., xk)` such that `g(xi) = y`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if `_inverse` is not implemented.
"""
return self._call_inverse(y, name)
def _inverse_log_det_jacobian(self, y):
"""Subclass implementation of `inverse_log_det_jacobian` public function.
In particular, this method differs from the public function, in that it
does not take `event_ndims`. Thus, this implements the minimal Jacobian
determinant calculation (i.e. over `inverse_min_event_ndims`).
Args:
y: `Tensor`. The input to the "inverse_log_det_jacobian" evaluation.
Returns:
inverse_log_det_jacobian: `Tensor`, if this bijector is injective.
If not injective, returns the k-tuple containing jacobians for the
unique `k` points `(x1, ..., xk)` such that `g(xi) = y`.
"""
raise NotImplementedError("inverse_log_det_jacobian not implemented.")
def _call_inverse_log_det_jacobian(self, y, event_ndims, name, **kwargs):
with self._name_scope(name, [y]):
if event_ndims in self._constant_ildj_map:
return self._constant_ildj_map[event_ndims]
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
with ops.control_dependencies(self._check_valid_event_ndims(
min_event_ndims=self.inverse_min_event_ndims,
event_ndims=event_ndims)):
if not self._is_injective: # No caching for non-injective
try:
ildjs = self._inverse_log_det_jacobian(y, **kwargs)
return tuple(self._reduce_jacobian_det_over_event(
y, ildj, self.inverse_min_event_ndims, event_ndims)
for ildj in ildjs)
except NotImplementedError as original_exception:
try:
x = self._inverse(y, **kwargs)
fldjs = self._forward_log_det_jacobian(x, **kwargs)
return tuple(self._reduce_jacobian_det_over_event(
x, -fldj, self.forward_min_event_ndims, event_ndims)
for fldj in fldjs)
except NotImplementedError:
raise original_exception
mapping = self._lookup(y=y, kwargs=kwargs)
if mapping.ildj_map is not None and event_ndims in mapping.ildj_map:
return mapping.ildj_map[event_ndims]
try:
x = None # Not needed; leave cache as is.
ildj = self._inverse_log_det_jacobian(y, **kwargs)
ildj = self._reduce_jacobian_det_over_event(
y, ildj, self.inverse_min_event_ndims, event_ndims)
except NotImplementedError as original_exception:
try:
x = (mapping.x if mapping.x is not None
else self._inverse(y, **kwargs))
ildj = -self._forward_log_det_jacobian(x, **kwargs)
ildj = self._reduce_jacobian_det_over_event(
x, ildj, self.forward_min_event_ndims, event_ndims)
except NotImplementedError:
raise original_exception
mapping = mapping.merge(x=x, ildj_map={event_ndims: ildj})
self._cache(mapping)
if self.is_constant_jacobian:
self._constant_ildj_map[event_ndims] = ildj
return ildj
def inverse_log_det_jacobian(
self, y, event_ndims, name="inverse_log_det_jacobian"):
"""Returns the (log o det o Jacobian o inverse)(y).
Mathematically, returns: `log(det(dX/dY))(Y)`. (Recall that: `X=g^{-1}(Y)`.)
Note that `forward_log_det_jacobian` is the negative of this function,
evaluated at `g^{-1}(y)`.
Args:
y: `Tensor`. The input to the "inverse" Jacobian determinant evaluation.
event_ndims: Number of dimensions in the probabilistic events being
transformed. Must be greater than or equal to
`self.inverse_min_event_ndims`. The result is summed over the final
dimensions to produce a scalar Jacobian determinant for each event,
i.e. it has `y.shape.ndims - event_ndims` dimensions.
name: The name to give this op.
Returns:
`Tensor`, if this bijector is injective.
If not injective, returns the tuple of local log det
Jacobians, `log(det(Dg_i^{-1}(y)))`, where `g_i` is the restriction
of `g` to the `ith` partition `Di`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if `_inverse_log_det_jacobian` is not implemented.
"""
return self._call_inverse_log_det_jacobian(y, event_ndims, name)
def _forward_log_det_jacobian(self, x):
"""Subclass implementation of `forward_log_det_jacobian` public function.
In particular, this method differs from the public function, in that it
does not take `event_ndims`. Thus, this implements the minimal Jacobian
determinant calculation (i.e. over `forward_min_event_ndims`).
Args:
x: `Tensor`. The input to the "forward_log_det_jacobian" evaluation.
Returns:
forward_log_det_jacobian: `Tensor`, if this bijector is injective.
If not injective, returns the k-tuple containing jacobians for the
unique `k` points `(x1, ..., xk)` such that `g(xi) = y`.
"""
raise NotImplementedError(
"forward_log_det_jacobian not implemented.")
def _call_forward_log_det_jacobian(self, x, event_ndims, name, **kwargs):
if not self._is_injective:
raise NotImplementedError(
"forward_log_det_jacobian cannot be implemented for non-injective "
"transforms.")
with self._name_scope(name, [x]):
with ops.control_dependencies(self._check_valid_event_ndims(
min_event_ndims=self.forward_min_event_ndims,
event_ndims=event_ndims)):
if event_ndims in self._constant_ildj_map:
# Need "-1. *" to avoid invalid-unary-operand-type linter warning.
return -1. * self._constant_ildj_map[event_ndims]
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
if not self._is_injective: # No caching for non-injective
try:
fldjs = self._forward_log_det_jacobian(x, **kwargs) # No caching.
return tuple(self._reduce_jacobian_det_over_event(
x, fldj, self.forward_min_event_ndims, event_ndims)
for fldj in fldjs)
except NotImplementedError as original_exception:
try:
y = self._forward(x, **kwargs)
ildjs = self._inverse_log_det_jacobian(y, **kwargs)
return tuple(self._reduce_jacobian_det_over_event(
y, -ildj, self.inverse_min_event_ndims, event_ndims)
for ildj in ildjs)
except NotImplementedError:
raise original_exception
mapping = self._lookup(x=x, kwargs=kwargs)
if mapping.ildj_map is not None and event_ndims in mapping.ildj_map:
return -mapping.ildj_map[event_ndims]
try:
y = None # Not needed; leave cache as is.
ildj = -self._forward_log_det_jacobian(x, **kwargs)
ildj = self._reduce_jacobian_det_over_event(
x, ildj, self.forward_min_event_ndims, event_ndims)
except NotImplementedError as original_exception:
try:
y = (mapping.y if mapping.y is not None
else self._forward(x, **kwargs))
ildj = self._inverse_log_det_jacobian(y, **kwargs)
ildj = self._reduce_jacobian_det_over_event(
y, ildj, self.inverse_min_event_ndims, event_ndims)
except NotImplementedError:
raise original_exception
mapping = mapping.merge(y=y, ildj_map={event_ndims: ildj})
self._cache(mapping)
if self.is_constant_jacobian:
self._constant_ildj_map[event_ndims] = ildj
return -ildj
def forward_log_det_jacobian(
self, x, event_ndims, name="forward_log_det_jacobian"):
"""Returns both the forward_log_det_jacobian.
Args:
x: `Tensor`. The input to the "forward" Jacobian determinant evaluation.
event_ndims: Number of dimensions in the probabilistic events being
transformed. Must be greater than or equal to
`self.forward_min_event_ndims`. The result is summed over the final
dimensions to produce a scalar Jacobian determinant for each event,
i.e. it has `x.shape.ndims - event_ndims` dimensions.
name: The name to give this op.
Returns:
`Tensor`, if this bijector is injective.
If not injective this is not implemented.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_forward_log_det_jacobian`
nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented, or
this is a non-injective bijector.
"""
return self._call_forward_log_det_jacobian(x, event_ndims, name)
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(
name, values=(values or []) + self.graph_parents) as scope:
yield scope
def _maybe_assert_dtype(self, x):
"""Helper to check dtype when self.dtype is known."""
if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype:
raise TypeError("Input had dtype %s but expected %s." %
(self.dtype, x.dtype))
def _cache(self, mapping):
"""Helper which stores mapping info in forward/inverse dicts."""
# Merging from lookup is an added check that we're not overwriting anything
# which is not None.
mapping = mapping.merge(mapping=self._lookup(
mapping.x, mapping.y, mapping.kwargs))
if mapping.x is None and mapping.y is None:
raise ValueError("Caching expects at least one of (x,y) to be known, "
"i.e., not None.")
self._from_x[mapping.x_key] = mapping
self._from_y[mapping.y_key] = mapping
def _lookup(self, x=None, y=None, kwargs=None):
"""Helper which retrieves mapping info from forward/inverse dicts."""
mapping = _Mapping(x=x, y=y, kwargs=kwargs)
# Since _cache requires both x,y to be set, we only need to do one cache
# lookup since the mapping is always in both or neither.
if mapping.x is not None:
return self._from_x.get(mapping.x_key, mapping)
if mapping.y is not None:
return self._from_y.get(mapping.y_key, mapping)
return mapping
def _reduce_jacobian_det_over_event(
self, y, ildj, min_event_ndims, event_ndims):
"""Reduce jacobian over event_ndims - min_event_ndims."""
# In this case, we need to tile the Jacobian over the event and reduce.
y_rank = array_ops.rank(y)
y_shape = array_ops.shape(y)[
y_rank - event_ndims : y_rank - min_event_ndims]
ones = array_ops.ones(y_shape, ildj.dtype)
reduced_ildj = math_ops.reduce_sum(
ones * ildj,
axis=self._get_event_reduce_dims(min_event_ndims, event_ndims))
# The multiplication by ones can change the inferred static shape so we try
# to recover as much as possible.
event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)
if (event_ndims_ is not None and
y.shape.ndims is not None and
ildj.shape.ndims is not None):
y_shape = y.shape[y.shape.ndims - event_ndims_ :
y.shape.ndims - min_event_ndims]
broadcast_shape = array_ops.broadcast_static_shape(ildj.shape, y_shape)
reduced_ildj.set_shape(
broadcast_shape[: broadcast_shape.ndims - (
event_ndims_ - min_event_ndims)])
return reduced_ildj
def _get_event_reduce_dims(self, min_event_ndims, event_ndims):
"""Compute the reduction dimensions given event_ndims."""
event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)
if event_ndims_ is not None:
return [-index for index in range(1, event_ndims_ - min_event_ndims + 1)]
else:
reduce_ndims = event_ndims - min_event_ndims
return math_ops.range(-reduce_ndims, 0)
def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
"""Check whether event_ndims is atleast min_event_ndims."""
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
event_ndims_ = tensor_util.constant_value(event_ndims)
assertions = []
if not event_ndims.dtype.is_integer:
raise ValueError("Expected integer dtype, got dtype {}".format(
event_ndims.dtype))
if event_ndims_ is not None:
if event_ndims.shape.ndims != 0:
raise ValueError("Expected scalar event_ndims, got shape {}".format(
event_ndims.shape))
if min_event_ndims > event_ndims_:
raise ValueError("event_ndims ({}) must be larger than "
"min_event_ndims ({})".format(
event_ndims_, min_event_ndims))
elif self.validate_args:
assertions += [
check_ops.assert_greater_equal(event_ndims, min_event_ndims)]
if event_ndims.shape.is_fully_defined():
if event_ndims.shape.ndims != 0:
raise ValueError("Expected scalar shape, got ndims {}".format(
event_ndims.shape.ndims))
elif self.validate_args:
assertions += [
check_ops.assert_rank(event_ndims, 0, message="Expected scalar.")]
return assertions
def _maybe_get_static_event_ndims(self, event_ndims):
"""Helper which returns tries to return an integer static value."""
event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
if isinstance(event_ndims_, (np.generic, np.ndarray)):
if event_ndims_.dtype not in (np.int32, np.int64):
raise ValueError("Expected integer dtype, got dtype {}".format(
event_ndims_.dtype))
if isinstance(event_ndims_, np.ndarray) and len(event_ndims_.shape):
raise ValueError("Expected a scalar integer, got {}".format(
event_ndims_))
event_ndims_ = int(event_ndims_)
return event_ndims_
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/bijector_impl.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import types
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"ReparameterizationType",
"FULLY_REPARAMETERIZED",
"NOT_REPARAMETERIZED",
"Distribution",
]
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape",
"batch_shape_tensor",
"cdf",
"covariance",
"cross_entropy",
"entropy",
"event_shape",
"event_shape_tensor",
"kl_divergence",
"log_cdf",
"log_prob",
"log_survival_function",
"mean",
"mode",
"prob",
"sample",
"stddev",
"survival_function",
"variance",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
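# Illustrative behavior: with old_str == "Computes f.\n\nArgs:\n  x: input."
# and append_str == "Extra note.", the result keeps "Computes f." first, then
# an indented "Extra note." paragraph, then the original "Args:" block. If
# old_str has no "Args:" line, append_str is simply appended at the end.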
def _convert_to_tensor(value, name=None, preferred_dtype=None):
"""Converts to tensor avoiding an eager bug that loses float precision."""
# TODO(b/116672045): Remove this function.
if (context.executing_eagerly() and preferred_dtype is not None and
(preferred_dtype.is_integer or preferred_dtype.is_bool)):
v = ops.convert_to_tensor(value, name=name)
if v.dtype.is_floating:
return v
return ops.convert_to_tensor(
value, name=name, preferred_dtype=preferred_dtype)
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = tf_inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@tf_export(v1=["distributions.ReparameterizationType"])
class ReparameterizationType(object):
"""Instances of this class represent how sampling is reparameterized.
Two static instances exist in the distributions library, signifying
one of two possible properties for samples from a distribution:
`FULLY_REPARAMETERIZED`: Samples from the distribution are fully
reparameterized, and straight-through gradients are supported.
`NOT_REPARAMETERIZED`: Samples from the distribution are not fully
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
sample results in a `stop_gradient` call and use policy
gradients / surrogate loss instead.
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self, rep_type):
self._rep_type = rep_type
def __repr__(self):
return "<Reparameteriation Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
other: Object to compare against.
Returns:
`self is other`.
"""
return self is other
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
tf_export(v1=["distributions.FULLY_REPARAMETERIZED"]).export_constant(
__name__, "FULLY_REPARAMETERIZED")
# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
tf_export(v1=["distributions.NOT_REPARAMETERIZED"]).export_constant(
__name__, "NOT_REPARAMETERIZED")
@six.add_metaclass(_DistributionMeta)
@tf_export(v1=["distributions.Distribution"])
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid the python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Shapes
There are three important concepts associated with TensorFlow Distributions
shapes:
- Event shape describes the shape of a single draw from the distribution;
it may be dependent across dimensions. For scalar distributions, the event
shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is
`[5]`.
- Batch shape describes independent, not identically distributed draws, aka a
"collection" or "bunch" of distributions.
- Sample shape describes independent, identically distributed draws of batches
from the distribution family.
The event shape and the batch shape are properties of a Distribution object,
whereas the sample shape is associated with a specific call to `sample` or
`log_prob`.
For detailed usage examples of TensorFlow Distributions shapes, see
[this tutorial](
https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb)
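A brief sketch of the three shapes (a `Normal` distribution is used purely for
illustration):
```python
import tensorflow as tf
tfd = tf.compat.v1.distributions
n = tfd.Normal(loc=[0., 0., 0.], scale=1.)
n.event_shape  # TensorShape([]) -- scalar events.
n.batch_shape  # TensorShape([3]) -- three independent Normals.
n.sample(7)  # shape [7, 3] == sample_shape + batch_shape + event_shape.
```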
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
if not name or name[-1] != "/": # `name` is not a name scope
non_unique_name = name or type(self).__name__
with ops.name_scope(non_unique_name) as name:
pass
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name
@property
def _parameters(self):
return self._parameter_dict
@_parameters.setter
def _parameters(self, value):
"""Intercept assignments to self._parameters to avoid reference cycles.
Parameters are often created using locals(), so we need to clean out any
references to `self` before assigning it to an attribute.
Args:
value: A dictionary of parameters to assign to the `_parameters` property.
"""
if "self" in value:
del value["self"]
self._parameter_dict = value
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
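For example, a hypothetical sketch using the `Normal` distribution (whose
parameters are `loc` and `scale`):
```python
Normal.param_static_shapes([100])
# ==> {'loc': TensorShape([100]), 'scale': TensorShape([100])}
```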
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used:
# `parameters = dict(locals())`.
return {k: v for k, v in self._parameters.items()
if not k.startswith("__") and k != "self"}
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g., the mean of
Student's t with df = 1 is undefined (there is no clear way to say it is either
+ or - infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
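For example, a minimal sketch (`Normal` is used purely for illustration):
```python
n1 = tf.distributions.Normal(loc=0., scale=1.)
n2 = n1.copy(scale=2.)  # Same loc, new scale.
```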
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
def _batch_shape_tensor(self):
raise NotImplementedError(
"batch_shape_tensor is not implemented: {}".format(type(self).__name__))
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._batch_shape())
def _event_shape_tensor(self):
raise NotImplementedError(
"event_shape_tensor is not implemented: {}".format(type(self).__name__))
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._event_shape())
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented: {}".format(
type(self).__name__))
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name)
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented: {}".format(
type(self).__name__))
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
def _prob(self, value):
raise NotImplementedError("prob is not implemented: {}".format(
type(self).__name__))
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob"):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented: {}".format(
type(self).__name__))
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented: {}".format(
type(self).__name__))
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
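For example, a minimal sketch using `Uniform` (the value shown assumes eager
execution or evaluation in a session):
```python
dist = tf.distributions.Uniform(low=0., high=1.)
dist.cdf(0.25)  # ==> 0.25
```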
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError(
"log_survival_function is not implemented: {}".format(
type(self).__name__))
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log1p(-self.cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented: {}".format(
type(self).__name__))
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **kwargs)
except NotImplementedError:
raise original_exception
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
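For example, a minimal sketch using `Normal` (the value shown assumes eager
execution or evaluation in a session):
```python
dist = tf.distributions.Normal(loc=0., scale=1.)
dist.survival_function(0.)  # ==> 0.5, since the standard normal is symmetric.
```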
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
def _entropy(self):
raise NotImplementedError("entropy is not implemented: {}".format(
type(self).__name__))
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented: {}".format(
type(self).__name__))
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _quantile(self, value):
raise NotImplementedError("quantile is not implemented: {}".format(
type(self).__name__))
def _call_quantile(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
return self._quantile(value, **kwargs)
def quantile(self, value, name="quantile"):
"""Quantile function. Aka "inverse cdf" or "percent point function".
Given random variable `X` and `p in [0, 1]`, the `quantile` is:
```none
quantile(p) := x such that P[X <= x] == p
```
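For example, a minimal sketch (the value shown assumes eager execution or
evaluation in a session; the standard normal median is 0):
```python
dist = tf.distributions.Normal(loc=0., scale=1.)
dist.quantile(0.5)  # ==> 0.0
```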
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_quantile(value, name)
def _variance(self):
raise NotImplementedError("variance is not implemented: {}".format(
type(self).__name__))
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError as original_exception:
try:
return math_ops.square(self._stddev())
except NotImplementedError:
raise original_exception
def _stddev(self):
raise NotImplementedError("stddev is not implemented: {}".format(
type(self).__name__))
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError as original_exception:
try:
return math_ops.sqrt(self._variance())
except NotImplementedError:
raise original_exception
def _covariance(self):
raise NotImplementedError("covariance is not implemented: {}".format(
type(self).__name__))
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrix,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
def _mode(self):
raise NotImplementedError("mode is not implemented: {}".format(
type(self).__name__))
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def _cross_entropy(self, other):
return kullback_leibler.cross_entropy(
self, other, allow_nan_stats=self.allow_nan_stats)
def cross_entropy(self, other, name="cross_entropy"):
"""Computes the (Shannon) cross entropy.
Denote this distribution (`self`) by `P` and the `other` distribution by
`Q`. Assuming `P, Q` are absolutely continuous with respect to
one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)
cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
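For example, a minimal sketch using two `Normal` distributions, for which a
closed-form KL divergence is registered; note that
`H[P, Q] = H[P] + KL[P || Q]`:
```python
p = tf.distributions.Normal(loc=0., scale=1.)
q = tf.distributions.Normal(loc=1., scale=2.)
p.cross_entropy(q)  # == p.entropy() + p.kl_divergence(q)
```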
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of (Shannon) cross entropy.
"""
with self._name_scope(name):
return self._cross_entropy(other)
def _kl_divergence(self, other):
return kullback_leibler.kl_divergence(
self, other, allow_nan_stats=self.allow_nan_stats)
def kl_divergence(self, other, name="kl_divergence"):
"""Computes the Kullback--Leibler divergence.
Denote this distribution (`self`) by `p` and the `other` distribution by
`q`. Assuming `p, q` are absolutely continuous with respect to reference
measure `r`, the KL divergence is defined as:
```none
KL[p, q] = E_p[log(p(X)/q(X))]
= -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x)
= H[p, q] - H[p]
```
where `F` denotes the support of the random variable `X ~ p`, `H[., .]`
denotes (Shannon) cross entropy, and `H[.]` denotes (Shannon) entropy.
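For example, a minimal sketch using two `Normal` distributions, for which a
closed-form KL divergence is registered:
```python
p = tf.distributions.Normal(loc=0., scale=1.)
q = tf.distributions.Normal(loc=1., scale=2.)
p.kl_divergence(q)  # Scalar Tensor holding KL[p || q].
```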
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of the Kullback-Leibler
divergence.
"""
with self._name_scope(name):
return self._kl_divergence(other)
def __str__(self):
return ("tfp.distributions.{type_name}("
"\"{self_name}\""
"{maybe_batch_shape}"
"{maybe_event_shape}"
", dtype={dtype})".format(
type_name=type(self).__name__,
self_name=self.name,
maybe_batch_shape=(", batch_shape={}".format(self.batch_shape)
if self.batch_shape.ndims is not None
else ""),
maybe_event_shape=(", event_shape={}".format(self.event_shape)
if self.event_shape.ndims is not None
else ""),
dtype=self.dtype.name))
def __repr__(self):
return ("<tfp.distributions.{type_name} "
"'{self_name}'"
" batch_shape={batch_shape}"
" event_shape={event_shape}"
" dtype={dtype}>".format(
type_name=type(self).__name__,
self_name=self.name,
batch_shape=self.batch_shape,
event_shape=self.event_shape,
dtype=self.dtype.name))
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32), array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape().dims[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/distribution.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core module for TensorFlow distribution objects and helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import deprecation
# pylint: disable=wildcard-import,unused-import,g-import-not-at-top
with deprecation.silence():
from tensorflow.python.ops.distributions.bernoulli import Bernoulli
from tensorflow.python.ops.distributions.beta import Beta
from tensorflow.python.ops.distributions.categorical import Categorical
from tensorflow.python.ops.distributions.dirichlet import Dirichlet
from tensorflow.python.ops.distributions.dirichlet_multinomial import DirichletMultinomial
from tensorflow.python.ops.distributions.distribution import *
from tensorflow.python.ops.distributions.exponential import Exponential
from tensorflow.python.ops.distributions.gamma import Gamma
from tensorflow.python.ops.distributions.kullback_leibler import *
from tensorflow.python.ops.distributions.laplace import Laplace
from tensorflow.python.ops.distributions.multinomial import Multinomial
from tensorflow.python.ops.distributions.normal import Normal
from tensorflow.python.ops.distributions.student_t import StudentT
from tensorflow.python.ops.distributions.uniform import Uniform
# pylint: enable=wildcard-import,unused-import
del deprecation
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/distributions.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The DirichletMultinomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"DirichletMultinomial",
]
_dirichlet_multinomial_sample_note = """For each batch of counts,
`value = [n_0, ..., n_{K-1}]`, `P[value]` is the probability that after
sampling `self.total_count` draws from this Dirichlet-Multinomial distribution,
the number of draws falling in class `j` is `n_j`. Since this definition is
[exchangeable](https://en.wikipedia.org/wiki/Exchangeable_random_variables),
different sequences have the same counts, so the probability includes a
combinatorial coefficient.
Note: `value` must be a non-negative tensor with dtype `self.dtype`, have no
fractional components, and such that
`tf.reduce_sum(value, -1) = self.total_count`. Its shape must be broadcastable
with `self.concentration` and `self.total_count`."""
@tf_export(v1=["distributions.DirichletMultinomial"])
class DirichletMultinomial(distribution.Distribution):
"""Dirichlet-Multinomial compound distribution.
The Dirichlet-Multinomial distribution is parameterized by a (batch of)
length-`K` `concentration` vectors (`K > 1`) and a `total_count` number of
trials, i.e., the number of trials per draw from the DirichletMultinomial. It
is defined over a (batch of) length-`K` vector `counts` such that
`tf.reduce_sum(counts, -1) = total_count`. The Dirichlet-Multinomial is
identically the Beta-Binomial distribution when `K = 2`.
#### Mathematical Details
The Dirichlet-Multinomial is a distribution over `K`-class counts, i.e., a
length-`K` vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`.
The probability mass function (pmf) is,
```none
pmf(n; alpha, N) = Beta(alpha + n) / (prod_j n_j!) / Z
Z = Beta(alpha) / N!
```
where:
* `concentration = alpha = [alpha_0, ..., alpha_{K-1}]`, `alpha_j > 0`,
* `total_count = N`, `N` a positive integer,
* `N!` is `N` factorial, and,
* `Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the
[multivariate beta function](
https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),
and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
Dirichlet-Multinomial is a [compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e., its
samples are generated as follows.
1. Choose class probabilities:
`probs = [p_0,...,p_{K-1}] ~ Dir(concentration)`
2. Draw integers:
`counts = [n_0,...,n_{K-1}] ~ Multinomial(total_count, probs)`
The last `concentration` dimension parametrizes a single Dirichlet-Multinomial
distribution. When calling distribution functions (e.g., `dist.prob(counts)`),
`concentration`, `total_count` and `counts` are broadcast to the same shape.
The last dimension of `counts` corresponds to a single Dirichlet-Multinomial
distribution.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Pitfalls
The number of classes, `K`, must not exceed:
- the largest integer representable by `self.dtype`, i.e.,
`2**(mantissa_bits+1)` (IEEE 754),
- the maximum `Tensor` index, i.e., `2**31-1`.
In other words,
```python
K <= min(2**31-1, {
tf.float16: 2**11,
tf.float32: 2**24,
tf.float64: 2**53 }[param.dtype])
```
Note: This condition is validated only when `self.validate_args = True`.
#### Examples
```python
alpha = [1., 2., 3.]
n = 2.
dist = DirichletMultinomial(n, alpha)
```
Creates a 3-class distribution in which the 3rd class is the most likely to be
drawn.
The distribution functions can be evaluated on counts.
```python
# counts same shape as alpha.
counts = [0., 0., 2.]
dist.prob(counts) # Shape []
# alpha will be broadcast to [[1., 2., 3.], [1., 2., 3.]] to match counts.
counts = [[1., 1., 0.], [1., 0., 1.]]
dist.prob(counts) # Shape [2]
# alpha will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7]
```
Creates a 2-batch of 3-class distributions.
```python
alpha = [[1., 2., 3.], [4., 5., 6.]] # Shape [2, 3]
n = [3., 3.]
dist = DirichletMultinomial(n, alpha)
# counts will be broadcast to [[2., 1., 0.], [2., 1., 0.]] to match alpha.
counts = [2., 1., 0.]
dist.prob(counts) # Shape [2]
```
"""
# TODO(b/27419586) Change docstring for dtype of concentration once int
# allowed.
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
total_count,
concentration,
validate_args=False,
allow_nan_stats=True,
name="DirichletMultinomial"):
"""Initialize a batch of DirichletMultinomial distributions.
Args:
total_count: Non-negative floating point tensor, whose dtype is the same
as `concentration`. The shape is broadcastable to `[N1,..., Nm]` with
`m >= 0`. Defines this as a batch of `N1 x ... x Nm` different
Dirichlet multinomial distributions. Its components should be equal to
integer values.
concentration: Positive floating point tensor, whose dtype is the
same as `total_count`, with shape broadcastable to `[N1,..., Nm, K]`, `m >= 0`.
Defines this as a batch of `N1 x ... x Nm` different `K` class Dirichlet
multinomial distributions.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[total_count, concentration]) as name:
# Broadcasting works because:
# * The broadcasting convention is to prepend dimensions of size [1], and
# we use the last dimension for the distribution, whereas
# the batch dimensions are the leading dimensions, which forces the
# distribution dimension to be defined explicitly (i.e. it cannot be
# created automatically by prepending). This forces enough explicitness.
# * All calls involving `counts` eventually require a broadcast between
# `counts` and concentration.
self._total_count = ops.convert_to_tensor(total_count, name="total_count")
if validate_args:
self._total_count = (
distribution_util.embed_check_nonnegative_integer_form(
self._total_count))
self._concentration = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration,
name="concentration"),
validate_args)
self._total_concentration = math_ops.reduce_sum(self._concentration, -1)
super(DirichletMultinomial, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._total_count,
self._concentration],
name=name)
@property
def total_count(self):
"""Number of trials used to construct a sample."""
return self._total_count
@property
def concentration(self):
"""Concentration parameter; expected prior counts for that coordinate."""
return self._concentration
@property
def total_concentration(self):
"""Sum of last dim of concentration parameter."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return array_ops.shape(self.concentration)[-1:]
def _event_shape(self):
# Event shape depends only on total_concentration, not "n".
return self.concentration.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
k = self.event_shape_tensor()[0]
unnormalized_logits = array_ops.reshape(
math_ops.log(random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
dtype=self.dtype,
seed=seed)),
shape=[-1, k])
draws = random_ops.multinomial(
logits=unnormalized_logits,
num_samples=n_draws,
seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2)
final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
x = array_ops.reshape(x, final_shape)
return math_ops.cast(x, self.dtype)
@distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note)
def _log_prob(self, counts):
counts = self._maybe_assert_valid_sample(counts)
ordered_prob = (
special_math_ops.lbeta(self.concentration + counts)
- special_math_ops.lbeta(self.concentration))
return ordered_prob + distribution_util.log_combinations(
self.total_count, counts)
@distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note)
def _prob(self, counts):
return math_ops.exp(self._log_prob(counts))
def _mean(self):
return self.total_count * (self.concentration /
self.total_concentration[..., array_ops.newaxis])
@distribution_util.AppendDocstring(
"""The covariance for each batch member is defined as the following:
```none
Var(X_j) = n * alpha_j / alpha_0 * (1 - alpha_j / alpha_0) *
(n + alpha_0) / (1 + alpha_0)
```
where `concentration = alpha` and
`total_concentration = alpha_0 = sum_j alpha_j`.
The covariance between elements in a batch is defined as:
```none
Cov(X_i, X_j) = -n * alpha_i * alpha_j / alpha_0 ** 2 *
(n + alpha_0) / (1 + alpha_0)
```
""")
def _covariance(self):
x = self._variance_scale_term() * self._mean()
return array_ops.matrix_set_diag(
-math_ops.matmul(x[..., array_ops.newaxis],
x[..., array_ops.newaxis, :]), # outer prod
self._variance())
def _variance(self):
scale = self._variance_scale_term()
x = scale * self._mean()
return x * (self.total_count * scale - x)
def _variance_scale_term(self):
"""Helper to `_covariance` and `_variance` which computes a shared scale."""
# We must take care to expand back the last dim whenever we use the
# total_concentration.
c0 = self.total_concentration[..., array_ops.newaxis]
return math_ops.sqrt((1. + c0 / self.total_count) / (1. + c0))
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
concentration = distribution_util.embed_check_categorical_event_shape(
concentration)
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
], concentration)
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return control_flow_ops.with_dependencies([
check_ops.assert_equal(
self.total_count, math_ops.reduce_sum(counts, -1),
message="counts last-dimension must sum to `self.total_count`"),
], counts)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/dirichlet_multinomial.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Uniform distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["distributions.Uniform"])
class Uniform(distribution.Distribution):
"""Uniform distribution with `low` and `high` parameters.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; a, b) = I[a <= x < b] / Z
Z = b - a
```
where
- `low = a`,
- `high = b`,
- `Z` is the normalizing constant, and
- `I[predicate]` is the [indicator function](
https://en.wikipedia.org/wiki/Indicator_function) for `predicate`.
The parameters `low` and `high` must be shaped in a way that supports
broadcasting (e.g., `high - low` is a valid operation).
#### Examples
```python
# Without broadcasting:
u1 = Uniform(low=3.0, high=4.0) # a single uniform distribution [3, 4]
u2 = Uniform(low=[1.0, 2.0],
high=[3.0, 4.0]) # 2 distributions [1, 3], [2, 4]
u3 = Uniform(low=[[1.0, 2.0],
[3.0, 4.0]],
high=[[1.5, 2.5],
[3.5, 4.5]]) # 4 distributions
```
```python
# With broadcasting:
u1 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) # 3 distributions
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
low=0.,
high=1.,
validate_args=False,
allow_nan_stats=True,
name="Uniform"):
"""Initialize a batch of Uniform distributions.
Args:
low: Floating point tensor, lower boundary of the output interval. Must
have `low < high`.
high: Floating point tensor, upper boundary of the output interval. Must
have `low < high`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
InvalidArgumentError: if `low >= high` and `validate_args=True`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[low, high]) as name:
with ops.control_dependencies([
check_ops.assert_less(
low, high, message="uniform not defined when low >= high.")
] if validate_args else []):
self._low = array_ops.identity(low, name="low")
self._high = array_ops.identity(high, name="high")
check_ops.assert_same_float_dtype([self._low, self._high])
super(Uniform, self).__init__(
dtype=self._low.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._low,
self._high],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("low", "high"),
([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2)))
@property
def low(self):
"""Lower boundary of the output interval."""
return self._low
@property
def high(self):
"""Upper boundary of the output interval."""
return self._high
def range(self, name="range"):
"""`high - low`."""
with self._name_scope(name):
return self.high - self.low
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.low),
array_ops.shape(self.high))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.low.get_shape(),
self.high.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
samples = random_ops.random_uniform(shape=shape,
dtype=self.dtype,
seed=seed)
return self.low + self.range() * samples
def _prob(self, x):
broadcasted_x = x * array_ops.ones(
self.batch_shape_tensor(), dtype=x.dtype)
return array_ops.where_v2(
math_ops.is_nan(broadcasted_x), broadcasted_x,
array_ops.where_v2(
math_ops.logical_or(broadcasted_x < self.low,
broadcasted_x >= self.high),
array_ops.zeros_like(broadcasted_x),
array_ops.ones_like(broadcasted_x) / self.range()))
def _cdf(self, x):
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(x), self.batch_shape_tensor())
zeros = array_ops.zeros(broadcast_shape, dtype=self.dtype)
ones = array_ops.ones(broadcast_shape, dtype=self.dtype)
broadcasted_x = x * ones
result_if_not_big = array_ops.where_v2(
x < self.low, zeros, (broadcasted_x - self.low) / self.range())
return array_ops.where_v2(x >= self.high, ones, result_if_not_big)
def _entropy(self):
return math_ops.log(self.range())
def _mean(self):
return (self.low + self.high) / 2.
def _variance(self):
return math_ops.square(self.range()) / 12.
def _stddev(self):
return self.range() / math.sqrt(12.)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/uniform.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Bernoulli distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["distributions.Bernoulli"])
class Bernoulli(distribution.Distribution):
"""Bernoulli distribution.
The Bernoulli distribution with `probs` parameter, i.e., the probability of a
`1` outcome (vs a `0` outcome).
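#### Examples
A minimal usage sketch (the commented values assume eager execution or
evaluation in a session):
```python
dist = Bernoulli(probs=[0.1, 0.5, 0.9])
dist.mean()           # ==> [0.1, 0.5, 0.9]
dist.prob([0, 0, 1])  # ==> [0.9, 0.5, 0.9]
dist.sample(4)        # int32 Tensor with shape [4, 3]
```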
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
logits=None,
probs=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="Bernoulli"):
"""Construct Bernoulli distributions.
Args:
logits: An N-D `Tensor` representing the log-odds of a `1` event. Each
entry in the `Tensor` parametrizes an independent Bernoulli distribution
where the probability of an event is sigmoid(logits). Only one of
`logits` or `probs` should be passed in.
probs: An N-D `Tensor` representing the probability of a `1`
event. Each entry in the `Tensor` parameterizes an independent
Bernoulli distribution. Only one of `logits` or `probs` should be passed
in.
dtype: The type of the event samples. Default: `int32`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If both `probs` and `logits` are passed, or if neither is passed.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
validate_args=validate_args,
name=name)
super(Bernoulli, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits, self._probs],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def logits(self):
"""Log-odds of a `1` outcome (vs `0`)."""
return self._logits
@property
def probs(self):
"""Probability of a `1` outcome (vs `0`)."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._logits)
def _batch_shape(self):
return self._logits.get_shape()
def _event_shape_tensor(self):
return array_ops.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
new_shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
uniform = random_ops.random_uniform(
new_shape, seed=seed, dtype=self.probs.dtype)
sample = math_ops.less(uniform, self.probs)
return math_ops.cast(sample, self.dtype)
def _log_prob(self, event):
if self.validate_args:
event = distribution_util.embed_check_integer_casting_closed(
event, target_dtype=dtypes.bool)
# TODO(jaana): The current sigmoid_cross_entropy_with_logits has
# inconsistent behavior for logits = inf/-inf.
event = math_ops.cast(event, self.logits.dtype)
logits = self.logits
# sigmoid_cross_entropy_with_logits doesn't broadcast shape,
# so we do this here.
def _broadcast(logits, event):
return (array_ops.ones_like(event) * logits,
array_ops.ones_like(logits) * event)
if not (event.get_shape().is_fully_defined() and
logits.get_shape().is_fully_defined() and
event.get_shape() == logits.get_shape()):
logits, event = _broadcast(logits, event)
return -nn.sigmoid_cross_entropy_with_logits(labels=event, logits=logits)
def _entropy(self):
return (-self.logits * (math_ops.sigmoid(self.logits) - 1) +
nn.softplus(-self.logits))
def _mean(self):
return array_ops.identity(self.probs)
def _variance(self):
return self._mean() * (1. - self.probs)
def _mode(self):
"""Returns `1` if `prob > 0.5` and `0` otherwise."""
return math_ops.cast(self.probs > 0.5, self.dtype)
@kullback_leibler.RegisterKL(Bernoulli, Bernoulli)
def _kl_bernoulli_bernoulli(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.
Args:
a: instance of a Bernoulli distribution object.
b: instance of a Bernoulli distribution object.
name: (optional) Name to use for created operations.
default is "kl_bernoulli_bernoulli".
Returns:
Batchwise KL(a || b)
"""
with ops.name_scope(name, "kl_bernoulli_bernoulli",
values=[a.logits, b.logits]):
delta_probs0 = nn.softplus(-b.logits) - nn.softplus(-a.logits)
delta_probs1 = nn.softplus(b.logits) - nn.softplus(a.logits)
return (math_ops.sigmoid(a.logits) * delta_probs0
+ math_ops.sigmoid(-a.logits) * delta_probs1)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/bernoulli.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector unit-test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import uniform as uniform_lib
def assert_finite(array):
if not np.isfinite(array).all():
raise AssertionError("array was not all finite. %s" % array[:15])
def assert_strictly_increasing(array):
np.testing.assert_array_less(0., np.diff(array))
def assert_strictly_decreasing(array):
np.testing.assert_array_less(np.diff(array), 0.)
def assert_strictly_monotonic(array):
if array[0] < array[-1]:
assert_strictly_increasing(array)
else:
assert_strictly_decreasing(array)
def assert_scalar_congruency(bijector,
lower_x,
upper_x,
n=int(10e3),
rtol=0.01,
sess=None):
"""Assert `bijector`'s forward/inverse/inverse_log_det_jacobian are congruent.
We draw samples `X ~ U(lower_x, upper_x)`, then feed these through the
`bijector` in order to check that:
1. the forward is strictly monotonic.
2. the forward/inverse methods are inverses of each other.
3. the jacobian is the correct change of measure.
This can only be used for a Bijector mapping open subsets of the real line
to themselves. This is due to the fact that this test compares the `prob`
before/after transformation with the Lebesgue measure on the line.
Args:
bijector: Instance of Bijector
lower_x: Python scalar.
upper_x: Python scalar. Must have `lower_x < upper_x`, and both must be in
the domain of the `bijector`. The `bijector` should probably not produce
huge variation in values in the interval `(lower_x, upper_x)`, or else
the variance based check of the Jacobian will require small `rtol` or
huge `n`.
n: Number of samples to draw for the checks.
rtol: Positive number. Used for the Jacobian check.
sess: `tf.compat.v1.Session`. Defaults to the default session.
Raises:
AssertionError: If tests fail.
"""
# Checks and defaults.
if sess is None:
sess = ops.get_default_session()
# Should be monotonic over this interval
ten_x_pts = np.linspace(lower_x, upper_x, num=10).astype(np.float32)
if bijector.dtype is not None:
ten_x_pts = ten_x_pts.astype(bijector.dtype.as_numpy_dtype)
forward_on_10_pts = bijector.forward(ten_x_pts)
# Set the lower/upper limits in the range of the bijector.
lower_y, upper_y = sess.run(
[bijector.forward(lower_x), bijector.forward(upper_x)])
if upper_y < lower_y: # If bijector.forward is a decreasing function.
lower_y, upper_y = upper_y, lower_y
# Uniform samples from the domain, range.
uniform_x_samps = uniform_lib.Uniform(
low=lower_x, high=upper_x).sample(n, seed=0)
uniform_y_samps = uniform_lib.Uniform(
low=lower_y, high=upper_y).sample(n, seed=1)
# These compositions should be the identity.
inverse_forward_x = bijector.inverse(bijector.forward(uniform_x_samps))
forward_inverse_y = bijector.forward(bijector.inverse(uniform_y_samps))
# For a < b, and transformation y = y(x),
# (b - a) = \int_a^b dx = \int_{y(a)}^{y(b)} |dx/dy| dy
# "change_measure_dy_dx" below is a Monte Carlo approximation to the right
# hand side, which should then be close to the left, which is (b - a).
# We assume event_ndims=0 because we assume scalar -> scalar. The log_det
# methods will handle whether they expect event_ndims > 0.
dy_dx = math_ops.exp(bijector.inverse_log_det_jacobian(
uniform_y_samps, event_ndims=0))
# E[|dx/dy|] under Uniform[lower_y, upper_y]
# = \int_{y(a)}^{y(b)} |dx/dy| dP(u), where dP(u) is the uniform measure
expectation_of_dy_dx_under_uniform = math_ops.reduce_mean(dy_dx)
# dy = dP(u) * (upper_y - lower_y)
change_measure_dy_dx = (
(upper_y - lower_y) * expectation_of_dy_dx_under_uniform)
# We'll also check that dy_dx = 1 / dx_dy.
dx_dy = math_ops.exp(
bijector.forward_log_det_jacobian(
bijector.inverse(uniform_y_samps), event_ndims=0))
[
forward_on_10_pts_v,
dy_dx_v,
dx_dy_v,
change_measure_dy_dx_v,
uniform_x_samps_v,
uniform_y_samps_v,
inverse_forward_x_v,
forward_inverse_y_v,
] = sess.run([
forward_on_10_pts,
dy_dx,
dx_dy,
change_measure_dy_dx,
uniform_x_samps,
uniform_y_samps,
inverse_forward_x,
forward_inverse_y,
])
assert_strictly_monotonic(forward_on_10_pts_v)
# Composition of forward/inverse should be the identity.
np.testing.assert_allclose(
inverse_forward_x_v, uniform_x_samps_v, atol=1e-5, rtol=1e-3)
np.testing.assert_allclose(
forward_inverse_y_v, uniform_y_samps_v, atol=1e-5, rtol=1e-3)
# Change of measure should be correct.
np.testing.assert_allclose(
upper_x - lower_x, change_measure_dy_dx_v, atol=0, rtol=rtol)
# Inverse Jacobian should be equivalent to the reciprocal of the forward
# Jacobian.
np.testing.assert_allclose(
dy_dx_v, np.divide(1., dx_dy_v), atol=1e-5, rtol=1e-3)
def assert_bijective_and_finite(
bijector, x, y, event_ndims, atol=0, rtol=1e-5, sess=None):
"""Assert that forward/inverse (along with jacobians) are inverses and finite.
  It is recommended to use x and y values that are very close to the boundary
  of the Bijector's domain.
Args:
bijector: A Bijector instance.
x: np.array of values in the domain of bijector.forward.
y: np.array of values in the domain of bijector.inverse.
event_ndims: Integer describing the number of event dimensions this bijector
operates on.
atol: Absolute tolerance.
rtol: Relative tolerance.
sess: TensorFlow session. Defaults to the default session.
Raises:
AssertionError: If tests fail.
"""
sess = sess or ops.get_default_session()
  # These are the incoming points; callers often pass a wide range of values
  # for which these end up non-finite, especially in 16-bit precision.
assert_finite(x)
assert_finite(y)
f_x = bijector.forward(x)
g_y = bijector.inverse(y)
[
x_from_x,
y_from_y,
ildj_f_x,
fldj_x,
ildj_y,
fldj_g_y,
f_x_v,
g_y_v,
] = sess.run([
bijector.inverse(f_x),
bijector.forward(g_y),
bijector.inverse_log_det_jacobian(f_x, event_ndims=event_ndims),
bijector.forward_log_det_jacobian(x, event_ndims=event_ndims),
bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims),
bijector.forward_log_det_jacobian(g_y, event_ndims=event_ndims),
f_x,
g_y,
])
assert_finite(x_from_x)
assert_finite(y_from_y)
assert_finite(ildj_f_x)
assert_finite(fldj_x)
assert_finite(ildj_y)
assert_finite(fldj_g_y)
assert_finite(f_x_v)
assert_finite(g_y_v)
np.testing.assert_allclose(x_from_x, x, atol=atol, rtol=rtol)
np.testing.assert_allclose(y_from_y, y, atol=atol, rtol=rtol)
np.testing.assert_allclose(-ildj_f_x, fldj_x, atol=atol, rtol=rtol)
np.testing.assert_allclose(-ildj_y, fldj_g_y, atol=atol, rtol=rtol)
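# Editor's addition: a hedged, NumPy-only illustration; not part of the
# original module. It mirrors the change-of-measure identity checked by
# assert_scalar_congruency, using the forward map y = exp(x), for which
# |dx/dy| = 1 / y, so (upper_x - lower_x) = \int (1 / y) dy over
# (exp(lower_x), exp(upper_x)), estimated below by Monte Carlo.
if __name__ == "__main__":
  rng = np.random.RandomState(0)
  lo_x, hi_x = 1.0, 2.0
  lo_y, hi_y = np.exp(lo_x), np.exp(hi_x)
  y_samples = rng.uniform(lo_y, hi_y, size=100000)
  mc_estimate = (hi_y - lo_y) * np.mean(1.0 / y_samples)
  np.testing.assert_allclose(mc_estimate, hi_x - lo_x, rtol=0.01)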
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/bijector_test_util.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registration and usage mechanisms for KL-divergences."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
_DIVERGENCES = {}
__all__ = [
"RegisterKL",
"kl_divergence",
]
def _registered_kl(type_a, type_b):
"""Get the KL function registered for classes a and b."""
hierarchy_a = tf_inspect.getmro(type_a)
hierarchy_b = tf_inspect.getmro(type_b)
dist_to_children = None
kl_fn = None
for mro_to_a, parent_a in enumerate(hierarchy_a):
for mro_to_b, parent_b in enumerate(hierarchy_b):
candidate_dist = mro_to_a + mro_to_b
candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)
if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):
dist_to_children = candidate_dist
kl_fn = candidate_kl_fn
return kl_fn
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
@tf_export(v1=["distributions.kl_divergence"])
def kl_divergence(distribution_a, distribution_b,
allow_nan_stats=True, name=None):
"""Get the KL-divergence KL(distribution_a || distribution_b).
If there is no KL method registered specifically for `type(distribution_a)`
and `type(distribution_b)`, then the class hierarchies of these types are
searched.
If one KL method is registered between any pairs of classes in these two
parent hierarchies, it is used.
If more than one such registered method exists, the method whose registered
classes have the shortest sum MRO paths to the input types is used.
If more than one such shortest path exists, the first method
identified in the search is used (favoring a shorter MRO distance to
`type(distribution_a)`).
Args:
distribution_a: The first distribution.
distribution_b: The second distribution.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Returns:
A Tensor with the batchwise KL-divergence between `distribution_a`
and `distribution_b`.
Raises:
NotImplementedError: If no KL method is defined for distribution types
of `distribution_a` and `distribution_b`.
"""
kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
if kl_fn is None:
raise NotImplementedError(
"No KL(distribution_a || distribution_b) registered for distribution_a "
"type %s and distribution_b type %s"
% (type(distribution_a).__name__, type(distribution_b).__name__))
with ops.name_scope("KullbackLeibler"):
kl_t = kl_fn(distribution_a, distribution_b, name=name)
if allow_nan_stats:
return kl_t
# Check KL for NaNs
kl_t = array_ops.identity(kl_t, name="kl")
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_not(
math_ops.reduce_any(math_ops.is_nan(kl_t))),
["KL calculation between %s and %s returned NaN values "
"(and was called with allow_nan_stats=False). Values:"
% (distribution_a.name, distribution_b.name), kl_t])]):
return array_ops.identity(kl_t, name="checked_kl")
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def cross_entropy(ref, other,
allow_nan_stats=True, name=None):
"""Computes the (Shannon) cross entropy.
Denote two distributions by `P` (`ref`) and `Q` (`other`). Assuming `P, Q`
are absolutely continuous with respect to one another and permit densities
  `p(x) dr(x)` and `q(x) dr(x)`, (Shannon) cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
ref: `tfd.Distribution` instance.
other: `tfd.Distribution` instance.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `ref.dtype` `Tensor` with shape `[B1, ..., Bn]`
      representing `n` different calculations of (Shannon) cross entropy.
"""
with ops.name_scope(name, "cross_entropy"):
return ref.entropy() + kl_divergence(
ref, other, allow_nan_stats=allow_nan_stats)
@tf_export(v1=["distributions.RegisterKL"])
class RegisterKL(object):
"""Decorator to register a KL divergence implementation function.
Usage:
@distributions.RegisterKL(distributions.Normal, distributions.Normal)
def _kl_normal_mvn(norm_a, norm_b):
# Return KL(norm_a || norm_b)
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self, dist_cls_a, dist_cls_b):
"""Initialize the KL registrar.
Args:
dist_cls_a: the class of the first argument of the KL divergence.
dist_cls_b: the class of the second argument of the KL divergence.
"""
self._key = (dist_cls_a, dist_cls_b)
def __call__(self, kl_fn):
"""Perform the KL registration.
Args:
kl_fn: The function to use for the KL divergence.
Returns:
kl_fn
Raises:
TypeError: if kl_fn is not a callable.
ValueError: if a KL divergence function has already been registered for
the given argument classes.
"""
if not callable(kl_fn):
raise TypeError("kl_fn must be callable, received: %s" % kl_fn)
if self._key in _DIVERGENCES:
raise ValueError("KL(%s || %s) has already been registered to: %s"
% (self._key[0].__name__, self._key[1].__name__,
_DIVERGENCES[self._key]))
_DIVERGENCES[self._key] = kl_fn
return kl_fn
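# Editor's addition: a hedged usage sketch, not part of the original module.
# Two placeholder classes (hypothetical, not real Distribution subclasses) show
# how RegisterKL and kl_divergence cooperate; the registered function returns a
# constant tensor where a real registration would compute the analytic KL.
if __name__ == "__main__":
  class _SketchDistA(object):  # hypothetical stand-in for a Distribution class
    name = "sketch_a"
  class _SketchDistB(object):  # hypothetical stand-in for a Distribution class
    name = "sketch_b"
  @RegisterKL(_SketchDistA, _SketchDistB)
  def _kl_sketch(dist_a, dist_b, name=None):  # pylint: disable=unused-argument
    return ops.convert_to_tensor(0.0, name=name or "kl_sketch")
  print(kl_divergence(_SketchDistA(), _SketchDistB()))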
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/kullback_leibler.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Gamma",
"GammaWithSoftplusConcentrationRate",
]
@tf_export(v1=["distributions.Gamma"])
class Gamma(distribution.Distribution):
"""Gamma distribution.
The Gamma distribution is defined over positive real numbers using
parameters `concentration` (aka "alpha") and `rate` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta, x > 0) = x**(alpha - 1) exp(-x beta) / Z
Z = Gamma(alpha) beta**(-alpha)
```
where:
* `concentration = alpha`, `alpha > 0`,
* `rate = beta`, `beta > 0`,
* `Z` is the normalizing constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
  The cumulative distribution function (cdf) is,
```none
cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta x) / Gamma(alpha)
```
where `GammaInc` is the [lower incomplete Gamma function](
https://en.wikipedia.org/wiki/Incomplete_gamma_function).
The parameters can be intuited via their relationship to mean and stddev,
```none
concentration = alpha = (mean / stddev)**2
rate = beta = mean / stddev**2 = concentration / mean
```
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: The samples of this distribution are always non-negative. However,
the samples that are smaller than `np.finfo(dtype).tiny` are rounded
to this value, so it appears more often than it should.
This should only be noticeable when the `concentration` is very small, or the
`rate` is very large. See note in `tf.random.gamma` docstring.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
dist = tfd.Gamma(concentration=3.0, rate=2.0)
dist2 = tfd.Gamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
```
Compute the gradients of samples w.r.t. the parameters:
```python
concentration = tf.constant(3.0)
rate = tf.constant(2.0)
dist = tfd.Gamma(concentration, rate)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [concentration, rate])
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="Gamma"):
"""Construct Gamma with `concentration` and `rate` parameters.
The parameters `concentration` and `rate` must be shaped in a way that
supports broadcasting (e.g. `concentration + rate` is a valid operation).
Args:
concentration: Floating point tensor, the concentration params of the
distribution(s). Must contain only positive values.
rate: Floating point tensor, the inverse scale params of the
distribution(s). Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `concentration` and `rate` are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
with ops.control_dependencies([
check_ops.assert_positive(concentration),
check_ops.assert_positive(rate),
] if validate_args else []):
self._concentration = array_ops.identity(
concentration, name="concentration")
self._rate = array_ops.identity(rate, name="rate")
check_ops.assert_same_float_dtype(
[self._concentration, self._rate])
super(Gamma, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._rate],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("concentration", "rate"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
@property
def rate(self):
"""Rate parameter."""
return self._rate
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.concentration),
array_ops.shape(self.rate))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.concentration.get_shape(),
self.rate.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
@distribution_util.AppendDocstring(
"""Note: See `tf.random.gamma` docstring for sampling details and
caveats.""")
def _sample_n(self, n, seed=None):
return random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
beta=self.rate,
dtype=self.dtype,
seed=seed)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _cdf(self, x):
x = self._maybe_assert_valid_sample(x)
# Note that igamma returns the regularized incomplete gamma function,
# which is what we want for the CDF.
return math_ops.igamma(self.concentration, self.rate * x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return math_ops.xlogy(self.concentration - 1., x) - self.rate * x
def _log_normalization(self):
return (math_ops.lgamma(self.concentration)
- self.concentration * math_ops.log(self.rate))
def _entropy(self):
return (self.concentration
- math_ops.log(self.rate)
+ math_ops.lgamma(self.concentration)
+ ((1. - self.concentration) *
math_ops.digamma(self.concentration)))
def _mean(self):
return self.concentration / self.rate
def _variance(self):
return self.concentration / math_ops.square(self.rate)
def _stddev(self):
return math_ops.sqrt(self.concentration) / self.rate
@distribution_util.AppendDocstring(
"""The mode of a gamma distribution is `(shape - 1) / rate` when
`shape > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`,
an exception will be raised rather than returning `NaN`.""")
def _mode(self):
mode = (self.concentration - 1.) / self.rate
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where_v2(self.concentration > 1., mode, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype),
self.concentration,
message="mode not defined when any concentration <= 1"),
], mode)
def _maybe_assert_valid_sample(self, x):
check_ops.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x),
], x)
class GammaWithSoftplusConcentrationRate(Gamma):
"""`Gamma` with softplus of `concentration` and `rate`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Gamma(tf.nn.softplus(concentration), "
"tf.nn.softplus(rate))` instead.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="GammaWithSoftplusConcentrationRate"):
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
super(GammaWithSoftplusConcentrationRate, self).__init__(
concentration=nn.softplus(concentration,
name="softplus_concentration"),
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Gamma, Gamma)
def _kl_gamma_gamma(g0, g1, name=None):
"""Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.
Args:
g0: instance of a Gamma distribution object.
g1: instance of a Gamma distribution object.
name: (optional) Name to use for created operations.
Default is "kl_gamma_gamma".
Returns:
kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
"""
with ops.name_scope(name, "kl_gamma_gamma", values=[
g0.concentration, g0.rate, g1.concentration, g1.rate]):
# Result from:
# http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
# For derivation see:
# http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions pylint: disable=line-too-long
return (((g0.concentration - g1.concentration)
* math_ops.digamma(g0.concentration))
+ math_ops.lgamma(g1.concentration)
- math_ops.lgamma(g0.concentration)
+ g1.concentration * math_ops.log(g0.rate)
- g1.concentration * math_ops.log(g1.rate)
+ g0.concentration * (g1.rate / g0.rate - 1.))
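# Editor's addition: a hedged illustration, not part of the original module.
# Numerically checks the mean/stddev parameterization quoted in the class
# docstring, concentration = (mean / stddev)**2 and rate = mean / stddev**2,
# against the closed forms used by _mean and _stddev above. Pure NumPy.
if __name__ == "__main__":
  target_mean, target_stddev = 3.0, 2.0
  sketch_concentration = (target_mean / target_stddev)**2  # 2.25
  sketch_rate = target_mean / target_stddev**2  # 0.75
  np.testing.assert_allclose(sketch_concentration / sketch_rate, target_mean)
  np.testing.assert_allclose(
      np.sqrt(sketch_concentration) / sketch_rate, target_stddev)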
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/ops/distributions/gamma.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ScopedAnnotation allows the profiler to track python events.
Usage:
with scoped_annotation.ScopedAnnotation('name'):
...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.pywrap_tensorflow import PythonScopedAnnotation
class ScopedAnnotation(object):
"""Context manager that generates an annotation for the profiler."""
def __init__(self, name, **kwargs):
if PythonScopedAnnotation.IsEnabled():
if kwargs:
name += '#' + ','.join(
            [key + '=' + str(value) for key, value in kwargs.items()]) + '#'
self._scoped_annotation = PythonScopedAnnotation(name)
else:
self._scoped_annotation = None
def __enter__(self):
if self._scoped_annotation:
self._scoped_annotation.Enter()
def __exit__(self, exc_type, exc_val, exc_tb):
if self._scoped_annotation:
self._scoped_annotation.Exit()
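# Editor's addition: a hedged usage sketch, not part of the original module.
# Mirrors the module docstring's usage and adds keyword arguments, which
# __init__ encodes into the annotation name roughly as
# 'train_step#batch=32,step=7#'. When the profiler has not enabled annotations
# this is a no-op, so the sketch is safe to run standalone.
if __name__ == '__main__':
  with ScopedAnnotation('train_step', batch=32, step=7):
    pass  # The profiled region would go here.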
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/scoped_annotation.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging tensorflow::tfprof::OpLogProto.
OpLogProto is used to add extra model information for offline analysis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import six
from tensorflow.core.profiler import tfprof_log_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.profiler.internal import flops_registry # pylint: disable=unused-import
from tensorflow.python.util.tf_export import tf_export
TRAINABLE_VARIABLES = '_trainable_variables'
REGISTERED_FLOP_STATS = 'flops'
def _fill_missing_graph_shape(graph, run_meta):
"""Fill Tensor shapes in 'graph' with run time shape from 'run_meta'."""
for dev_stat in run_meta.step_stats.dev_stats:
for node_stat in dev_stat.node_stats:
if not node_stat.output:
continue
try:
op = graph.get_operation_by_name(node_stat.node_name)
except KeyError as e:
        # Graph doesn't contain the node_stat, usually RecvTensor.
continue
if len(node_stat.output) != len(op.outputs):
# For example, conditional op has only 1 output at run time.
continue
for (i, node_stat_out) in enumerate(node_stat.output):
if op.outputs[i].get_shape().is_fully_defined():
continue
node_stat_dims = node_stat_out.tensor_description.shape.dim
node_stat_shape = tensor_shape.TensorShape(
[d.size for d in node_stat_dims])
try:
op.outputs[i].set_shape(op.outputs[i].get_shape().merge_with(
node_stat_shape))
except ValueError as e:
sys.stderr.write('Node %s incompatible shapes: %s.\n' %
(node_stat.node_name, e))
return graph
def _str_id(s, str_to_id):
"""Maps string to id."""
num = str_to_id.get(s, None)
if num is None:
num = len(str_to_id)
str_to_id[s] = num
return num
def _get_logged_ops(graph, run_meta=None, add_trace=True,
add_trainable_var=True):
"""Extract trainable model parameters and FLOPs for ops from a Graph.
Args:
graph: tf.Graph.
run_meta: RunMetadata proto used to complete shape information.
add_trace: Whether to add op trace information.
add_trainable_var: Whether to assign tf.compat.v1.trainable_variables() op
type '_trainable_variables'.
Returns:
logged_ops: dict mapping from op_name to OpLogEntry.
string_to_id: dict mapping from string to id.
"""
if run_meta:
graph = _fill_missing_graph_shape(graph, run_meta)
op_missing_shape = 0
logged_ops = {}
string_to_id = {}
string_to_id['none'] = len(string_to_id)
# TODO(xpan): Work with Profiler more efficiently.
for op in graph.get_operations():
try:
stats = ops.get_stats_for_node_def(
graph, op.node_def, REGISTERED_FLOP_STATS)
except ValueError:
      # Catch the exception when the shape is incomplete and skip this op.
op_missing_shape += 1
stats = None
entry = tfprof_log_pb2.OpLogEntry()
entry.name = op.name
add_entry = False
if stats and stats.value:
entry.float_ops = int(stats.value)
add_entry = True
if add_trace:
for tb in op.traceback_with_start_lines:
trace = entry.code_def.traces.add()
trace.file_id = _str_id(tb[0], string_to_id) if tb[0] else 0
trace.lineno = tb[1] if tb[1] else -1
trace.function_id = _str_id(tb[2], string_to_id) if tb[2] else 0
trace.line_id = _str_id(tb[3], string_to_id) if tb[3] else 0
trace.func_start_line = tb[4] if tb[4] else -1
add_entry = True
if add_entry:
logged_ops[entry.name] = entry
if add_trainable_var:
for v in graph.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES):
if v.op.name not in logged_ops:
entry = tfprof_log_pb2.OpLogEntry()
entry.name = v.op.name
entry.types.append(TRAINABLE_VARIABLES)
logged_ops[entry.name] = entry
else:
logged_ops[v.op.name].types.append(TRAINABLE_VARIABLES)
if op_missing_shape > 0 and not run_meta:
sys.stderr.write('%d ops no flops stats due to incomplete shapes.\n' %
op_missing_shape)
return logged_ops, string_to_id
def merge_default_with_oplog(graph, op_log=None, run_meta=None,
add_trace=True, add_trainable_var=True):
"""Merge the tfprof default extra info with caller's op_log.
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
op_log: OpLogProto proto.
run_meta: RunMetadata proto used to complete shape information.
add_trace: Whether to add op trace information.
add_trainable_var: Whether to assign tf.compat.v1.trainable_variables() op
type '_trainable_variables'.
Returns:
tmp_op_log: Merged OpLogProto proto.
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
tmp_op_log = tfprof_log_pb2.OpLogProto()
if not graph:
return tmp_op_log
logged_ops, string_to_id = _get_logged_ops(
graph, run_meta, add_trace=add_trace, add_trainable_var=add_trainable_var)
if not op_log:
tmp_op_log.log_entries.extend(logged_ops.values())
else:
all_ops = {}
for entry in op_log.log_entries:
all_ops[entry.name] = entry
for op_name, entry in six.iteritems(logged_ops):
if op_name in all_ops:
all_ops[op_name].types.extend(entry.types)
if entry.float_ops > 0 and all_ops[op_name].float_ops == 0:
all_ops[op_name].float_ops = entry.float_ops
if entry.code_def.traces and not all_ops[op_name].code_def.traces:
all_ops[op_name].code_def.MergeFrom(entry.code_def)
else:
all_ops[op_name] = entry
tmp_op_log.log_entries.extend(all_ops.values())
for s, i in six.iteritems(string_to_id):
tmp_op_log.id_to_string[i] = s
return tmp_op_log
@tf_export(v1=['profiler.write_op_log'])
def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
"""Log provided 'op_log', and add additional model information below.
The API also assigns ops in tf.compat.v1.trainable_variables() an op type
called '_trainable_variables'.
The API also logs 'flops' statistics for ops with op.RegisterStatistics()
defined. flops calculation depends on Tensor shapes defined in 'graph',
which might not be complete. 'run_meta', if provided, completes the shape
information with best effort.
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
log_dir: directory to write the log file.
    op_log: (Optional) OpLogProto proto to be written. If not provided, a new
        one is created.
run_meta: (Optional) RunMetadata proto that helps flops computation using
run time shape information.
add_trace: Whether to add python code trace information.
Used to support "code" view.
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
op_log = merge_default_with_oplog(graph, op_log, run_meta, add_trace)
with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'w') as log:
log.write(op_log.SerializeToString())
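# Editor's addition: a hedged usage sketch, not part of the original module.
# Demonstrates the call shape of write_op_log on a tiny standalone graph. The
# output directory below is an assumption for this sketch; any writable path
# works, and the log is written to <dir>/tfprof_log.
if __name__ == '__main__':
  from tensorflow.python.framework import constant_op  # sketch-local import
  sketch_dir = '/tmp/tfprof_log_sketch'  # hypothetical output location
  if not gfile.Exists(sketch_dir):
    gfile.MakeDirs(sketch_dir)
  sketch_graph = ops.Graph()
  with sketch_graph.as_default():
    constant_op.constant([1.0, 2.0], name='sketch_const')
  write_op_log(sketch_graph, log_dir=sketch_dir)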
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/tfprof_logger.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Context that captures profile and performs profiling/dumping.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import random
import sys
import threading
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.client import session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.util import compat
WARMUP_STEPS = 10
MAX_TRACED_STEPS = 100
def _profiled_init(self, target='', graph=None, config=None):
"""Overwrites the session.__init__."""
self._profiler_init_internal(target, graph, config) # pylint: disable=protected-access
def _profiled_run(self,
fetches,
feed_dict=None,
options=None,
run_metadata=None):
"""Overwrites the session.run()."""
# pylint: disable=protected-access
# Count the session steps.
with self.profile_context._new_step() as state:
step, locked = state
# Fast path if no need for profiling.
if locked and not self.profile_context._is_fast_path(step):
# Maybe trace this step.
if self.profile_context._should_trace(step, self.graph, fetches):
if self.profile_context._debug:
sys.stderr.write('debug: tracing step: %d\n' % step)
# Enable tracing, perform auto profiling or auto dump.
if not run_metadata:
run_metadata = config_pb2.RunMetadata()
if not options:
options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
old_trace_level = options.trace_level
else:
old_trace_level = options.trace_level
options.trace_level = config_pb2.RunOptions.FULL_TRACE
ret = self._profiler_run_internal(
fetches, feed_dict, options, run_metadata)
if self.profile_context._debug:
self.profile_context._dump_file(run_metadata, 'run_meta_%d' % step)
self.profile_context.profiler._graph = self.graph
self.profile_context.profiler.add_step(step, run_metadata)
options.trace_level = old_trace_level
else:
ret = self._profiler_run_internal(fetches, feed_dict, options)
# Maybe dump profile.
self.profile_context._maybe_dump(step)
# Maybe profile:
to_profiles = self.profile_context._profile_candidates()
for to_prof in to_profiles:
cmd, opts, _ = to_prof
saved_views = self.profile_context._views.setdefault(cmd, {})
if self.profile_context._debug:
sys.stderr.write('debug: profiling %s step: %d\n' % (cmd, step))
if cmd == 'graph':
saved_views[step] = self.profile_context.profiler.profile_graph(opts)
elif cmd == 'scope':
saved_views[step] = self.profile_context.profiler.profile_name_scope(
opts)
elif cmd == 'op':
saved_views[step] = self.profile_context.profiler.profile_operations(
opts)
elif cmd == 'code':
saved_views[step] = self.profile_context.profiler.profile_python(opts)
else:
raise ValueError('Unknown cmd: %s\n' % cmd)
return ret
# Fast no lock path.
return self._profiler_run_internal(
fetches, feed_dict, options, run_metadata)
# pylint: enable=protected-access
class ProfileContext(object):
"""A Context that captures RunMetadata and performs profiling.
```python
# Trace steps 100~200, profile at [150, 200] and dump profile at 200.
with tf.contrib.tfprof.ProfileContext('/tmp/train_dir',
trace_steps=range(100, 200, 3),
dump_steps=[200]) as pctx:
opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
pctx.add_auto_profiling('op', opts, [150, 200])
train_loop().
# Tracing only.
with tf.contrib.tfprof.ProfileContext('/tmp/train_dir') as pctx:
    # Run the train/eval loop for at least a few hundred steps. Profiles will
    # be dumped to train_dir. Use the web UI or command line to do profiling.
train_loop().
# When session object is available, do explicit trace, profile and dump.
with tf.contrib.tfprof.ProfileContext('/tmp/train_dir',
trace_steps=[],
dump_steps=[]) as pctx:
opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
pctx.trace_next_step()
_ = session.run(train_op)
pctx.profiler.profile_operations(options=opts)
```
Args:
profile_dir: Directory to store profiles.
trace_steps: A list of session run steps to trace. If None, use
pre-defined steps.
dump_steps: A list of steps to dump the profile to `profile_dir`. If None,
use pre-defined steps.
    enabled: If false, everything is disabled with minimal overhead. It allows
        the user to enable profiling only when needed.
    debug: If true, also dump the raw trace RunMetadata text files to
        profile_dir and print debugging messages. Useful for bug reports.
"""
def __init__(self,
profile_dir,
trace_steps=None,
dump_steps=None,
enabled=True,
debug=False):
self._enabled = enabled
if not self._enabled:
return
self._debug = debug
if not profile_dir:
raise ValueError('Must have a directory for profile.\n')
self._profiler_dir = profile_dir
if trace_steps is None:
self._trace_steps = set()
self._auto_tracing = True
else:
if len(trace_steps) > MAX_TRACED_STEPS:
raise ValueError('Only support tracing up to 100 steps.\n')
self._trace_steps = set(trace_steps[:])
self._auto_tracing = False
if dump_steps is None:
self._dump_steps = set([MAX_TRACED_STEPS])
else:
self._dump_steps = set(dump_steps[:])
self._rng = random.Random(111)
self._fetched = set()
self._slow_path_steps = self._dump_steps | self._trace_steps
self._trace_next_step = False
self._dump_next_step = False
self._step = 0
self._traced_steps = 0
self._auto_profiles = []
self._profiler = None
self._views = {}
self._lock = threading.Lock()
def get_profiles(self, cmd):
"""Returns profiling results for each step at which `cmd` was run.
Args:
cmd: string, profiling command used in an `add_auto_profiling` call.
Returns:
dict[int: (MultiGraphNodeProto | GraphNodeProto)]. Keys are steps at which
the profiling command was run. Values are the outputs of profiling.
        For "code" and "op" commands this will be a `MultiGraphNodeProto`, for
        "scope" and "graph" commands this will be a `GraphNodeProto`.
Raises:
ValueError: if `cmd` was never run (either because no session.run call was
made or because there was no `add_auto_profiling` call with the specified
        `cmd`).
"""
if cmd not in self._views:
raise ValueError('No autoprofiler for command: {}, was run'.format(cmd))
return self._views[cmd]
def add_auto_profiling(self, cmd, options, profile_steps):
"""Traces and profiles at some session run steps.
Args:
      cmd: The profiling command (i.e. scope, op, python, graph).
options: The profiling options.
profile_steps: A list/set of integers. The profiling command and options
will be run automatically at these integer steps. Each step is
a session.run.
"""
if not self._enabled:
return
self._auto_profiles.append((cmd, options, profile_steps[:]))
self._slow_path_steps |= set(profile_steps)
self._trace_steps |= set(profile_steps)
@property
def profiler(self):
"""Returns the current profiler object."""
if not self._enabled:
return None
if not self._profiler:
self._profiler = model_analyzer.Profiler(ops.get_default_graph())
return self._profiler
def trace_next_step(self):
"""Enables tracing and adds traces to profiler at next step."""
if not self._enabled:
return
self._trace_next_step = True
self._slow_path_steps.add(self._step)
def dump_next_step(self):
"""Enable tracing and dump profiles at next step."""
if not self._enabled:
return
self._dump_next_step = True
self._slow_path_steps.add(self._step)
def _is_fast_path(self, step):
if step in self._slow_path_steps:
return False
    # When the user doesn't set tracing steps explicitly, auto-decide.
if (self._auto_tracing and step > WARMUP_STEPS and
self._traced_steps <= MAX_TRACED_STEPS):
return False
return True
  def _should_trace(self, step, graph, fetches):
    """Whether tracing should be done at the current step."""
if self._traced_steps > MAX_TRACED_STEPS:
return False
# Check user-set tracing steps.
if step in self._trace_steps or self._trace_next_step:
self._traced_steps += 1
return True
    # With no user-set tracing steps, auto trace once past the warm-up steps.
if self._auto_tracing and step > WARMUP_STEPS:
# If the fetches have not been seen before, trace it.
with graph.as_default():
fetch_names = [f.name for f in
session._FetchMapper.for_fetch(fetches).unique_fetches()] # pylint: disable=protected-access
fetch_name = '-'.join(sorted(fetch_names))
if self._debug:
sys.stderr.write('debug: trace fetches: %s\n' % fetch_name)
if fetch_name not in self._fetched:
self._fetched.add(fetch_name)
self._traced_steps += 1
return True
      # If the trace coverage is low, do some random tracing.
if (self.profiler._coverage < 0.5 and step < MAX_TRACED_STEPS and # pylint: disable=protected-access
self._rng.randint(0, 10) < 2):
self._traced_steps += 1
return True
return False
def _maybe_dump(self, step):
"""Maybe dump the profile file."""
if not (step in self._dump_steps or self._dump_next_step):
return
if self._debug:
sys.stderr.write('debug: dumping file at step: %d\n' % step)
if not gfile.Exists(self._profiler_dir):
gfile.MakeDirs(self._profiler_dir)
filename = os.path.join(compat.as_bytes(self._profiler_dir),
compat.as_bytes('profile_%d' % step))
self.profiler._write_profile(filename) # pylint: disable=protected-access
def _dump_file(self, pb, basename):
if not gfile.Exists(self._profiler_dir):
gfile.MakeDirs(self._profiler_dir)
with gfile.Open(os.path.join(self._profiler_dir, basename), 'w') as f:
f.write('%s' % pb)
@contextlib.contextmanager
def _new_step(self):
acquired = self._lock.acquire(False)
yield (self._step, acquired)
self._step += 1
self._trace_next_step = False
self._dump_next_step = False
if acquired:
self._lock.release()
def _profile_candidates(self):
to_profile = []
for auto_prof in self._auto_profiles:
_, _, prof_steps = auto_prof
if self._step in prof_steps:
to_profile.append(auto_prof)
return to_profile
def __enter__(self):
if self._enabled:
self.old_run = getattr(session.BaseSession, 'run', None)
self.old_init = getattr(session.BaseSession, '__init__', None)
if not self.old_run:
raise errors.InternalError(None, None, 'BaseSession misses run method.')
elif not self.old_init:
raise errors.InternalError(None, None,
'BaseSession misses __init__ method.')
elif getattr(session.BaseSession, '_profiler_run_internal', None):
raise errors.InternalError(None, None,
'Already in context or context not cleaned.')
elif getattr(session.BaseSession, '_profiler_init_internal', None):
raise errors.InternalError(None, None,
'Already in context or context not cleaned.')
else:
setattr(session.BaseSession, 'run', _profiled_run)
setattr(session.BaseSession, '__init__', _profiled_init)
setattr(session.BaseSession, '_profiler_run_internal', self.old_run)
setattr(session.BaseSession, '_profiler_init_internal', self.old_init)
setattr(session.BaseSession, 'profile_context', self)
return self
else:
return self
def __exit__(self, exec_type, exec_value, exec_tb):
if not self._enabled:
return
print_mdl.DeleteProfiler()
setattr(session.BaseSession, 'run', self.old_run)
setattr(session.BaseSession, '__init__', self.old_init)
setattr(session.BaseSession, '_profiler_run_internal', None)
setattr(session.BaseSession, '_profiler_init_internal', None)
setattr(session.BaseSession, 'profile_context', None)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/profile_context.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pprof_profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
from proto import profile_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.profiler import pprof_profiler
class PprofProfilerTest(test.TestCase):
def testDataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
graph.get_operations.return_value = []
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEquals(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEquals(0, len(profile_files))
def testRunMetadataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [('a/b/file1', 10, 'some_var')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEquals(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEquals(0, len(profile_files))
def testValidProfile(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
node1 = step_stats_pb2.NodeExecStats(
node_name='Add/123',
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = 'deviceA'
device1.node_stats.extend([node1])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [
('a/b/file1', 10, 'apply_op', 'abc'), ('a/c/file2', 12, 'my_op', 'def')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
expected_proto = """sample_type {
type: 5
unit: 5
}
sample_type {
type: 6
unit: 7
}
sample_type {
type: 8
unit: 7
}
sample {
value: 1
value: 4
value: 2
label {
key: 1
str: 2
}
label {
key: 3
str: 4
}
}
string_table: ""
string_table: "node_name"
string_table: "Add/123"
string_table: "op_type"
string_table: "add"
string_table: "count"
string_table: "all_time"
string_table: "nanoseconds"
string_table: "op_time"
string_table: "Device 1 of 1: deviceA"
comment: 9
"""
# Test with protos
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEquals(1, len(profiles))
self.assertTrue('deviceA' in profiles)
self.assertEquals(expected_proto, str(profiles['deviceA']))
# Test with files
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEquals(1, len(profile_files))
with gzip.open(profile_files[0]) as profile_file:
profile_contents = profile_file.read()
profile = profile_pb2.Profile()
profile.ParseFromString(profile_contents)
self.assertEquals(expected_proto, str(profile))
@test_util.run_v1_only('b/120545219')
def testProfileWithWhileLoop(self):
options = config_pb2.RunOptions()
options.trace_level = config_pb2.RunOptions.FULL_TRACE
run_metadata = config_pb2.RunMetadata()
num_iters = 5
with self.cached_session() as sess:
i = constant_op.constant(0)
c = lambda i: math_ops.less(i, num_iters)
b = lambda i: math_ops.add(i, 1)
r = control_flow_ops.while_loop(c, b, [i])
sess.run(r, options=options, run_metadata=run_metadata)
profiles = pprof_profiler.get_profiles(sess.graph, run_metadata)
self.assertEquals(1, len(profiles))
profile = next(iter(profiles.values()))
add_samples = [] # Samples for the while/Add node
for sample in profile.sample:
if profile.string_table[sample.label[0].str] == 'while/Add':
add_samples.append(sample)
# Values for same nodes are aggregated.
self.assertEquals(1, len(add_samples))
# Value of "count" should be equal to number of iterations.
self.assertEquals(num_iters, add_samples[0].value[0])
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/pprof_profiler_test.py
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import option_builder
# pylint: disable=g-bad-import-order
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
builder = option_builder.ProfileOptionBuilder
class ProfilerTest(test.TestCase):
@test_util.run_deprecated_v1
def testProfileBasic(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.select(['params', 'float_ops', 'micros', 'bytes',
'device', 'op_types', 'occurrence']).build())
# Test the output without run_meta.
sess = session.Session()
r = lib.BuildFullModel()
sess.run(variables.global_variables_initializer())
# Test the output with run_meta.
run_meta = config_pb2.RunMetadata()
_ = sess.run(r,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
profiler = model_analyzer.Profiler(sess.graph)
profiler.add_step(1, run_meta)
profiler.profile_graph(opts)
with gfile.Open(outfile, 'r') as f:
profiler_str = f.read()
model_analyzer.profile(
sess.graph, cmd='graph', run_meta=run_meta, options=opts)
with gfile.Open(outfile, 'r') as f:
pma_str = f.read()
self.assertEqual(pma_str, profiler_str)
profiler.profile_name_scope(opts)
with gfile.Open(outfile, 'r') as f:
profiler_str = f.read()
model_analyzer.profile(
sess.graph, cmd='scope', run_meta=run_meta, options=opts)
with gfile.Open(outfile, 'r') as f:
pma_str = f.read()
self.assertEqual(pma_str, profiler_str)
profiler.profile_python(opts)
with gfile.Open(outfile, 'r') as f:
profiler_str = f.read()
model_analyzer.profile(
sess.graph, cmd='code', run_meta=run_meta, options=opts)
with gfile.Open(outfile, 'r') as f:
pma_str = f.read()
self.assertEqual(pma_str, profiler_str)
profiler.profile_operations(opts)
with gfile.Open(outfile, 'r') as f:
profiler_str = f.read()
model_analyzer.profile(
sess.graph, cmd='op', run_meta=run_meta, options=opts)
with gfile.Open(outfile, 'r') as f:
pma_str = f.read()
self.assertEqual(pma_str, profiler_str)
model_analyzer.profile(
sess.graph, cmd='scope', run_meta=run_meta, options=opts)
with gfile.Open(outfile, 'r') as f:
pma_str = f.read()
self.assertNotEqual(pma_str, profiler_str)
def testMultiStepProfile(self):
ops.reset_default_graph()
opts = builder.time_and_memory(min_bytes=0)
with session.Session() as sess:
r1, r2, r3 = lib.BuildSplitableModel()
sess.run(variables.global_variables_initializer())
profiler = model_analyzer.Profiler(sess.graph)
pb0 = profiler.profile_name_scope(opts)
run_meta = config_pb2.RunMetadata()
_ = sess.run(r1,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
profiler.add_step(1, run_meta)
pb1 = profiler.profile_name_scope(opts)
self.assertNotEqual(lib.SearchTFProfNode(pb1, 'DW'), None)
self.assertEqual(lib.SearchTFProfNode(pb1, 'DW2'), None)
self.assertEqual(lib.SearchTFProfNode(pb1, 'add'), None)
run_meta2 = config_pb2.RunMetadata()
_ = sess.run(r2,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta2)
profiler.add_step(2, run_meta2)
pb2 = profiler.profile_name_scope(opts)
self.assertNotEqual(lib.SearchTFProfNode(pb2, 'DW'), None)
self.assertNotEqual(lib.SearchTFProfNode(pb2, 'DW2'), None)
self.assertEqual(lib.SearchTFProfNode(pb2, 'add'), None)
run_meta3 = config_pb2.RunMetadata()
_ = sess.run(r3,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta3)
profiler.add_step(3, run_meta3)
pb3 = profiler.profile_name_scope(opts)
self.assertNotEqual(lib.SearchTFProfNode(pb3, 'DW'), None)
self.assertNotEqual(lib.SearchTFProfNode(pb3, 'DW2'), None)
self.assertNotEqual(lib.SearchTFProfNode(pb3, 'add'), None)
self.assertEqual(lib.SearchTFProfNode(pb0, 'Conv2D'), None)
self.assertGreater(lib.SearchTFProfNode(pb1, 'Conv2D').exec_micros, 0)
self.assertEqual(lib.SearchTFProfNode(pb1, 'Conv2D_1'), None)
self.assertGreater(lib.SearchTFProfNode(pb2, 'Conv2D_1').exec_micros, 0)
self.assertEqual(lib.SearchTFProfNode(pb2, 'add'), None)
self.assertGreater(lib.SearchTFProfNode(pb3, 'add').exec_micros, 0)
advice_pb = profiler.advise(model_analyzer.ALL_ADVICE)
self.assertTrue('AcceleratorUtilizationChecker' in advice_pb.checkers)
self.assertTrue('ExpensiveOperationChecker' in advice_pb.checkers)
self.assertTrue('OperationChecker' in advice_pb.checkers)
checker = advice_pb.checkers['AcceleratorUtilizationChecker']
if test.is_gpu_available():
self.assertGreater(len(checker.reports), 0)
else:
self.assertEqual(len(checker.reports), 0)
checker = advice_pb.checkers['ExpensiveOperationChecker']
self.assertGreater(len(checker.reports), 0)
@test_util.run_deprecated_v1
def testMultipleProfilePerStep(self):
ops.reset_default_graph()
opts = (builder(builder.trainable_variables_parameter())
.with_empty_output()
.with_accounted_types(['.*'])
.select(['micros', 'bytes', 'peak_bytes',
'residual_bytes', 'output_bytes']).build())
r = lib.BuildSmallModel()
sess = session.Session()
profiler = model_analyzer.Profiler(sess.graph)
init_var_run_meta = config_pb2.RunMetadata()
sess.run(variables.global_variables_initializer(),
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=init_var_run_meta)
train_run_meta = config_pb2.RunMetadata()
sess.run(r,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=train_run_meta)
profiler.add_step(0, train_run_meta)
ret1 = profiler.profile_name_scope(opts)
n1 = lib.SearchTFProfNode(
ret1, 'DW/Initializer/random_normal/RandomStandardNormal')
    # Without the variable-initialization run_meta, the profile doesn't
    # include the variable-initialization information.
self.assertEqual(n1.exec_micros, 0)
self.assertEqual(n1.requested_bytes, 0)
self.assertEqual(n1.peak_bytes, 0)
self.assertEqual(n1.residual_bytes, 0)
profiler.add_step(0, init_var_run_meta)
ret2 = profiler.profile_name_scope(opts)
n2 = lib.SearchTFProfNode(
ret2, 'DW/Initializer/random_normal/RandomStandardNormal')
# After adding the var initialization run_meta.
self.assertGreater(n2.exec_micros, 0)
self.assertGreater(n2.requested_bytes, 0)
self.assertGreater(n2.peak_bytes, 0)
self.assertGreater(n2.residual_bytes, 0)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/profiler_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for building profiler options."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.profiler import tfprof_logger
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['profiler.ProfileOptionBuilder'])
class ProfileOptionBuilder(object):
# pylint: disable=line-too-long
"""Option Builder for Profiling API.
For tutorial on the options, see
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md
```python
# Users can use pre-built options:
opts = (
tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())
# Or, build your own options:
opts = (tf.compat.v1.profiler.ProfileOptionBuilder()
.with_max_depth(10)
.with_min_micros(1000)
.select(['accelerator_micros'])
.with_stdout_output()
      .build())
# Or customize the pre-built options:
opts = (tf.compat.v1.profiler.ProfileOptionBuilder(
tf.profiler.ProfileOptionBuilder.time_and_memory())
.with_displaying_options(show_name_regexes=['.*rnn.*'])
.build())
# Finally, profiling with the options:
_ = tf.compat.v1.profiler.profile(tf.compat.v1.get_default_graph(),
run_meta=run_meta,
cmd='scope',
options=opts)
```
"""
# pylint: enable=line-too-long
def __init__(self, options=None):
"""Constructor.
Args:
options: Optional initial option dict to start with.
"""
if options is not None:
self._options = copy.deepcopy(options)
else:
self._options = {'max_depth': 100,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'min_occurrence': 0,
'order_by': 'name',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': False,
'select': ['micros'],
'step': -1,
'output': 'stdout'}
@staticmethod
def trainable_variables_parameter():
"""Options used to profile trainable variable parameters.
Normally used together with 'scope' view.
Returns:
A dict of profiling options.
"""
return {'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'min_occurrence': 0,
'order_by': 'name',
'account_type_regexes': [tfprof_logger.TRAINABLE_VARIABLES],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['params'],
'step': -1,
'output': 'stdout'}
@staticmethod
def float_operation():
# pylint: disable=line-too-long
"""Options used to profile float operations.
Please see https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/profile_model_architecture.md
on the caveats of calculating float operations.
Returns:
A dict of profiling options.
"""
# pylint: enable=line-too-long
return {'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 1,
'min_occurrence': 0,
'order_by': 'float_ops',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['float_ops'],
'step': -1,
'output': 'stdout'}
@staticmethod
def time_and_memory(min_micros=1, min_bytes=1, min_accelerator_micros=0,
min_cpu_micros=0, min_peak_bytes=0, min_residual_bytes=0,
min_output_bytes=0):
"""Show operation time and memory consumptions.
Args:
min_micros: Only show profiler nodes with execution time
no less than this. It sums accelerator and cpu times.
min_bytes: Only show profiler nodes requested to allocate no less bytes
than this.
min_accelerator_micros: Only show profiler nodes spend no less than
this time on accelerator (e.g. GPU).
min_cpu_micros: Only show profiler nodes spend no less than
this time on cpu.
min_peak_bytes: Only show profiler nodes using no less than this bytes
at peak (high watermark). For profiler nodes consist of multiple
graph nodes, it sums the graph nodes' peak_bytes.
min_residual_bytes: Only show profiler nodes have no less than
this bytes not being de-allocated after Compute() ends. For
profiler nodes consist of multiple graph nodes, it sums the
graph nodes' residual_bytes.
min_output_bytes: Only show profiler nodes have no less than this bytes
output. The output are not necessarily allocated by this profiler
nodes.
Returns:
A dict of profiling options.
"""
return {'max_depth': 10000,
'min_bytes': min_bytes,
'min_peak_bytes': min_peak_bytes,
'min_residual_bytes': min_residual_bytes,
'min_output_bytes': min_output_bytes,
'min_micros': min_micros,
'min_accelerator_micros': min_accelerator_micros,
'min_cpu_micros': min_cpu_micros,
'min_params': 0,
'min_float_ops': 0,
'min_occurrence': 0,
'order_by': 'micros',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['micros', 'bytes'],
'step': -1,
'output': 'stdout'}
def build(self):
"""Build a profiling option.
Returns:
A dict of profiling options.
"""
return copy.deepcopy(self._options)
def with_max_depth(self, max_depth):
"""Set the maximum depth of display.
The depth depends on the profiling view. For the 'scope' view, it is the
depth of the name scope hierarchy (tree); for the 'op' view, it is the
number of operation types (list), etc.
Args:
max_depth: Maximum depth of the data structure to display.
Returns:
self
"""
self._options['max_depth'] = max_depth
return self
def with_min_memory(self,
min_bytes=0,
min_peak_bytes=0,
min_residual_bytes=0,
min_output_bytes=0):
"""Only show profiler nodes consuming no less than 'min_bytes'.
Args:
min_bytes: Only show profiler nodes requested to allocate no less
bytes than this.
min_peak_bytes: Only show profiler nodes using no less than this many
bytes at peak (high watermark). For profiler nodes consisting of
multiple graph nodes, the graph nodes' peak_bytes are summed.
min_residual_bytes: Only show profiler nodes that have no less than
this many bytes still allocated after Compute() ends. For profiler
nodes consisting of multiple graph nodes, the graph nodes'
residual_bytes are summed.
min_output_bytes: Only show profiler nodes that output no less than
this many bytes. The output is not necessarily allocated by the
profiler node itself.
Returns:
self
"""
self._options['min_bytes'] = min_bytes
self._options['min_peak_bytes'] = min_peak_bytes
self._options['min_residual_bytes'] = min_residual_bytes
self._options['min_output_bytes'] = min_output_bytes
return self
def with_min_execution_time(self,
min_micros=0,
min_accelerator_micros=0,
min_cpu_micros=0):
"""Only show profiler nodes consuming no less than 'min_micros'.
Args:
min_micros: Only show profiler nodes with execution time
no less than this. It sums accelerator and cpu times.
min_accelerator_micros: Only show profiler nodes that spend no less
than this much time on the accelerator (e.g. GPU).
min_cpu_micros: Only show profiler nodes that spend no less than
this much time on the cpu.
Returns:
self
"""
self._options['min_micros'] = min_micros
self._options['min_accelerator_micros'] = min_accelerator_micros
self._options['min_cpu_micros'] = min_cpu_micros
return self
def with_min_parameters(self, min_params):
"""Only show profiler nodes holding no less than 'min_params' parameters.
'Parameters' normally refers to the weights of TensorFlow variables.
It reflects the 'capacity' of models.
Args:
min_params: Only show profiler nodes holding no fewer parameters
than this.
Returns:
self
"""
self._options['min_params'] = min_params
return self
def with_min_occurrence(self, min_occurrence):
# pylint: disable=line-too-long
"""Only show profiler nodes including no less than 'min_occurrence' graph nodes.
A "node" means a profiler output node, which can be a python line
(code view), an operation type (op view), or a graph node
(graph/scope view). A python line includes all graph nodes created by that
line, while an operation type includes all graph nodes of that type.
Args:
min_occurrence: Only show profiler nodes including no less than this
many graph nodes.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['min_occurrence'] = min_occurrence
return self
def with_min_float_operations(self, min_float_ops):
# pylint: disable=line-too-long
"""Only show profiler nodes consuming no less than 'min_float_ops'.
Please see https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/profile_model_architecture.md
on the caveats of calculating float operations.
Args:
min_float_ops: Only show profiler nodes with float operations
no less than this.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['min_float_ops'] = min_float_ops
return self
def with_accounted_types(self, account_type_regexes):
"""Selectively counting statistics based on node types.
Here, 'types' means the profiler nodes' properties. Profiler by default
consider device name (e.g. /job:xx/.../device:GPU:0) and operation type
(e.g. MatMul) as profiler nodes' properties. User can also associate
customized 'types' to profiler nodes through OpLogProto proto.
For example, user can select profiler nodes placed on gpu:0 with:
`account_type_regexes=['.*gpu:0.*']`
If none of a node's properties match the specified regexes, the node is
neither displayed nor accounted.
Args:
account_type_regexes: A list of regexes specifying the types.
Returns:
self.
"""
self._options['account_type_regexes'] = copy.copy(account_type_regexes)
return self
def with_node_names(self,
start_name_regexes=None,
show_name_regexes=None,
hide_name_regexes=None,
trim_name_regexes=None):
"""Regular expressions used to select profiler nodes to display.
After 'with_accounted_types' is evaluated, 'with_node_names' is
evaluated as follows:
For a profile data structure, profiler first finds the profiler
nodes matching 'start_name_regexes', and starts displaying profiler
nodes from there. Then, if a node matches 'show_name_regexes' and
doesn't match 'hide_name_regexes', it's displayed. If a node matches
'trim_name_regexes', profiler stops further searching that branch.
Args:
start_name_regexes: list of node name regexes to start displaying.
show_name_regexes: list of node name regexes to display.
hide_name_regexes: list of node name regexes that should be hidden.
trim_name_regexes: list of node name regexes from where to stop.
Returns:
self
"""
if start_name_regexes is not None:
self._options['start_name_regexes'] = copy.copy(start_name_regexes)
if show_name_regexes is not None:
self._options['show_name_regexes'] = copy.copy(show_name_regexes)
if hide_name_regexes is not None:
self._options['hide_name_regexes'] = copy.copy(hide_name_regexes)
if trim_name_regexes is not None:
self._options['trim_name_regexes'] = copy.copy(trim_name_regexes)
return self
def account_displayed_op_only(self, is_true):
"""Whether only account the statistics of displayed profiler nodes.
Args:
is_true: If true, only account statistics of nodes eventually
displayed by the outputs.
Otherwise, a node's statistics are accounted by its parents
as long as it's types match 'account_type_regexes', even if
it is hidden from the output, say, by hide_name_regexes.
Returns:
self
"""
self._options['account_displayed_op_only'] = is_true
return self
def with_empty_output(self):
"""Do not generate side-effect outputs."""
self._options['output'] = 'none'
return self
def with_stdout_output(self):
"""Print the result to stdout."""
self._options['output'] = 'stdout'
return self
def with_file_output(self, outfile):
"""Print the result to a file."""
self._options['output'] = 'file:outfile=%s' % outfile
return self
def with_timeline_output(self, timeline_file):
"""Generate a timeline json file."""
self._options['output'] = 'timeline:outfile=%s' % timeline_file
return self
def with_pprof_output(self, pprof_file):
"""Generate a pprof profile gzip file.
To use the pprof file:
pprof -png --nodecount=100 --sample_index=1 <pprof_file>
Args:
pprof_file: filename for output, usually suffixed with .pb.gz.
Returns:
self.
"""
self._options['output'] = 'pprof:outfile=%s' % pprof_file
return self
def order_by(self, attribute):
# pylint: disable=line-too-long
"""Order the displayed profiler nodes based on a attribute.
Supported attribute includes micros, bytes, occurrence, params, etc.
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md
Args:
attribute: An attribute the profiler node has.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['order_by'] = attribute
return self
def select(self, attributes):
# pylint: disable=line-too-long
"""Select the attributes to display.
See https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md
for supported attributes.
Args:
attributes: A list of attributes the profiler node has.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['select'] = copy.copy(attributes)
return self
def with_step(self, step):
"""Which profile step to use for profiling.
The 'step' here refers to the step defined by the `Profiler.add_step()` API.
Args:
step: When multiple steps of profiles are available, select which step's
profile to use. If -1, use the average of all available steps.
Returns:
self
"""
self._options['step'] = step
return self
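# A compact sketch that chains several of the methods defined above: start
# from the time_and_memory() preset, keep only nodes whose names match a
# regex, require at least 1ms of execution time, order by time, and emit a
# timeline file. The regex and output path are placeholders.
from tensorflow.python.profiler import option_builder

def build_timeline_options(outfile='/tmp/profile_timeline'):
  return (option_builder.ProfileOptionBuilder(
              option_builder.ProfileOptionBuilder.time_and_memory())
          .with_node_names(show_name_regexes=['.*conv.*'])
          .with_min_execution_time(min_micros=1000)
          .order_by('micros')
          .with_timeline_output(outfile)
          .build())
# The returned value is a plain dict, suitable for the `options` argument of
# tf.compat.v1.profiler.profile() or Profiler.profile_name_scope().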
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/option_builder.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import option_builder
# pylint: disable=g-bad-import-order
from tensorflow.python.profiler import profile_context
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
builder = option_builder.ProfileOptionBuilder
class ProfilerContextTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasics(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), "dump")
opts = builder(builder.time_and_memory()
).with_file_output(outfile).build()
x = lib.BuildFullModel()
profile_str = None
profile_step100 = os.path.join(test.get_temp_dir(), "profile_100")
with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
pctx.add_auto_profiling("op", options=opts, profile_steps=[15, 50, 100])
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
total_steps = 101
for i in range(total_steps):
self.evaluate(x)
if i == 14 or i == 49:
self.assertTrue(gfile.Exists(outfile))
gfile.Remove(outfile)
if i == 99:
self.assertTrue(gfile.Exists(profile_step100))
with gfile.Open(outfile, "r") as f:
profile_str = f.read()
gfile.Remove(outfile)
self.assertEqual(set([15, 50, 100]), set(pctx.get_profiles("op").keys()))
with lib.ProfilerFromFile(
os.path.join(test.get_temp_dir(), "profile_100")) as profiler:
profiler.profile_operations(options=opts)
with gfile.Open(outfile, "r") as f:
if test.is_built_with_rocm():
# The profiler output in ROCm mode includes an extra warning
# related to the lack of stream tracing in ROCm mode.
# We need to skip this warning when doing the diff.
profile_str = "\n".join(profile_str.split("\n")[7:])
self.assertEqual(profile_str, f.read())
@test_util.run_deprecated_v1
def testAutoTracingInDebugMode(self):
ops.reset_default_graph()
x = lib.BuildFullModel()
with profile_context.ProfileContext(test.get_temp_dir(), debug=True):
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(10):
self.evaluate(x)
for f in gfile.ListDirectory(test.get_temp_dir()):
# Warm up, no tracing.
self.assertFalse("run_meta" in f)
self.evaluate(x)
self.assertTrue(
gfile.Exists(os.path.join(test.get_temp_dir(), "run_meta_11")))
gfile.Remove(os.path.join(test.get_temp_dir(), "run_meta_11"))
# This fetch was already traced, so no new run_meta is dumped.
self.evaluate(x)
for f in gfile.ListDirectory(test.get_temp_dir()):
self.assertFalse("run_meta" in f)
@test_util.run_deprecated_v1
def testDisabled(self):
ops.reset_default_graph()
x = lib.BuildFullModel()
with profile_context.ProfileContext(test.get_temp_dir(),
enabled=False) as pctx:
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(10):
self.evaluate(x)
self.assertTrue(pctx.profiler is None)
self.assertTrue(
getattr(session.BaseSession, "profile_context", None) is None)
with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
for _ in range(10):
self.evaluate(x)
self.assertFalse(pctx.profiler is None)
self.assertFalse(
getattr(session.BaseSession, "profile_context", None) is None)
if __name__ == "__main__":
test.main()
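# A condensed sketch of the auto-profiling pattern tested above: wrap the
# session in a ProfileContext and let it dump an 'op'-view report at chosen
# steps. The directory, regexes, and step numbers are placeholders, and
# `train_op` is assumed to be built in the default graph; profile_context is
# an internal (non tf.*) module, as in the test imports above.
import os
import tensorflow as tf
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import profile_context

def run_with_auto_profiling(train_op, logdir, num_steps=20):
  opts = (option_builder.ProfileOptionBuilder(
              option_builder.ProfileOptionBuilder.time_and_memory())
          .with_file_output(os.path.join(logdir, 'op_profile'))
          .build())
  with profile_context.ProfileContext(logdir) as pctx:
    pctx.add_auto_profiling('op', options=opts, profile_steps=[5, 10, 15])
    with tf.compat.v1.Session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      for _ in range(num_steps):
        sess.run(train_op)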
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/profile_context_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import io
import os
import random
import re
import numpy as np
from tensorflow.core.profiler import profile_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import profile_context
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
from tensorflow.python.util import compat
builder = option_builder.ProfileOptionBuilder
class PrintModelAnalysisTest(test.TestCase):
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def testDumpToFile(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = builder(builder.trainable_variables_parameter()
).with_file_output(outfile).build()
with session.Session(config=self._no_rewrite_session_config()) as sess:
_ = lib.BuildSmallModel()
model_analyzer.profile(sess.graph, options=opts)
with gfile.Open(outfile, 'r') as f:
self.assertEqual(u'node name | # parameters\n'
'_TFProfRoot (--/451 params)\n'
' DW (3x3x3x6, 162/162 params)\n'
' DW2 (2x2x6x12, 288/288 params)\n'
' ScalarW (1, 1/1 params)\n',
lib.CheckAndRemoveDoc(f.read()))
@test_util.run_v1_only('b/120545219')
def testSelectEverythingDetail(self):
ops.reset_default_graph()
dev = '/device:GPU:0' if test.is_gpu_available() else '/device:CPU:0'
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.select(['micros', 'bytes', 'params', 'float_ops', 'occurrence',
'device', 'op_types', 'input_shapes']).build())
with profile_context.ProfileContext(test.get_temp_dir(),
trace_steps=[],
dump_steps=[]) as pctx:
with session.Session(
config=self._no_rewrite_session_config()) as sess, ops.device(dev):
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
pctx.trace_next_step()
pctx.dump_next_step()
_ = self.evaluate(x)
pctx.profiler.profile_name_scope(options=opts)
with gfile.Open(outfile, 'r') as f:
# pylint: disable=line-too-long
dump_str = lib.CheckAndRemoveDoc(f.read())
outputs = dump_str.split('\n')
self.assertEqual(outputs[0],
'node name | # parameters | # float_ops | requested bytes | total execution time | accelerator execution time | cpu execution time | assigned devices | op types | op count (run|defined) | input shapes')
for o in outputs[1:]:
if o.find('Conv2D ') > 0:
metrics = o[o.find('(') +1: o.find(')')].split(',')
# Make sure time is profiled.
gap = 1 if test.is_gpu_available() else 2
for i in range(3, 6, gap):
mat = re.search('(.*)(?:us|ms|sec)/(.*)(?:us|ms|sec)', metrics[i])
self.assertGreater(float(mat.group(1)), 0.0)
self.assertGreater(float(mat.group(2)), 0.0)
# Make sure device is profiled.
if test.is_gpu_available():
self.assertTrue(metrics[6].find('gpu') > 0)
self.assertFalse(metrics[6].find('cpu') > 0)
else:
self.assertFalse(metrics[6].find('gpu') > 0)
self.assertTrue(metrics[6].find('cpu') > 0)
# Make sure float_ops is profiled.
mat = re.search('(.*)k/(.*)k flops', metrics[1].strip())
self.assertGreater(float(mat.group(1)), 0.0)
self.assertGreater(float(mat.group(2)), 0.0)
# Make sure op_count is profiled.
self.assertEqual(metrics[8].strip(), '1/1|1/1')
# Make sure input_shapes is profiled.
self.assertEqual(metrics[9].strip(), '0:2x6x6x3|1:3x3x3x6')
if o.find('DW (3x3x3x6') > 0:
metrics = o[o.find('(') +1: o.find(')')].split(',')
mat = re.search('(.*)/(.*) params', metrics[1].strip())
self.assertGreater(float(mat.group(1)), 0.0)
self.assertGreater(float(mat.group(2)), 0.0)
# pylint: enable=line-too-long
# Test that profiler restored from profile file gives the same result.
gfile.Remove(outfile)
profile_file = os.path.join(test.get_temp_dir(), 'profile_1')
with lib.ProfilerFromFile(profile_file) as profiler:
profiler.profile_name_scope(options=opts)
with gfile.Open(outfile, 'r') as f:
self.assertEqual(dump_str, lib.CheckAndRemoveDoc(f.read()))
def testSelectEverything(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.select(['params', 'float_ops', 'occurrence', 'device', 'op_types',
'input_shapes']).build())
with session.Session(config=self._no_rewrite_session_config()
) as sess, ops.device('/device:CPU:0'):
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
model_analyzer.profile(
sess.graph, run_meta, options=opts)
def testSimpleCodeView(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
# TODO(xpan): Test 'micros'. Since the execution time changes each run,
# it's a bit difficult to test it now.
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.with_node_names(show_name_regexes=['.*model_analyzer_testlib.*'])
.account_displayed_op_only(False)
.select(['bytes', 'params', 'float_ops', 'num_hidden_ops', 'device',
'input_shapes']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
model_analyzer.profile(
sess.graph, run_meta, cmd='code', options=opts)
with gfile.Open(outfile, 'r') as f:
# pylint: disable=line-too-long
self.assertEqual(
'node name | requested bytes | # parameters | # float_ops | assigned devices | in',
lib.CheckAndRemoveDoc(f.read())[0:80])
# pylint: enable=line-too-long
@test_util.run_v1_only('b/120545219')
def testComplexCodeView(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.with_node_names(show_name_regexes=
['.*model_analyzer_testlib.py.*'])
.account_displayed_op_only(False)
.select(['params', 'float_ops']).build())
with profile_context.ProfileContext(test.get_temp_dir(),
trace_steps=[],
dump_steps=[]) as pctx:
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
pctx.trace_next_step()
_ = self.evaluate(x)
tfprof_node = pctx.profiler.profile_python(options=opts)
# pylint: disable=line-too-long
with gfile.Open(outfile, 'r') as f:
lines = f.read().split('\n')
self.assertGreater(len(lines), 5)
result = '\n'.join([l[:min(len(l), 80)] for l in lines])
self.assertTrue(
compat.as_text(lib.CheckAndRemoveDoc(result))
.startswith('node name | # parameters | # float_ops'))
self.assertLess(0, tfprof_node.total_exec_micros)
self.assertEqual(2844, tfprof_node.total_parameters)
# The graph is modified when MKL is enabled, so total_float_ops will
# be different.
if test_util.IsMklEnabled():
self.assertLess(101600, tfprof_node.total_float_ops)
else:
self.assertLess(145660, tfprof_node.total_float_ops)
self.assertEqual(10, len(tfprof_node.children))
self.assertEqual('_TFProfRoot', tfprof_node.name)
self.assertEqual(
'model_analyzer_testlib.py:63:BuildFullModel',
tfprof_node.children[0].name)
self.assertEqual(
'model_analyzer_testlib.py:63:BuildFullModel (gradient)',
tfprof_node.children[1].name)
self.assertEqual(
'model_analyzer_testlib.py:66:BuildFullModel',
tfprof_node.children[2].name)
self.assertEqual(
'model_analyzer_testlib.py:66:BuildFullModel (gradient)',
tfprof_node.children[3].name)
self.assertEqual(
'model_analyzer_testlib.py:67:BuildFullModel',
tfprof_node.children[4].name)
self.assertEqual(
'model_analyzer_testlib.py:67:BuildFullModel (gradient)',
tfprof_node.children[5].name)
self.assertEqual(
'model_analyzer_testlib.py:69:BuildFullModel',
tfprof_node.children[6].name)
self.assertEqual(
'model_analyzer_testlib.py:70:BuildFullModel',
tfprof_node.children[7].name)
self.assertEqual(
'model_analyzer_testlib.py:70:BuildFullModel (gradient)',
tfprof_node.children[8].name)
self.assertEqual(
'model_analyzer_testlib.py:72:BuildFullModel',
tfprof_node.children[9].name)
# pylint: enable=line-too-long
def testCodeViewLeafGraphNode(self):
ops.reset_default_graph()
opts = (builder(builder.trainable_variables_parameter())
.with_empty_output()
.with_accounted_types(['.*'])
.account_displayed_op_only(False)
.select(['bytes', 'params', 'float_ops', 'device']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
tfprof_node = model_analyzer.profile(
sess.graph, run_meta, cmd='code', options=opts)
leaf = tfprof_node
while leaf.children:
self.assertEqual(0, len(leaf.graph_nodes))
leaf = leaf.children[0]
self.assertEqual(1, len(leaf.graph_nodes))
def testTimeline(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'timeline')
opts = (builder(builder.trainable_variables_parameter())
.with_max_depth(100000)
.with_step(0)
.with_timeline_output(outfile)
.with_accounted_types(['.*']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
_ = model_analyzer.profile(
sess.graph, run_meta, cmd='graph', options=opts)
with gfile.Open(outfile + '_0', 'r') as f:
# Test that a json file is created.
# TODO(xpan): tfprof Timeline isn't quite correct on Windows.
# Investigate why.
if os.name != 'nt':
self.assertLess(1000, len(f.read()))
else:
self.assertLess(1, len(f.read()))
def testOpView(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.with_min_occurrence(10)
.order_by('occurrence')
.select(['params', 'micros', 'bytes',
'peak_bytes', 'residual_bytes',
'output_bytes', 'occurrence', 'input_shapes']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
tfprof_node = model_analyzer.profile(
sess.graph, run_meta, cmd='op', options=opts)
with gfile.Open(outfile, 'r') as f:
# pylint: disable=line-too-long
self.assertEqual(
'nodename|requestedbytes|peakbytes|residualbytes|outputbytes|totalexecutiontime|acceleratorexecutiontime|cpuexecutiontime|#parameters|opoccurrence(run|defined)|inputshapes',
lib.CheckAndRemoveDoc(f.read()).replace('\t',
'').replace(' ', '')[0:170])
# pylint: enable=line-too-long
total_children = 0
last_occurrence = 1e32
input_shapes = 0
last_total_micros = tfprof_node.total_exec_micros
last_micros = tfprof_node.exec_micros
while tfprof_node.children:
for gnode in tfprof_node.graph_nodes:
input_shapes += len(gnode.input_shapes)
self.assertEqual(len(tfprof_node.children), 1)
tfprof_node = tfprof_node.children[0]
self.assertEqual(
last_total_micros, tfprof_node.total_exec_micros + last_micros)
last_total_micros = tfprof_node.total_exec_micros
last_micros = tfprof_node.exec_micros
total_children += 1
self.assertLessEqual(len(tfprof_node.graph_nodes), last_occurrence)
last_occurrence = len(tfprof_node.graph_nodes)
self.assertGreater(input_shapes, 0)
def testAdvisor(self):
ops.reset_default_graph()
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
advice_pb = model_analyzer.advise(sess.graph, run_meta)
self.assertTrue('AcceleratorUtilizationChecker' in advice_pb.checkers)
self.assertTrue('ExpensiveOperationChecker' in advice_pb.checkers)
self.assertTrue('OperationChecker' in advice_pb.checkers)
checker = advice_pb.checkers['AcceleratorUtilizationChecker']
if test.is_gpu_available():
self.assertGreater(len(checker.reports), 0)
else:
self.assertEqual(len(checker.reports), 0)
checker = advice_pb.checkers['ExpensiveOperationChecker']
self.assertGreater(len(checker.reports), 0)
def pprof_test_helper(self, attribute, should_fail=False):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), attribute + '_pprof.pb.gz')
opts = (builder(builder.time_and_memory())
.select([attribute])
.with_max_depth(100000)
.with_node_names(trim_name_regexes=['ops.py.*'])
.with_pprof_output(outfile).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
_ = model_analyzer.profile(
sess.graph, run_meta, cmd='code', options=opts)
if should_fail:
self.assertFalse(gfile.Exists(outfile))
return
profile_pb = profile_pb2.Profile()
with gfile.Open(outfile, 'rb') as f:
with gzip.GzipFile(fileobj=io.BytesIO(f.read())) as gzipf:
profile_pb.ParseFromString(gzipf.read())
self.assertGreater(len(profile_pb.sample), 10)
self.assertGreater(len(profile_pb.location), 10)
self.assertGreater(len(profile_pb.function), 10)
self.assertGreater(len(profile_pb.string_table), 30)
has_rnn = False
has_loop = False
for s in profile_pb.string_table:
if s.find('rnn') > 0:
has_rnn = True
if s.find('while') > 0:
has_loop = True
self.assertFalse(s.startswith('ops.py'))
self.assertTrue(has_rnn)
self.assertTrue(has_loop)
def testPprof(self):
for attr in ['micros', 'bytes', 'accelerator_micros', 'cpu_micros',
'params', 'float_ops']:
self.pprof_test_helper(attr)
for attr in ['op_types', 'device', 'input_shapes']:
self.pprof_test_helper(attr, True)
def testMinOption(self):
ops.reset_default_graph()
def check_min(nodes, mm=0, mam=0, mcm=0, mb=0, mpb=0, mrb=0, mob=0):
for n in nodes:
if mm > 0:
self.assertGreaterEqual(n.exec_micros, mm)
if mam > 0:
self.assertGreaterEqual(n.accelerator_exec_micros, mam)
if mcm > 0:
self.assertGreaterEqual(n.cpu_exec_micros, mcm)
if mb > 0:
self.assertGreaterEqual(n.requested_bytes, mb)
if mpb > 0:
self.assertGreaterEqual(n.peak_bytes, mpb)
if mrb > 0:
self.assertGreaterEqual(n.residual_bytes, mrb)
if mob > 0:
self.assertGreaterEqual(n.output_bytes, mob)
check_min(n.children, mm, mam, mcm, mb, mpb, mrb, mob)
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
min_val = random.randint(0, 10000)
opts = builder(builder.time_and_memory(min_micros=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mm=min_val)
opts = builder(builder.time_and_memory(min_accelerator_micros=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mam=min_val)
opts = builder(builder.time_and_memory(min_cpu_micros=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mcm=min_val)
opts = builder(builder.time_and_memory(min_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mb=min_val)
opts = builder(builder.time_and_memory(min_peak_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mpb=min_val)
opts = builder(builder.time_and_memory(min_residual_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mrb=min_val)
opts = builder(builder.time_and_memory(min_output_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mob=min_val)
def testSelectOption(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
def check_selection(selected, not_selected):
with gfile.Open(outfile, 'r') as f:
s = f.read()
for attr in selected:
self.assertTrue(s.find(attr) > 0, s)
for attr in not_selected:
self.assertFalse(s.find(attr) > 0, s)
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
opts = builder(builder.time_and_memory()
).with_file_output(outfile).select(['micros']).build()
_ = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_selection(['total execution time', 'accelerator execution time'],
['bytes'])
opts = builder(builder.time_and_memory()
).with_file_output(outfile).select(['bytes']).build()
_ = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_selection(['requested bytes'],
['peak bytes', 'residual bytes', 'output bytes'])
opts = builder(builder.time_and_memory()).with_file_output(
outfile).select(
['peak_bytes', 'residual_bytes', 'output_bytes']).build()
_ = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_selection(['peak bytes', 'residual bytes', 'output bytes'],
['requested_bytes'])
def _trainLoop(self, train_op, train_steps, time_dir, time_step,
memory_dir, memory_step, profile_dir, dump_step):
with session.Session(config=self._no_rewrite_session_config()) as sess:
self.evaluate(variables.global_variables_initializer())
# start from 1 because variable_initializer took one step.
for i in range(1, train_steps + 1):
_ = self.evaluate(train_op)
if i in time_step:
ret = gfile.ListDirectory(time_dir)
self.assertEqual(len(ret), 1)
self.assertTrue(
gfile.Open(os.path.join(time_dir, ret[0]), 'r').read()
.find('execution time') > 0)
_ = [gfile.Remove(os.path.join(time_dir, x)) for x in ret]
else:
self.assertEqual(len(gfile.ListDirectory(time_dir)), 0)
if i in memory_step:
ret = gfile.ListDirectory(memory_dir)
self.assertEqual(len(ret), 1)
self.assertTrue(
gfile.Open(os.path.join(memory_dir, ret[0]), 'r').read()
.find('requested bytes') > 0)
_ = [gfile.Remove(os.path.join(memory_dir, x)) for x in ret]
else:
self.assertEqual(len(gfile.ListDirectory(memory_dir)), 0)
if i in dump_step:
ret = gfile.ListDirectory(profile_dir)
self.assertAllEqual(ret, ['profile_%d' % i])
_ = [gfile.Remove(os.path.join(profile_dir, x)) for x in ret]
else:
if i < dump_step[0]:
self.assertFalse(gfile.Exists(profile_dir))
else:
self.assertEqual(len(gfile.ListDirectory(profile_dir)), 0)
@test_util.run_v1_only('b/120545219')
def testAutoProfiling(self):
ops.reset_default_graph()
time_dir = os.path.join(test.get_temp_dir(), 'time')
memory_dir = os.path.join(test.get_temp_dir(), 'memory')
profile_dir = os.path.join(test.get_temp_dir(), 'dir/dir2/profile')
# TODO(xpan): Should we create parent directory for them?
gfile.MkDir(time_dir)
gfile.MkDir(memory_dir)
time_opts = (builder(builder.time_and_memory())
.with_file_output(os.path.join(time_dir, 'profile'))
.select(['micros']).build())
memory_opts = (builder(builder.time_and_memory())
.with_file_output(os.path.join(memory_dir, 'profile'))
.select(['bytes']).build())
time_steps = [2, 3]
memory_steps = [1, 3]
dump_steps = [3, 4]
x = lib.BuildSmallModel()
with profile_context.ProfileContext(profile_dir,
trace_steps=[1, 2, 3],
dump_steps=[3, 4]) as pctx:
pctx.add_auto_profiling('scope', time_opts, time_steps)
pctx.add_auto_profiling('scope', memory_opts, memory_steps)
self._trainLoop(x, 10, time_dir, time_steps,
memory_dir, memory_steps, profile_dir, dump_steps)
@test_util.run_v1_only('b/120545219')
def testOOM(self):
if not test.is_gpu_available():
return
ops.reset_default_graph()
with ops.device('/device:GPU:0'):
a = random_ops.random_normal([1, 10000, 20000], name='test_random1')
b = random_ops.random_normal([30000, 10000, 1], name='test_random2')
c = a * b
try:
with session.Session(config=self._no_rewrite_session_config()) as sess:
sess.run(c, options=config_pb2.RunOptions(
report_tensor_allocations_upon_oom=True))
except Exception as e: # pylint: disable=broad-except
exception_str = '%s' % e
# This trace reports allocations for the two random tensors.
self.assertTrue(
'OOM when allocating tensor with shape[30000,10000,20000]' in
exception_str)
mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',
exception_str)
self.assertGreater(float(mat.group(1)), 0.0)
mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',
exception_str)
self.assertGreater(float(mat.group(1)), 0.0)
@test_util.run_v1_only('b/120545219')
def testDistributedOOM(self):
if not test.is_gpu_available():
return
ops.reset_default_graph()
workers, _ = test_util.create_local_cluster(2, 0)
with ops.device('/job:worker/replica:0/task:0/gpu:0'):
a = random_ops.random_normal([1, 10000, 20000], name='test_random1')
with ops.device('/job:worker/replica:0/task:1/gpu:0'):
b = random_ops.random_normal([30000, 10000, 1], name='test_random2')
c = a * b
try:
with session.Session(workers[1].target) as sess:
sess.run(c, options=config_pb2.RunOptions(
report_tensor_allocations_upon_oom=True))
except Exception as e: # pylint: disable=broad-except
exception_str = '%s' % e
# test_random2 is reported because it's allocated in worker 1.
self.assertTrue('Current usage from device: '
'/job:worker/replica:0/task:1/device:GPU:0, '
'allocator: GPU_0_bfc' in exception_str)
mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',
exception_str)
self.assertGreater(float(mat.group(1)), 0.0)
# test_random1 is not reported because it's allocated in worker 0.
mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',
exception_str)
self.assertTrue(mat is None)
@test_util.run_v1_only('b/120545219')
def testTrackPersistentBytes(self):
ops.reset_default_graph()
a = array_ops.constant(np.ones((100, 100)))
b = array_ops.constant(np.ones((100, 100)))
c = a * b
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.min_graph_nodes = -1
with session.Session(config=config) as sess:
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess.run(c, options=run_options, run_metadata=run_metadata)
options = option_builder.ProfileOptionBuilder.time_and_memory()
options['min_bytes'] = 0
options['select'] = ('bytes', 'peak_bytes', 'output_bytes',
'residual_bytes')
ret = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
run_metadata = config_pb2.RunMetadata()
sess.run(c, options=run_options, run_metadata=run_metadata)
ret2 = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
n = lib.SearchTFProfNode(ret, 'mul')
n2 = lib.SearchTFProfNode(ret2, 'mul')
self.assertGreater(n.peak_bytes, 0)
self.assertGreater(n.output_bytes, 0)
self.assertGreater(n.residual_bytes, 0)
self.assertEqual(n.peak_bytes, n2.peak_bytes)
self.assertEqual(n.output_bytes, n2.output_bytes)
self.assertEqual(n.residual_bytes, n2.residual_bytes)
@test_util.run_v1_only('b/120545219')
def testTraceLoopBytes(self):
if not test.is_gpu_available(): return
ops.reset_default_graph()
steps = 100
with ops.device('/gpu:0'):
x = array_ops.ones((100, 100), dtype=dtypes.float32)
n = array_ops.constant(steps, dtype=dtypes.int32)
x1 = array_ops.ones((100, 100))
x *= x1
def loop_body(i, x):
x *= x
return i + 1, x
_, y = control_flow_ops.while_loop(
lambda i, x: i < n, loop_body,
[array_ops.constant(0), x])
grad = gradients.gradients(y, [x1])
with session.Session(config=self._no_rewrite_session_config()) as sess:
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess.run(grad, options=run_options, run_metadata=run_metadata)
options = option_builder.ProfileOptionBuilder.time_and_memory()
options['min_bytes'] = 0
options['min_micros'] = 0
options['select'] = ('bytes', 'peak_bytes', 'output_bytes',
'residual_bytes')
options['output'] = 'none'
ret_pb = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
self.assertGreater(ret_pb.total_requested_bytes, 1000000)
if __name__ == '__main__':
test.main()
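# A distilled form of the pattern repeated in the tests above: run once with
# full tracing, then hand the graph and RunMetadata to model_analyzer.profile.
# The 'scope' view with the time_and_memory preset is only one of the
# combinations exercised here; `fetches` is assumed to be built already in
# the default graph.
import tensorflow as tf
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler import option_builder

def profile_scope_view(fetches):
  run_options = tf.compat.v1.RunOptions(
      trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
  run_meta = tf.compat.v1.RunMetadata()
  with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    sess.run(fetches, options=run_options, run_metadata=run_meta)
    opts = option_builder.ProfileOptionBuilder.time_and_memory()
    # Returns a GraphNodeProto tree rooted at _TFProfRoot.
    return model_analyzer.profile(
        sess.graph, run_meta=run_meta, cmd='scope', options=opts)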
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/model_analyzer_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TraceMe allows the profiler to trace python events.
Usage:
with profiler.TraceMe('name'):
...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.pywrap_tensorflow import PythonTraceMe
class TraceMe(object):
"""Context manager that generates a trace event in the profiler."""
def __init__(self, name, **kwargs):
if PythonTraceMe.IsEnabled():
if kwargs:
name += '#' + ','.join(
[key + '=' + str(value) for key, value in kwargs.items()]) + '#'
self._traceme = PythonTraceMe(name)
else:
self._traceme = None
def __enter__(self):
if self._traceme:
self._traceme.Enter()
def __exit__(self, exc_type, exc_val, exc_tb):
if self._traceme:
self._traceme.Exit()
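# A small usage sketch for the TraceMe context manager defined above: events
# are only recorded while the profiler is actively tracing, and keyword
# arguments are folded into the event name as '#key=value,...#' metadata.
# The step/batch names below are illustrative.
def traced_train_step(step, batch):
  with TraceMe('train_step', step=step, batch_size=len(batch)):
    pass  # the forward/backward pass for `batch` would run here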
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/traceme.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TFProfLoggerTest(test.TestCase):
def _BuildSmallPlaceholderlModel(self):
a = array_ops.placeholder(dtypes.int32, [2, 2])
b = array_ops.placeholder(dtypes.int32, [2, 2])
y = math_ops.matmul(a, b)
return a, b, y
def _BuildSmallModel(self):
a = constant_op.constant([[1, 2], [3, 4]])
b = constant_op.constant([[1, 2], [3, 4]])
return math_ops.matmul(a, b)
# pylint: disable=pointless-string-statement
"""# TODO(xpan): This out of core so it doesn't depend on contrib.
def testFillMissingShape(self):
a, b, y = self._BuildSmallPlaceholderlModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y,
options=run_options,
run_metadata=run_metadata,
feed_dict={a: [[1, 2], [2, 3]],
b: [[1, 2], [2, 3]]})
graph2 = ops.Graph()
# Use copy_op_to_graph to remove shape information.
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
self.assertEquals('<unknown>', str(y2.get_shape()))
tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
self.assertEquals('(2, 2)', str(y2.get_shape()))
def testFailedFillMissingShape(self):
y = self._BuildSmallModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y, options=run_options, run_metadata=run_metadata)
graph2 = ops.Graph()
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
self.assertEquals('<unknown>', str(y2.get_shape()))
# run_metadata has special name for MatMul, hence failed to fill shape.
tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
self.assertEquals('<unknown>', str(y2.get_shape()))
"""
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/tfprof_logger_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""profiler python module provides APIs to profile TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.core.profiler.tfprof_log_pb2 import OpLogProto
from tensorflow.core.profiler.tfprof_output_pb2 import AdviceProto
from tensorflow.core.profiler.tfprof_output_pb2 import GraphNodeProto
from tensorflow.core.profiler.tfprof_output_pb2 import MultiGraphNodeProto
from tensorflow.python.profiler.model_analyzer import advise
from tensorflow.python.profiler.model_analyzer import profile
from tensorflow.python.profiler.model_analyzer import Profiler
from tensorflow.python.profiler.option_builder import ProfileOptionBuilder
from tensorflow.python.profiler.tfprof_logger import write_op_log
from tensorflow.python.util.tf_export import tf_export
_allowed_symbols = [
'Profiler',
'profile',
'ProfileOptionBuilder',
'advise',
'write_op_log',
]
_allowed_symbols.extend([
'GraphNodeProto',
'MultiGraphNodeProto',
'AdviceProto',
'OpLogProto',
])
# Export protos
tf_export(v1=['profiler.GraphNodeProto'])(GraphNodeProto)
tf_export(v1=['profiler.MultiGraphNodeProto'])(MultiGraphNodeProto)
tf_export(v1=['profiler.AdviceProto'])(AdviceProto)
tf_export(v1=['profiler.OpLogProto'])(OpLogProto)
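# A brief sketch using the symbols re-exported above: profile a traced step
# and ask the advisor for automatically detected issues. The session and
# fetches are assumed to exist; the default advice options are used.
import tensorflow as tf

def profile_and_advise(sess, fetches):
  run_options = tf.compat.v1.RunOptions(
      trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
  run_meta = tf.compat.v1.RunMetadata()
  sess.run(fetches, options=run_options, run_metadata=run_meta)
  advice = tf.compat.v1.profiler.advise(sess.graph, run_meta=run_meta)
  # advice.checkers maps checker names, e.g. 'ExpensiveOperationChecker',
  # to their reports (see AdviceProto above).
  return advice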
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/profiler.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Profiler for TensorFlow models that outputs data in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto for pprof
profile format.
The following needs to be set for profiler to work:
* trace_level needs to be set to FULL_TRACE
* run_metadata object should be passed in to session.run call
Sample usage:
options = tf.compat.v1.RunOptions(
    trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
run_metadata = tf.compat.v1.RunMetadata()
with tf.compat.v1.Session() as sess:
...
sess.run(computation, run_metadata=run_metadata, options=options)
pprof_profiler.profile(sess.graph, run_metadata, output_dir)
The code above would output a pprof profile to a separate output_dir/.*.pb.gz
file for each device. These files can be passed to pprof for formatting.
For example:
pprof -png --nodecount=100 --sample_index=1 output_dir/profile_output.pb.gz
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
from collections import namedtuple
import gzip
import os
import string
import sys
import time
from proto import profile_pb2
if sys.version_info < (3,):
maketrans = string.maketrans
else:
maketrans = str.maketrans
ProfileDatum = namedtuple('ProfileDatum', [
'node_exec_stats', 'op_type', 'traceback'])
class StringTable(object):
"""Keeps track of strings to add to string_table in pprof proto."""
def __init__(self):
# Pprof requires first entry in string_table to be ''.
self._string_table = ['']
self._string_to_index = {'': 0}
def index_of(self, value_str):
"""Get index of value_str in the string table.
If value_str is not in the string table, we will add it at the end
and then return the new index.
Args:
value_str: (string) Value to lookup/add in/to the string table.
Returns:
Index of value_str in the string table.
"""
if value_str is None:
value_str = ''
if value_str in self._string_to_index:
return self._string_to_index[value_str]
index = len(self._string_table)
self._string_table.append(value_str)
self._string_to_index[value_str] = index
return index
def next_index(self):
"""Gets index that would be assigned to the next added string.
Returns:
Index of the next string if it was added.
"""
return len(self._string_table)
def string_table(self):
"""Returns a list of strings to store in pprof's string_table."""
return self._string_table
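# A tiny, self-contained illustration of the behavior documented above (it is
# not used by the rest of this module): the empty string is pre-seeded at
# index 0, repeated lookups are deduplicated, and next_index() always equals
# the current table length.
def _string_table_example():
  table = StringTable()
  assert table.index_of('') == 0            # pre-seeded empty string
  first = table.index_of('MatMul')          # appended at index 1
  assert table.index_of('MatMul') == first  # same string, same index
  assert table.next_index() == len(table.string_table())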
class Functions(object):
"""Keeps track of `Function` protos for pprof profile."""
def __init__(self, string_table):
"""Constructor.
Args:
string_table: A `StringTable` object.
"""
self._string_table = string_table
# Maps tuples in the form (file_path, function_name, start_line_number)
# to `Function` protos.
self._function_key_to_function = {}
def index_of(self, file_path, function_name, function_start_line):
"""Returns index of the function, adding the function if needed.
Args:
file_path: (string) Path to file where the function is defined.
function_name: (string) Function name.
function_start_line: (integer) Start line number of function definition.
Returns:
Function index.
"""
function_key = (file_path, function_name, function_start_line)
if function_key in self._function_key_to_function:
return self._function_key_to_function[function_key].id
else:
# Function indexes should start from 1
function_index = len(self._function_key_to_function) + 1
function = profile_pb2.Function()
function.id = function_index
function.name = self._string_table.index_of(function_name)
function.filename = self._string_table.index_of(file_path)
function.start_line = function_start_line
self._function_key_to_function[function_key] = function
return function_index
def function_protos(self):
"""Returns list of `profile_pb2.Function` protos."""
return self._function_key_to_function.values()
class Locations(object):
"""Keeps track of `Location` protos for pprof profile.
`Locations` store information about function call locations.
"""
def __init__(self, functions):
"""Constructor.
Args:
functions: A `Functions` object.
"""
self._functions = functions
# Maps tuples in the form (file_path, called_function_name, line_number)
# to `Location` protos.
self._location_key_to_location = {}
def index_of(
self, file_path, line_number, called_function_name, called_file_path,
called_function_start_line):
"""Returns index of the location, adding the location if needed.
Args:
file_path: (string) Path to file that makes the call.
line_number: (integer) Call line number.
called_function_name: (string) Function name of the function called at
`file_path` and `line_number`.
called_file_path: (string) Path to file where the called function is
defined.
called_function_start_line: (integer) Start line number of called
function definition in `called_file_path` file.
Returns:
Index of location.
"""
location_key = (file_path, called_function_name, line_number)
if location_key in self._location_key_to_location:
location = self._location_key_to_location[location_key]
return location.id
else:
# Location indexes should start from 1
location_index = len(self._location_key_to_location) + 1
location = profile_pb2.Location()
location.id = location_index
self._location_key_to_location[location_key] = location
line = location.line.add()
line.function_id = self._functions.index_of(
called_file_path, called_function_name, called_function_start_line)
line.line = line_number
return location_index
def location_protos(self):
"""Returns list of `profile_pb2.Location` protos."""
return self._location_key_to_location.values()
class Samples(object):
"""Keeps track of `Sample` protos for pprof profile.
Samples store the following statistics in order:
count, all_time, op_time
"""
def __init__(self, string_table):
"""Constructor.
Args:
string_table: A `StringTable` object.
"""
self._string_table = string_table
# TODO(annarev): figure out if location is unique for each node name.
# If not, also key this dictionary based on location ids.
self._node_name_to_sample = {}
def add(self, datum, location_ids):
"""Adds a sample data point.
Args:
datum: `ProfileDatum` to add a sample for.
location_ids: List of numeric location ids for this
sample.
"""
node_name = datum.node_exec_stats.node_name
if node_name in self._node_name_to_sample:
sample = self._node_name_to_sample[node_name]
sample.location_id.extend(location_ids)
else:
sample = profile_pb2.Sample()
# Sample stores 3 values: count, all_time, op_time
sample.value.extend([0, 0, 0])
label = sample.label.add()
label.key = self._string_table.index_of('node_name')
label.str = self._string_table.index_of(node_name)
label = sample.label.add()
label.key = self._string_table.index_of('op_type')
label.str = self._string_table.index_of(datum.op_type)
self._node_name_to_sample[node_name] = sample
sample.value[0] += 1
sample.value[1] += datum.node_exec_stats.all_end_rel_micros
sample.value[2] += (
datum.node_exec_stats.op_end_rel_micros -
datum.node_exec_stats.op_start_rel_micros)
def get_sample_protos(self):
"""Returns list of `Sample` protos for pprof profile."""
return self._node_name_to_sample.values()
class PprofProfiler(object):
"""Creates profiles in pprof format."""
def __init__(self, graph, run_metadata):
"""Constructor.
Args:
graph: A `Graph` instance.
run_metadata: A list of `RunMetadata` objects.
"""
self._graph = graph
self._run_metadata = run_metadata
self._string_table = StringTable()
self._functions = Functions(self._string_table)
self._locations = Locations(self._functions)
def profile(self):
"""Generates pprof profiles.
Returns:
Dictionary mapping from device name to proto in `profile_pb2.Profile`
format.
"""
profiles = {}
data_generator_func = self._get_profile_data_generator()
for device_index, device_stats in enumerate(
self._run_metadata.step_stats.dev_stats):
# Create profile
pprof_proto = self._get_pprof_proto(data_generator_func(device_stats))
if not pprof_proto.sample:
print(
'Not enough data to create profile for device %s. Did you pass '
'RunMetadata to the session.run call?' % device_stats.device)
continue
# Add device name comment
device_count = len(self._run_metadata.step_stats.dev_stats)
device_description = (
'Device %d of %d: %s' %
(device_index + 1, device_count, device_stats.device))
device_description_str_index = self._string_table.next_index()
pprof_proto.string_table.append(device_description)
pprof_proto.comment.append(device_description_str_index)
profiles[device_stats.device] = pprof_proto
return profiles
def _get_pprof_proto(self, profile_datum_generator):
"""Returns profile data in pprof proto format.
Args:
profile_datum_generator: Generator outputting `ProfileDatum` objects.
Returns:
A proto in pprof format.
"""
pprof_profile = profile_pb2.Profile()
samples = Samples(self._string_table)
for datum in profile_datum_generator:
if not datum.traceback:
continue
stack_frame = datum.traceback[-1]
after_apply_op = False
location_ids = []
# We add locations from stack trace in bottom-up order.
for stack_frame_index in reversed(range(len(datum.traceback) - 1)):
prev_stack_frame = stack_frame
stack_frame = datum.traceback[stack_frame_index]
# Call at current frame calls function at previous frame.
prev_file_path = prev_stack_frame[0]
prev_function = prev_stack_frame[2]
prev_function_start_line = prev_stack_frame[4]
curr_file_path = stack_frame[0]
curr_line_number = stack_frame[1]
# Skip all calls up to apply_op since they are the same for all ops.
if not after_apply_op:
if prev_function == 'apply_op':
after_apply_op = True
continue
location_index = self._locations.index_of(
curr_file_path, curr_line_number,
prev_function, prev_file_path, prev_function_start_line)
location_ids.append(location_index)
samples.add(datum, location_ids)
sample_type_description = 'count'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('count')
sample_type_description = 'all_time'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('nanoseconds')
sample_type_description = 'op_time'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('nanoseconds')
pprof_profile.string_table.extend(self._string_table.string_table())
pprof_profile.sample.extend(samples.get_sample_protos())
pprof_profile.function.extend(self._functions.function_protos())
pprof_profile.location.extend(self._locations.location_protos())
return pprof_profile
def _get_profile_data_generator(self):
"""Get function that generates `ProfileDatum` objects.
Returns:
A function that generates `ProfileDatum` objects.
"""
node_to_traceback = defaultdict(list)
node_to_op_type = defaultdict(str)
for op in self._graph.get_operations():
node_to_traceback[op.name] = op.traceback_with_start_lines
node_to_op_type[op.name] = op.type
def profile_data_generator(device_step_stats):
for node_stats in device_step_stats.node_stats:
if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':
continue
yield ProfileDatum(
node_stats,
node_to_op_type[node_stats.node_name],
node_to_traceback[node_stats.node_name])
return profile_data_generator
def get_profiles(graph, run_metadata):
"""Generate profiles in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto
for pprof proto format.
Args:
graph: A `Graph` object.
run_metadata: A `RunMetadata` proto.
Returns:
A dictionary mapping from device name to pprof proto for that device.
"""
return PprofProfiler(graph, run_metadata).profile()
def profile(graph, run_metadata, output_dir=None):
"""Generate profiles in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto
for pprof proto format.
Args:
graph: A `Graph` object.
run_metadata: A `RunMetadata` proto.
output_dir: (string) Directory to output pprof profile to.
Profile files for each device will be stored in compressed
serialized proto format. If output_dir is None, profile protos
will be printed to stdout instead.
Returns:
List of output files created by this profile call.
(Note: this list will be empty if output_dir is None)
"""
profiles = get_profiles(graph, run_metadata)
output_file_template = None
if output_dir:
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
time_suffix = time.strftime('%Y%m%d%H%M%S')
output_file_template = os.path.join(
output_dir, '%s_' + time_suffix + '.pb.gz')
profile_files = []
for device, pprof_proto in profiles.items():
if output_file_template is None:
print('No output directory specified, printing to stdout instead.')
print(pprof_proto)
else:
device_name = str(device).strip('/').translate(
maketrans('/:', '__'))
profile_file = output_file_template % device_name
profile_files.append(profile_file)
with gzip.open(profile_file, 'w') as output_file:
print('Writing profile to %s...' % profile_file)
output_file.write(pprof_proto.SerializeToString())
return profile_files
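# A minimal usage sketch for `profile` above, assuming graph-mode execution.
# The helper name, the `fetches` argument and the '/tmp/pprof' default are
# hypothetical placeholders; imports are kept local so the sketch stays
# self-contained.
def _example_profile_usage(fetches, output_dir='/tmp/pprof'):
  """Runs one traced step and writes pprof profiles for it (sketch only)."""
  from tensorflow.core.protobuf import config_pb2
  from tensorflow.python.client import session
  run_metadata = config_pb2.RunMetadata()
  with session.Session() as sess:
    # A FULL_TRACE run is required so that step stats are populated.
    sess.run(fetches,
             options=config_pb2.RunOptions(
                 trace_level=config_pb2.RunOptions.FULL_TRACE),
             run_metadata=run_metadata)
    # `profile` is defined above and returns the list of written files.
    return profile(sess.graph, run_metadata, output_dir)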
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/pprof_profiler.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model Analyzer.
Analyze model, including shape, params, time, memory, structure, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from google.protobuf import message
from tensorflow.core.profiler import tfprof_options_pb2
from tensorflow.core.profiler import tfprof_output_pb2
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import tfprof_logger
from tensorflow.python.util.tf_export import tf_export
_DEFAULT_PROFILE_OPTIONS = 0
_DEFAULT_ADVISE_OPTIONS = 0
# The following options are for 'advise' cmd.
# Show all advice.
ALL_ADVICE = {
'ExpensiveOperationChecker': {},
'AcceleratorUtilizationChecker': {},
'JobChecker': {}, # Only available internally.
'OperationChecker': {},
}
def _graph_string(graph):
"""Helper to serialize a graph to string."""
if graph:
return graph.as_graph_def(add_shapes=True).SerializeToString()
else:
return b''
def _build_options(options):
"""Build tfprof.OptionsProto.
Args:
options: A dictionary of options.
Returns:
tfprof.OptionsProto.
"""
opts = tfprof_options_pb2.OptionsProto()
opts.max_depth = options.get('max_depth', 10)
opts.min_bytes = options.get('min_bytes', 0)
opts.min_peak_bytes = options.get('min_peak_bytes', 0)
opts.min_residual_bytes = options.get('min_residual_bytes', 0)
opts.min_output_bytes = options.get('min_output_bytes', 0)
opts.min_micros = options.get('min_micros', 0)
opts.min_accelerator_micros = options.get('min_accelerator_micros', 0)
opts.min_cpu_micros = options.get('min_cpu_micros', 0)
opts.min_params = options.get('min_params', 0)
opts.min_float_ops = options.get('min_float_ops', 0)
opts.min_occurrence = options.get('min_occurrence', 0)
opts.step = options.get('step', -1)
opts.order_by = options.get('order_by', 'name')
for p in options.get('account_type_regexes', []):
opts.account_type_regexes.append(p)
for p in options.get('start_name_regexes', []):
opts.start_name_regexes.append(p)
for p in options.get('trim_name_regexes', []):
opts.trim_name_regexes.append(p)
for p in options.get('show_name_regexes', []):
opts.show_name_regexes.append(p)
for p in options.get('hide_name_regexes', []):
opts.hide_name_regexes.append(p)
opts.account_displayed_op_only = options.get('account_displayed_op_only',
False)
for p in options.get('select', []):
opts.select.append(p)
opts.output = options.get('output', 'stdout')
opts.dump_to_file = options.get('dump_to_file', '')
return opts
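# A minimal sketch of the kind of dictionary `_build_options` expects; the
# values below are hypothetical, and only keys read by the function above have
# any effect. Unspecified keys fall back to the defaults shown above.
_EXAMPLE_PROFILE_OPTIONS = {
    'max_depth': 4,
    'min_params': 1024,
    'order_by': 'params',
    'account_type_regexes': ['.*'],
    'select': ['params'],
    'output': 'stdout',
}
# _build_options(_EXAMPLE_PROFILE_OPTIONS) would return an OptionsProto with
# these fields set and all other fields at their defaults.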
def _build_advisor_options(options):
"""Build tfprof.AdvisorOptionsProto.
Args:
options: A dictionary of options. See ALL_ADVICE example.
Returns:
tfprof.AdvisorOptionsProto.
"""
opts = tfprof_options_pb2.AdvisorOptionsProto()
if options is None:
return opts
for checker, checker_opts in six.iteritems(options):
checker_ops_pb = tfprof_options_pb2.AdvisorOptionsProto.CheckerOption()
for k, v in six.iteritems(checker_opts):
checker_ops_pb[k] = v
opts.checkers[checker].MergeFrom(checker_ops_pb)
return opts
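# A hedged sketch of advisor options that enable only a subset of checkers.
# The checker names come from ALL_ADVICE above; an empty per-checker dict
# means default settings for that checker.
_EXAMPLE_ADVISOR_OPTIONS = {
    'ExpensiveOperationChecker': {},
    'OperationChecker': {},
}
# _build_advisor_options(_EXAMPLE_ADVISOR_OPTIONS) returns an
# AdvisorOptionsProto with just those two checkers enabled, while
# _build_advisor_options(None) returns an empty proto.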
@tf_export(v1=['profiler.Profiler'])
class Profiler(object):
"""TensorFlow multi-step profiler.
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Typical use case:
```python
# Currently we are only allowed to create 1 profiler per process.
profiler = Profiler(sess.graph)
for i in xrange(total_steps):
if i % 10000 == 0:
run_meta = tf.compat.v1.RunMetadata()
_ = sess.run(...,
options=tf.compat.v1.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE),
run_metadata=run_meta)
profiler.add_step(i, run_meta)
# Profile the parameters of your model.
profiler.profile_name_scope(options=(option_builder.ProfileOptionBuilder
.trainable_variables_parameter()))
# Or profile the timing of your model operations.
opts = option_builder.ProfileOptionBuilder.time_and_memory()
profiler.profile_operations(options=opts)
# Or you can generate a timeline:
opts = (option_builder.ProfileOptionBuilder(
option_builder.ProfileOptionBuilder.time_and_memory())
.with_step(i)
.with_timeline_output(filename).build())
profiler.profile_graph(options=opts)
else:
_ = sess.run(...)
# Auto detect problems and generate advice.
profiler.advise()
```
"""
def __init__(self, graph=None, op_log=None):
"""Constructor.
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
extra op types.
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
self._coverage = 0.0
self._graph = graph
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
self._graph, op_log=op_log)
# pylint: enable=protected-access
print_mdl.NewProfiler(
_graph_string(self._graph), op_log.SerializeToString())
def __del__(self):
print_mdl.DeleteProfiler()
def add_step(self, step, run_meta):
"""Add statistics of a step.
Args:
step: int, an id used to group one or more different `run_meta` together.
When profiling with the profile_xxx APIs, users can pass the `step`
id in the `options` to profile these `run_meta` together.
run_meta: RunMetadata proto that contains statistics of a session run.
"""
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
self._graph, run_meta=run_meta)
# pylint: enable=protected-access
# TODO(xpan): P1: Better to find the current graph.
self._coverage = print_mdl.AddStep(step, _graph_string(self._graph),
run_meta.SerializeToString(),
op_log.SerializeToString())
def profile_python(self, options):
"""Profile the statistics of the Python codes.
By default, it shows the call stack from root. To avoid
redundant output, you may use options to filter as below
options['show_name_regexes'] = ['.*my_code.py.*']
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a MultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('code'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_operations(self, options):
"""Profile the statistics of the Operation types (e.g. MatMul, Conv2D).
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a MultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('op'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_name_scope(self, options):
"""Profile the statistics of graph nodes, organized by name scope.
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a GraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.GraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('scope'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_graph(self, options):
"""Profile the statistics of graph nodes, organized by dataflow graph.
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a GraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.GraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('graph'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def advise(self, options):
"""Automatically detect problems and generate reports.
Args:
options: A dict of options. See ALL_ADVICE example above.
Returns:
An AdviceProto that contains the reports from all checkers.
"""
advise_pb = tfprof_output_pb2.AdviceProto()
opts = _build_advisor_options(options)
advise_pb.ParseFromString(
print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))
return advise_pb
def serialize_to_string(self):
"""Serialize the ProfileProto to a binary string.
Users can write it to file for offline analysis by tfprof commandline
or graphical interface.
Returns:
ProfileProto binary string.
"""
return print_mdl.SerializeToString()
def _write_profile(self, filename):
"""Writes the profile to a file."""
print_mdl.WriteProfile(filename)
@tf_export(v1=['profiler.profile'])
def profile(graph=None,
run_meta=None,
op_log=None,
cmd='scope',
options=_DEFAULT_PROFILE_OPTIONS):
"""Profile model.
Tutorials and examples can be found in:
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
run_meta: optional tensorflow.RunMetadata proto. It is necessary
to support run-time information profiling, such as time and memory.
op_log: tensorflow.tfprof.OpLogProto proto. Users can assign "types" to
graph nodes with op_log. "types" allow users to flexibly group and
account profiles using options['account_type_regexes'].
cmd: string. Either 'op', 'scope', 'graph' or 'code'.
'op' view organizes profile using operation type. (e.g. MatMul)
'scope' view organizes profile using graph node name scope.
'graph' view organizes profile using graph node inputs/outputs.
'code' view organizes profile using Python call stack.
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
If cmd is 'scope' or 'graph', returns GraphNodeProto proto.
If cmd is 'op' or 'code', returns MultiGraphNodeProto proto.
Side effect: stdout/file/timeline.json depending on options['output']
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
if options == _DEFAULT_PROFILE_OPTIONS:
options = (option_builder.ProfileOptionBuilder
.trainable_variables_parameter())
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
graph, op_log, run_meta, add_trace=cmd == 'code')
# pylint: enable=protected-access
opts = _build_options(options)
run_meta_str = run_meta.SerializeToString() if run_meta else b''
graph_str = _graph_string(graph)
if cmd == 'code' or cmd == 'op':
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
op_log.SerializeToString(),
cmd.encode('utf-8'),
opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
elif cmd == 'graph' or cmd == 'scope':
tfprof_node = tfprof_output_pb2.GraphNodeProto()
ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
op_log.SerializeToString(),
cmd.encode('utf-8'),
opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
else:
raise errors.InvalidArgumentError(
None, None, 'unknown cmd: %s\n' % cmd)
return tfprof_node
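# A minimal usage sketch for `profile` above (graph mode only). The helper
# name and its argument are hypothetical; it reuses the parameter-counting
# options from the default branch and reads `total_parameters` from the
# returned GraphNodeProto.
def _example_count_parameters(graph):
  """Returns the total number of trainable parameters in `graph` (sketch)."""
  opts = (option_builder.ProfileOptionBuilder
          .trainable_variables_parameter())
  param_stats = profile(graph, cmd='scope', options=opts)
  return param_stats.total_parameters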
@tf_export(v1=['profiler.advise'])
def advise(graph=None, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
"""Auto profile and advise.
Builds profiles and automatically check anomalies of various
aspects. For more details:
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
run_meta: optional tensorflow.RunMetadata proto. It is necessary
to support run-time information profiling, such as time and memory.
options: see ALL_ADVICE example above. Default checks everything.
Returns:
An AdviceProto that contains the reports from all checkers.
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
if options == _DEFAULT_ADVISE_OPTIONS:
options = ALL_ADVICE.copy()
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
graph, None, run_meta, add_trace=True)
# pylint: enable=protected-access
run_meta_str = run_meta.SerializeToString() if run_meta else b''
opts = _build_advisor_options(options)
ret = tfprof_output_pb2.AdviceProto()
ret.ParseFromString(
print_mdl.PrintModelAnalysis(
_graph_string(graph), run_meta_str, op_log.SerializeToString(),
'advise'.encode('utf-8'), opts.SerializeToString()))
return ret
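# A minimal usage sketch for `advise` above; `graph` and `run_meta` are
# hypothetical inputs collected the same way as for `profile`. With the
# default options every checker in ALL_ADVICE is run.
def _example_advise(graph, run_meta):
  """Prints and returns the advice reports for one traced run (sketch only)."""
  advice_proto = advise(graph, run_meta=run_meta)
  print(advice_proto)
  return advice_proto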
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/model_analyzer.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""print_model_analysis test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
TEST_OPTIONS = {
'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'order_by': 'name',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['params'],
'output': 'stdout',
}
# pylint: enable=bad-whitespace
# pylint: enable=bad-continuation
class PrintModelAnalysisTest(test.TestCase):
def _BuildSmallModel(self):
image = array_ops.zeros([2, 6, 6, 3])
kernel = variable_scope.get_variable(
'DW', [6, 6, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
return x
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/internal/print_model_analysis_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""test the RunMetadata proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.profiler import option_builder
# pylint: disable=g-bad-import-order
# XXX: this depends on pywrap_tensorflow and must come later
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
SIZE = 1300
builder = option_builder.ProfileOptionBuilder
def _extract_node(run_meta, node_name):
ret = defaultdict(list)
for dev_stat in run_meta.step_stats.dev_stats:
dev = dev_stat.device.lower()
if dev.find('cpu:') > 0:
dev = dev[dev.find('cpu:'):]
elif dev.find('gpu:') > 0:
dev = dev[dev.find('gpu:'):]
elif '/host:cpu' not in dev:
assert False, 'Unrecognized device name: %s' % dev
for node_stat in dev_stat.node_stats:
nname = node_stat.node_name
if nname.find(':') > 0:
nname = nname[:nname.find(':')]
if nname == node_name:
ret[dev].append(node_stat)
return ret
def _run_model():
x = random_ops.random_normal(shape=[1, SIZE])
w = random_ops.random_normal(shape=[SIZE, 2 * SIZE])
y = math_ops.matmul(x, w)
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
with session.Session(config=config) as sess:
run_metadata = config_pb2.RunMetadata()
opts = builder.time_and_memory()
opts['min_micros'] = 0
opts['min_bytes'] = 0
opts['order_by'] = 'name'
opts['output'] = 'none'
_ = sess.run(y,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_metadata)
tfprof_node = model_analyzer.profile(
sess.graph,
run_meta=run_metadata,
options=opts)
return tfprof_node, run_metadata
def _run_loop_model():
config = config_pb2.ConfigProto()
# Grappler might fuse MatMul with BiasAdd in the remapper optimizer.
config.graph_options.rewrite_options.remapping = (
rewriter_config_pb2.RewriterConfig.OFF)
with session.Session(config=config) as sess:
x = lib.BuildFullModel()
sess.run(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
opts = builder.time_and_memory()
opts['order_by'] = 'name'
opts['output'] = 'none'
tfprof_node = model_analyzer.profile(
sess.graph, run_meta, options=opts)
return tfprof_node, run_meta
class RunMetadataTest(test.TestCase):
@test_util.run_deprecated_v1
def testGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
gpu_dev = test.gpu_device_name()
ops.reset_default_graph()
with ops.device(gpu_dev):
tfprof_node, run_meta = _run_model()
self.assertEqual(tfprof_node.children[0].name, 'MatMul')
self.assertGreater(tfprof_node.children[0].exec_micros, 10)
ret = _extract_node(run_meta, 'MatMul')
self.assertEqual(len(ret['gpu:0']), 1)
if not test.is_built_with_rocm():
# skip this check for the ROCm platform
# stream level tracing is not yet supported on the ROCm platform
self.assertEqual(len(ret['gpu:0/stream:all']), 1, '%s' % run_meta)
@test_util.run_deprecated_v1
def testAllocationHistory(self):
if not test.is_gpu_available(cuda_only=True):
return
gpu_dev = test.gpu_device_name()
ops.reset_default_graph()
with ops.device(gpu_dev):
_, run_meta = _run_model()
mm = _extract_node(run_meta, 'MatMul')['gpu:0'][0]
mm_allocs = mm.memory[0].allocation_records
# has allocation and deallocation.
self.assertEqual(len(mm_allocs), 2)
# first allocated.
self.assertGreater(mm_allocs[1].alloc_micros, mm_allocs[0].alloc_micros)
self.assertGreater(mm_allocs[0].alloc_bytes, 0)
# Then deallocated.
self.assertLess(mm_allocs[1].alloc_bytes, 0)
# All memory deallocated.
self.assertEqual(mm_allocs[0].alloc_bytes + mm_allocs[1].alloc_bytes, 0)
rand = _extract_node(
run_meta, 'random_normal/RandomStandardNormal')['gpu:0'][0]
random_allocs = rand.memory[0].allocation_records
# The random normal must be allocated first since matmul depends on it.
self.assertLess(random_allocs[0].alloc_micros, mm.all_start_micros)
# Its memory is deallocated after matmul has started.
self.assertGreater(random_allocs[1].alloc_micros, mm.all_start_micros)
@test_util.run_deprecated_v1
def testCPU(self):
ops.reset_default_graph()
with ops.device('/cpu:0'):
tfprof_node, run_meta = _run_model()
self.assertEqual(tfprof_node.children[0].name, 'MatMul')
self.assertGreater(tfprof_node.children[0].exec_micros, 0)
ret = _extract_node(run_meta, 'MatMul')
self.assertEqual(len(ret['cpu:0']), 1)
ret = _extract_node(run_meta, 'MatMul:MatMul')
self.assertEqual(len(ret), 0)
@test_util.run_v1_only('b/120545219')
def testLoopCPU(self):
ops.reset_default_graph()
with ops.device('/cpu:0'):
tfprof_node, run_meta = _run_loop_model()
# The while-loop caused a node to appear 4 times in scheduling.
ret = _extract_node(run_meta,
'rnn/while/basic_rnn_cell/MatMul')
self.assertEqual(len(ret['cpu:0']), 4)
total_cpu_execs = 0
for node in ret['cpu:0']:
total_cpu_execs += node.op_end_rel_micros
mm_node = lib.SearchTFProfNode(
tfprof_node,
'rnn/while/basic_rnn_cell/MatMul')
self.assertEqual(mm_node.run_count, 4)
self.assertEqual(mm_node.cpu_exec_micros, total_cpu_execs)
self.assertEqual(mm_node.exec_micros, total_cpu_execs)
def testGradientGraph(self):
# Note: Please don't just adjust the test to make it pass.
# The code view logic depends on it.
ops.reset_default_graph()
_, _ = _run_loop_model()
graph = ops.get_default_graph()
forward_op = set()
backward_op = set()
back_to_forward = {}
for op in graph.get_operations():
if op.name.find('gradients/') > 0 and op.name.find('_grad/') > 0:
backward_op.add(op.name)
idx1 = op.name.find('gradients/') + 10
idx2 = op.name.find('_grad/')
back_to_forward[op.name] = op.name[idx1:idx2]
else:
forward_op.add(op.name)
for _, f in six.iteritems(back_to_forward):
self.assertTrue(f in forward_op)
def testLoopGPU(self):
if not test.is_gpu_available():
return
ops.reset_default_graph()
with ops.device('/device:GPU:0'):
_, run_meta = _run_loop_model()
# The while-loop caused a node to appear 4 times in scheduling.
ret = _extract_node(run_meta,
'rnn/while/basic_rnn_cell/MatMul')
self.assertEqual(len(ret['gpu:0']), 4, '%s' % run_meta)
total_cpu_execs = 0
for node in ret['gpu:0']:
total_cpu_execs += node.op_end_rel_micros
if not test.is_built_with_rocm():
# skip this check for the ROCm platform
# stream level tracing is not yet supported on the ROCm platform
self.assertGreaterEqual(
len(ret['gpu:0/stream:all']), 4, '%s' % run_meta)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/internal/run_metadata_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Register flops statistics for various TensorFlow operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
# List of all ops which have implemented flops statistics.
IMPLEMENTED_OPS = set([
# Unary ops
"Reciprocal", "Square", "Rsqrt", "Log", "Neg", "AssignSub", "AssignAdd",
"L2Loss", "Softmax",
# Binary ops
"Add", "Sub", "Mul", "RealDiv", "Maximum", "Minimum", "Pow", "RsqrtGrad",
"GreaterEqual", "Greater", "LessEqual", "Less", "Equal", "NotEqual",
"SquaredDifference",
# Reduction ops
"Mean", "Sum", "ArgMax", "ArgMin", "BiasAddGrad",
# Convolution and pooling
"AvgPool", "MaxPool", "AvgPoolGrad", "MaxPoolGrad", "Conv2DBackpropInput",
"Conv2DBackpropFilter",
# Other ops
"AddN",
# Ops implemented in core tensorflow:
"MatMul", "Conv2D", "DepthwiseConv2dNative", "BiasAdd", "Dilation2D",
])
def _zero_flops(graph, node):
"""Returns zero flops."""
del graph, node # graph and node are unused
return ops.OpStats("flops", 0)
def _list_product(lst):
"""Computes product of element of the list."""
result = 1
for item in lst:
result *= item
return result
################################################################################
# Unary operations
################################################################################
def _unary_op_flops(graph, node, ops_per_element=1):
"""Common code which compute flops for unary operations."""
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
return ops.OpStats("flops", in_shape.num_elements() * ops_per_element)
@ops.RegisterStatistics("Reciprocal", "flops")
def _reciprocal_flops(graph, node):
"""Compute flops for Reciprocal operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Square", "flops")
def _square_flops(graph, node):
"""Compute flops for Square operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Rsqrt", "flops")
def _rsqrt_flops(graph, node):
"""Compute flops for Rsqrt operation."""
# Rsqrt(x) = 1 / sqrt(x)
return _unary_op_flops(graph, node, ops_per_element=2)
@ops.RegisterStatistics("Log", "flops")
def _log_flops(graph, node):
"""Compute flops for Log operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Neg", "flops")
def _neg_flops(graph, node):
"""Compute flops for Neg operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("AssignSub", "flops")
def _assign_sub_flops(graph, node):
"""Compute flops for AssignSub operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("AssignAdd", "flops")
def _assign_add_flops(graph, node):
"""Compute flops for AssignAdd operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("L2Loss", "flops")
def _l2_loss_flops(graph, node):
"""Compute flops for L2Loss operation."""
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
# TensorFlow uses an inefficient implementation with (3*N-1) flops;
# the optimal implementation is 2*N flops.
return ops.OpStats("flops", in_shape.num_elements() * 3 - 1)
@ops.RegisterStatistics("Softmax", "flops")
def _softmax_flops(graph, node):
"""Compute flops for Softmax operation."""
# Softmax implementation:
#
# Approximate flops breakdown:
# 2*n -- compute shifted logits
# n -- exp of shifted logits
# 2*n -- compute softmax from exp of shifted logits
return _unary_op_flops(graph, node, ops_per_element=5)
################################################################################
# Binary operations
################################################################################
def _binary_per_element_op_flops(graph, node, ops_per_element=1):
"""Common code which compute flops for binary operations."""
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
return ops.OpStats("flops", out_shape.num_elements() * ops_per_element)
@ops.RegisterStatistics("Add", "flops")
def _add_flops(graph, node):
"""Compute flops for Add operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Sub", "flops")
def _sub_flops(graph, node):
"""Compute flops for Sub operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Mul", "flops")
def _mul_flops(graph, node):
"""Compute flops for Mul operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("RealDiv", "flops")
def _real_div_flops(graph, node):
"""Compute flops for RealDiv operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Maximum", "flops")
def _maximum_flops(graph, node):
"""Compute flops for Maximum operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Minimum", "flops")
def _minimum_flops(graph, node):
"""Compute flops for Minimum operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Pow", "flops")
def _pow_flops(graph, node):
"""Compute flops for Pow operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("RsqrtGrad", "flops")
def _rsqrt_grad_flops(graph, node):
"""Compute flops for RsqrtGrad operation."""
return _binary_per_element_op_flops(graph, node, ops_per_element=4)
@ops.RegisterStatistics("GreaterEqual", "flops")
def _greater_equal_flops(graph, node):
"""Compute flops for GreaterEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Greater", "flops")
def _greater_flops(graph, node):
"""Compute flops for Greater operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("LessEqual", "flops")
def _less_equal_flops(graph, node):
"""Compute flops for LessEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Less", "flops")
def _less_flops(graph, node):
"""Compute flops for Less operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Equal", "flops")
def _equal_flops(graph, node):
"""Compute flops for Equal operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("NotEqual", "flops")
def _not_equal_flops(graph, node):
"""Compute flops for NotEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("SquaredDifference", "flops")
def _squared_difference_flops(graph, node):
"""Compute flops for SquaredDifference operation."""
return _binary_per_element_op_flops(graph, node, ops_per_element=2)
################################################################################
# Reduction ops
################################################################################
def _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0):
"""Common code which compute flops for reduction operations."""
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
num_flops = (in_shape.num_elements() * reduce_flops
+ out_shape.num_elements() * (finalize_flops - reduce_flops))
return ops.OpStats("flops", num_flops)
@ops.RegisterStatistics("Mean", "flops")
def _mean_flops(graph, node):
"""Compute flops for Mean operation."""
# reduction - sum, finalization - divide
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=1)
@ops.RegisterStatistics("Sum", "flops")
def _sum_flops(graph, node):
"""Compute flops for Sum operation."""
# reduction - sum, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("ArgMax", "flops")
def _arg_max_flops(graph, node):
"""Compute flops for ArgMax operation."""
# reduction - comparison, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("ArgMin", "flops")
def _arg_min_flops(graph, node):
"""Compute flops for ArgMin operation."""
# reduction - comparison, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("BiasAddGrad", "flops")
def _bias_add_grad_flops(graph, node):
"""Compute flops for BiasAddGrad operation."""
# BiasAddGrad is essentially a reduce sum plus a reshape,
# so its flops are computed the same way as for "Sum".
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
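# Worked example for the reduction formula above (hypothetical shapes):
# a Mean over a [32, 10] input producing a [32] output costs
#   320 * 1 + 32 * (1 - 1) = 320 flops,
# while a Sum over the same shapes costs 320 * 1 + 32 * (0 - 1) = 288 flops.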
################################################################################
# Convolution and pooling
# Note: all flops statistics are implemented only for NHWC data format
################################################################################
def _verify_conv_data_format(node):
"""Verifies data format for pooling and convolutional operations."""
# TODO(xpan): P1: Support NCHW
if node.attr["data_format"].s != b"NHWC":
raise ValueError("Only NHWC format is supported in flops computations")
def _pool_flops(graph, node):
"""Common code which compute flops for pooling operations."""
# compute flops for average and max pooling
_verify_conv_data_format(node)
#
# Pooling declaration:
# Inputs:
# - value
# Outputs:
# - output
# Attributes:
# - ksize
# - strides
# - padding
# - data_format
#
# Pooling implementation:
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
kernel_shape = list(node.attr["ksize"].list.i)
kernel_area = _list_product(kernel_shape)
return ops.OpStats("flops", kernel_area * out_shape.num_elements())
@ops.RegisterStatistics("AvgPool", "flops")
def _avg_pool_flops(graph, node):
"""Compute flops for AvgPool operation."""
return _pool_flops(graph, node)
@ops.RegisterStatistics("MaxPool", "flops")
def _max_pool_flops(graph, node):
"""Compute flops for MaxPool operation."""
return _pool_flops(graph, node)
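# Worked example for the pooling formula above (hypothetical shapes): a
# MaxPool with ksize [1, 3, 3, 1] and a [1, 4, 4, 8] output has
# kernel_area = 1 * 3 * 3 * 1 = 9, giving 9 * 128 = 1152 flops.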
@ops.RegisterStatistics("AvgPoolGrad", "flops")
def _avg_pool_grad_flops(graph, node):
"""Compute flops for AvgPoolGrad operation."""
_verify_conv_data_format(node)
# Pooling gradient implementation:
out_backprop_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
out_backprop_shape.assert_is_fully_defined()
kernel_shape = list(node.attr["ksize"].list.i)
kernel_area = _list_product(kernel_shape)
# TensorFlow multiplies each element of the pooling window by a coefficient
# and then sums them up, so we have 2 flops per element.
# A more optimal implementation would apply the division after the sum.
return ops.OpStats("flops",
kernel_area * out_backprop_shape.num_elements() * 2)
@ops.RegisterStatistics("MaxPoolGrad", "flops")
def _max_pool_grad_flops(graph, node):
"""Compute flops for MaxPoolGrad operation."""
_verify_conv_data_format(node)
#
# MaxPoolGrad declaration:
# Inputs:
# - orig_input -- original input tensor (of max_pool)
# - orig_output -- original output tensor (of max_pool)
# - grad -- gradient with respect to output of max_pool
# Outputs:
# - output -- gradient with respect to input of max_pool
# Attributes:
# - ksize
# - strides
# - padding
# - data_format
# It computes MaxPool first, then one flop per element of the original output.
#
kernel_shape = list(node.attr["ksize"].list.i)
kernel_area = _list_product(kernel_shape)
orig_out_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
orig_out_shape.assert_is_fully_defined()
max_pool_ops = kernel_area * orig_out_shape.num_elements()
return ops.OpStats("flops", max_pool_ops + orig_out_shape.num_elements())
@ops.RegisterStatistics("Conv2DBackpropInput", "flops")
def _conv_2d_backprop_input_flops(graph, node):
"""Compute flops for Conv2DBackpropInput operation."""
# Formula:
# batch_size * image_x_dim * image_y_dim * kernel_x_dim * kernel_y_dim
# * input_depth * output_depth * 2 / (image_x_stride * image_y_stride)
#
# Where:
# image_x_dim, image_y_dim and input_depth --- sizes of the input to the source
# (non-backprop) convolution; in other words, they are the sizes of the
# backprop output.
# output_depth --- number of filters in the original convolution, thus the
# depth of the backprop input.
# kernel_x_dim and kernel_y_dim --- sizes of the filter in the spatial dimensions
# image_x_stride and image_y_stride --- strides of the convolution
#
_verify_conv_data_format(node)
# out_shape = [batch_size, image_y_dim, image_x_dim, input_depth]
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
# kernel_shape = [kernel_y_dim, kernel_x_dim, input_depth, output_depth]
kernel_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
kernel_shape.assert_is_fully_defined()
# strides
strides_shape = list(node.attr["strides"].list.i)
strides_product = strides_shape[1] * strides_shape[2]
return ops.OpStats("flops",
(2 * out_shape.num_elements()
* kernel_shape.num_elements()
/ (out_shape.dims[-1].value * strides_product)))
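# Worked example for the formula above (hypothetical shapes): with an
# out_shape of [1, 8, 8, 3] (192 elements), a [3, 3, 3, 16] kernel
# (432 elements) and strides [1, 2, 2, 1],
#   flops = 2 * 192 * 432 / (3 * (2 * 2)) = 13824.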
@ops.RegisterStatistics("Conv2DBackpropFilter", "flops")
def _conv_2d_backprop_filter_flops(graph, node):
"""Compute flops for Conv2DBackpropFilter operation."""
# Formula same as for Conv2DBackpropInput:
# batch_size * image_x_dim * image_y_dim * kernel_x_dim * kernel_y_dim
# * input_depth * output_depth * 2 / (image_x_stride * image_y_stride)
#
_verify_conv_data_format(node)
# image_shape = [batch_size, image_y_dim, image_x_dim, input_depth]
image_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
image_shape.assert_is_fully_defined()
# kernel_shape = [kernel_y_dim, kernel_x_dim, input_depth, output_depth]
kernel_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
kernel_shape.assert_is_fully_defined()
# strides
strides_shape = list(node.attr["strides"].list.i)
strides_product = strides_shape[1] * strides_shape[2]
return ops.OpStats("flops",
(2 * image_shape.num_elements()
* kernel_shape.num_elements()
/ (image_shape.dims[-1].value * strides_product)))
################################################################################
# Other ops
################################################################################
@ops.RegisterStatistics("AddN", "flops")
def _add_n_flops(graph, node):
"""Compute flops for AddN operation."""
if not node.input:
return _zero_flops(graph, node)
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
return ops.OpStats("flops", in_shape.num_elements() * (len(node.input) - 1))
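# A minimal sketch of how a flops statistic could be registered for an op not
# covered above, using the same decorator as the rest of this module.
# "MyCustomUnaryOp" is a hypothetical op type, not a real TensorFlow kernel.
@ops.RegisterStatistics("MyCustomUnaryOp", "flops")
def _my_custom_unary_op_flops(graph, node):
  """Compute flops for the hypothetical MyCustomUnaryOp operation."""
  # Assume two floating-point operations per input element.
  return _unary_op_flops(graph, node, ops_per_element=2)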
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/internal/flops_registry.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A test lib that defines some models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import variable_scope
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
def BuildSmallModel():
"""Build a small forward conv model."""
image = array_ops.zeros([2, 6, 6, 3])
_ = variable_scope.get_variable(
'ScalarW', [],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
kernel = variable_scope.get_variable(
'DW', [3, 3, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
kernel = variable_scope.get_variable(
'DW2', [2, 2, 6, 12],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(x, kernel, [1, 2, 2, 1], padding='SAME')
return x
def BuildFullModel():
"""Build the full model with conv,rnn,opt."""
seq = []
for i in range(4):
with variable_scope.variable_scope('inp_%d' % i):
seq.append(array_ops.reshape(BuildSmallModel(), [2, 1, -1]))
cell = rnn_cell.BasicRNNCell(16)
out = rnn.dynamic_rnn(
cell, array_ops.concat(seq, axis=1), dtype=dtypes.float32)[0]
target = array_ops.ones_like(out)
loss = nn_ops.l2_loss(math_ops.reduce_mean(target - out))
sgd_op = gradient_descent.GradientDescentOptimizer(1e-2)
return sgd_op.minimize(loss)
def BuildSplitableModel():
"""Build a small model that can be run partially in each step."""
image = array_ops.zeros([2, 6, 6, 3])
kernel1 = variable_scope.get_variable(
'DW', [3, 3, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
r1 = nn_ops.conv2d(image, kernel1, [1, 2, 2, 1], padding='SAME')
kernel2 = variable_scope.get_variable(
'DW2', [2, 3, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
r2 = nn_ops.conv2d(image, kernel2, [1, 2, 2, 1], padding='SAME')
r3 = r1 + r2
return r1, r2, r3
def SearchTFProfNode(node, name):
"""Search a node in the tree."""
if node.name == name:
return node
for c in node.children:
r = SearchTFProfNode(c, name)
if r: return r
return None
@contextlib.contextmanager
def ProfilerFromFile(profile_file):
"""Initialize a profiler from profile file."""
print_mdl.ProfilerFromFile(compat.as_bytes(profile_file))
profiler = model_analyzer.Profiler.__new__(model_analyzer.Profiler)
yield profiler
print_mdl.DeleteProfiler()
def CheckAndRemoveDoc(profile):
assert 'Doc:' in profile
start_pos = profile.find('Profile:')
return profile[start_pos + 9:]
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/profiler/internal/model_analyzer_testlib.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.topology import Topology
from tensorflow.python.util.tf_export import tf_export
SINGLE_CORE_ASSIGNMENT = [[[0, 0, 0]]]
def _compute_task_and_cores_to_replicas(core_assignment, topology):
"""Computes a nested dict which maps task and logical core to replicas."""
task_and_cores_to_replicas = {}
for replica in xrange(core_assignment.shape[0]):
for logical_core in xrange(core_assignment.shape[1]):
coordinates = core_assignment[replica, logical_core, :]
task_id = topology.task_ordinal_at_coordinates(coordinates)
if task_id not in task_and_cores_to_replicas:
task_and_cores_to_replicas[task_id] = {}
if logical_core not in task_and_cores_to_replicas[task_id]:
task_and_cores_to_replicas[task_id][logical_core] = set()
task_and_cores_to_replicas[task_id][logical_core].add(replica)
task_to_sorted_replica_id = {}
for task, core_to_replicas in task_and_cores_to_replicas.items():
core_to_sorted_replicas = {}
for core, replicas in core_to_replicas.items():
core_to_sorted_replicas[core] = sorted(replicas)
task_to_sorted_replica_id[task] = core_to_sorted_replicas
return task_to_sorted_replica_id
@tf_export("tpu.experimental.DeviceAssignment")
class DeviceAssignment(object):
"""Mapping from logical cores in a computation to the physical TPU topology.
Prefer to use the `DeviceAssignment.build()` helper to construct a
`DeviceAssignment`; it is easier if less flexible than constructing a
`DeviceAssignment` directly.
"""
def __init__(self, topology, core_assignment):
"""Constructs a `DeviceAssignment` object.
Args:
topology: A `Topology` object that describes the physical TPU topology.
core_assignment: A logical to physical core mapping, represented as a
rank 3 numpy array. See the description of the `core_assignment`
property for more details.
Raises:
ValueError: If `topology` is not `Topology` object.
ValueError: If `core_assignment` is not a rank 3 numpy array.
"""
if not isinstance(topology, Topology):
raise ValueError("topology must be a Topology object, got {}".format(
type(topology)))
core_assignment = np.asarray(core_assignment, dtype=np.int32)
self._topology = topology
if core_assignment.ndim != 3:
raise ValueError("core_assignment must be a rank 3 numpy array, "
"got shape {}".format(core_assignment.shape))
self._num_replicas = core_assignment.shape[0]
self._num_cores_per_replica = core_assignment.shape[1]
if core_assignment.shape[-1] != topology.mesh_rank:
raise ValueError(
"minor dimension of core_assignment must have size equal to topology "
"rank ({}), got shape {}".format(topology.mesh_rank,
core_assignment.shape))
self._core_assignment = core_assignment
self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(
self._core_assignment, topology)
@property
def topology(self):
"""A `Topology` that describes the TPU topology."""
return self._topology
@property
def num_cores_per_replica(self):
"""The number of cores per replica."""
return self._num_cores_per_replica
@property
def num_replicas(self):
"""The number of replicas of the computation."""
return self._num_replicas
@property
def core_assignment(self):
"""The logical to physical core mapping.
Returns:
An integer numpy array of rank 3, with shape
`[num_replicas, num_cores_per_replica, topology_rank]`. Maps
(replica, logical core) pairs to physical topology coordinates.
"""
return self._core_assignment
def coordinates(self, replica, logical_core):
"""Returns the physical topology coordinates of a logical core."""
return tuple(self.core_assignment[replica, logical_core, :])
def lookup_replicas(self, task_id, logical_core):
"""Lookup replica ids by task number and logical core.
Args:
task_id: TensorFlow task number.
logical_core: An integer, identifying a logical core.
Returns:
A sorted list of the replicas that are attached to that task and
logical_core.
Raises:
ValueError: If no replica exists in the task which contains the logical
core.
"""
try:
return self._task_and_cores_to_replicas[task_id][logical_core]
except KeyError:
raise ValueError(
"Can not find any replica in task: {} contains logical_core: {} ".
format(task_id, logical_core))
def tpu_ordinal(self, replica=0, logical_core=0):
"""Returns the ordinal of the TPU device assigned to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.tpu_device_ordinal_at_coordinates(coordinates)
def host_device(self, replica=0, logical_core=0, job=None):
"""Returns the CPU device attached to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.cpu_device_name_at_coordinates(coordinates, job=job)
def tpu_device(self, replica=0, logical_core=0, job=None):
"""Returns the name of the TPU device assigned to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.tpu_device_name_at_coordinates(coordinates, job=job)
@staticmethod
def build(topology,
computation_shape=None,
computation_stride=None,
num_replicas=1):
return device_assignment(topology, computation_shape, computation_stride,
num_replicas)
def _ring_2d(height, width):
"""Ring-order of a height x width mesh.
For example, in a 4x4 mesh, this returns the following order.
0 -- 1 -- 2 -- 3
| | | |
15-- 6 -- 5 -- 4
| | | |
14-- 7 -- 8 -- 9
| | | |
13-- 12-- 11-- 10
Args:
height: An integer represents the height.
width: An integer represents the width.
Returns:
A list of [y, x] pairs with ring order.
"""
if height == 1:
return [(0, i) for i in range(width)]
if width == 1:
return [(i, 0) for i in range(height)]
if height % 2 != 0:
logging.warning("Odd dimension")
return [(i % height, i // height) for i in range(width * height)]
ret = [(0, 0)]
for i in range(height // 2):
for j in range(1, width):
ret.append((2 * i, j))
for j in range(width - 1, 0, -1):
ret.append((2 * i + 1, j))
for i in range(height - 1, 0, -1):
ret.append((i, 0))
return ret
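# Worked example: _ring_2d(2, 3) visits the 2 x 3 mesh in the order
#   [(0, 0), (0, 1), (0, 2), (1, 2), (1, 1), (1, 0)],
# walking along row 0 and returning along row 1 back toward the start.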
def device_assignment(topology,
computation_shape=None,
computation_stride=None,
num_replicas=1):
"""Computes a device_assignment of a computation across a TPU topology.
Attempts to choose a compact grid of cores for locality.
Returns a `DeviceAssignment` that describes the cores in the topology assigned
to each core of each replica.
`computation_shape` and `computation_stride` values should be powers of 2 for
optimal packing.
Args:
topology: A `Topology` object that describes the TPU cluster topology.
To obtain a TPU topology, evaluate the `Tensor` returned by
`initialize_system` using `Session.run`. Either a serialized
`TopologyProto` or a `Topology` object may be passed. Note: you must
evaluate the `Tensor` first; you cannot pass an unevaluated `Tensor` here.
computation_shape: A rank 1 int32 numpy array with size equal to the
topology rank, describing the shape of the computation's block of cores.
If None, the `computation_shape` is `[1] * topology_rank`.
computation_stride: A rank 1 int32 numpy array of size `topology_rank`,
describing the inter-core spacing of the `computation_shape` cores in the
TPU topology. If None, the `computation_stride` is `[1] * topology_rank`.
num_replicas: The number of computation replicas to run. The replicas will
be packed into the free spaces of the topology.
Returns:
A DeviceAssignment object, which describes the mapping between the logical
cores in each computation replica and the physical cores in the TPU
topology.
Raises:
ValueError: If `topology` is not a valid `Topology` object.
ValueError: If `computation_shape` or `computation_stride` are not 1D int32
numpy arrays with shape [3] where all values are positive.
ValueError: If computation's replicas cannot fit into the TPU topology.
"""
# Deserialize the Topology proto, if it is a string.
if isinstance(topology, bytes):
topology = Topology(serialized=topology)
if not isinstance(topology, Topology):
raise ValueError("`topology` is not a Topology object; got {}".format(
type(topology)))
topology_rank = len(topology.mesh_shape)
mesh_shape = topology.mesh_shape
if computation_shape is None:
computation_shape = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_shape = np.asarray(computation_shape, dtype=np.int32)
if computation_stride is None:
computation_stride = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_stride = np.asarray(computation_stride, dtype=np.int32)
if computation_shape.shape != (topology_rank,):
raise ValueError("computation_shape must have shape [{}]; got {}".format(
topology_rank, computation_shape.shape))
if computation_stride.shape != (topology_rank,):
raise ValueError("computation_stride must have shape [{}]; got {}".format(
topology_rank, computation_stride.shape))
if any(computation_shape < 1):
raise ValueError(
"computation_shape must be positive; got computation_shape={}".format(
computation_shape))
if any(computation_stride < 1):
raise ValueError(
"computation_stride must be positive; got computation_stride={}".format(
computation_stride))
# Computes the physical size of one computation instance.
computation_footprint = computation_shape * computation_stride
if any(computation_footprint > mesh_shape):
raise ValueError(
"computation footprint {} does not fit in TPU topology shape {}".format(
computation_footprint, mesh_shape))
# Computes how many copies of the computation footprint fit in the mesh.
block_counts = mesh_shape // computation_footprint
replica_counts = block_counts * computation_stride
max_replicas = np.prod(replica_counts)
if num_replicas > max_replicas:
raise ValueError(
"requested {} replicas but only {} replicas with shape {} and "
"computation_stride {} fit in a TPU mesh of shape {}".format(
num_replicas, max_replicas, computation_shape, computation_stride,
mesh_shape))
def ceil_of_ratio(n, m):
return (n + m - 1) // m
replica_shape = [0] * topology_rank
if num_replicas > 0:
remaining_replicas = num_replicas
remaining_dims = topology_rank
# Choose dimensions as close to an equal cube as possible, in order of
# increasing dimension size. By visiting dimensions in increasing size, we
# assign the most constrained dimension first, so we won't make infeasible
# choices.
#
# As a secondary sort order, visit the dimensions in reverse order. This
# means we try to use both cores on the same chip in preference to two cores
# on different chips.
for x, ni in sorted(((x, -i) for (i, x) in enumerate(replica_counts))):
i = -ni
target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims)))
replica_shape[i] = min(target_size, x)
remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i])
remaining_dims -= 1
assert remaining_replicas == 1 and remaining_dims == 0
# Assigns an offset to each replica such that no two replicas overlap.
replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)
# TODO(ylc): Revisit here when topology_rank > 3.
enable_2d_tiling = (
topology_rank == 3 and
computation_shape[-1] == 2 # Only handle 2D case.
and np.prod(computation_stride) == 1 # Ensure no stride.
and num_replicas == max_replicas) # Full replication.
logging.info("enable_2d_tiling: {}".format(enable_2d_tiling))
if enable_2d_tiling:
assignment = []
inner_ring = _ring_2d(computation_shape[0], computation_shape[1])
outer_ring = _ring_2d(replica_shape[0], replica_shape[1])
for replica in xrange(num_replicas):
outer_x, outer_y = outer_ring[replica]
per_replica_assignment = []
for index in xrange(np.prod(computation_shape)):
inner_x, inner_y = inner_ring[index // 2]
px = outer_x * computation_shape[0] + inner_x
py = outer_y * computation_shape[1] + inner_y
pz = index % 2
per_replica_assignment.append([px, py, pz])
assignment.append(per_replica_assignment)
else:
for replica in xrange(num_replicas):
# Chooses a replica number in each axis.
t = replica
pos = []
for dim in replica_shape[::-1]:
pos.append(t % dim)
t //= dim
replica_pos = np.array(pos[::-1], dtype=np.int32)
# Determines where that replica starts in each axis.
outer = replica_pos // computation_stride
inner = replica_pos % computation_stride
replica_offsets[replica, :] = outer * computation_footprint + inner
# Computes a logical core -> physical core mapping for each replica.
indices = [
np.arange(0, computation_shape[i] * computation_stride[i],
computation_stride[i]) for i in xrange(topology_rank)
]
indices = np.concatenate(
[i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")],
axis=-1)
indices = indices.reshape((-1, topology_rank))
assignment = indices + replica_offsets[:, np.newaxis, :]
return DeviceAssignment(topology, core_assignment=assignment)
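# A minimal usage sketch (not part of the original module). It assumes
# `serialized_topology` holds the bytes obtained by evaluating
# `tpu.initialize_system()` in a session; the replica count below is
# hypothetical.
def _example_device_assignment(serialized_topology):
  """Builds a DeviceAssignment with two single-core replicas."""
  topo = Topology(serialized=serialized_topology)
  rank = len(topo.mesh_shape)
  return device_assignment(
      topo,
      computation_shape=[1] * rank,
      computation_stride=[1] * rank,
      num_replicas=2)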
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/device_assignment.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU specific APIs to be used in conjunction with TPU Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute.cluster_resolver import TPUClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import device
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import topology
from tensorflow.python.tpu import tpu
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
_INITIALIZED_TPU_SYSTEMS = {}
_LOCAL_MASTERS = ("", "local")
@tf_export("tpu.experimental.initialize_tpu_system")
def initialize_tpu_system(cluster_resolver=None):
"""Initialize the TPU devices.
Args:
cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
Returns:
The tf.tpu.Topology object for the topology of the TPU cluster.
Raises:
RuntimeError: If no TPU devices found for eager execution.
"""
job = None
if cluster_resolver is None:
# If no cluster resolver is specified, and running eagerly, execute the init
# ops in the current device scope.
if context.executing_eagerly():
curr_device = device.DeviceSpec.from_string(context.context().device_name)
if curr_device.job is not None:
job = "{}/replica:0/task:0".format(curr_device.job)
cluster_resolver = TPUClusterResolver("")
assert isinstance(cluster_resolver, TPUClusterResolver)
tpu_name = compat.as_text(cluster_resolver._tpu) # pylint: disable=protected-access
if tpu_name in _INITIALIZED_TPU_SYSTEMS:
logging.warning("TPU system %s has already been initialized. "
"Reinitializing the TPU can cause previously created "
"variables on TPU to be lost.")
logging.info("Initializing the TPU system: %s", tpu_name)
if context.executing_eagerly():
# This function is written the way it is for the following non-intuitive reasons.
# tpu.initialize_system creates a dummy op whose sole purpose is to trigger
# DistributedTPURewritePass. This pass actually adds real ops that
# initialize the TPU system. Thus, we can't simply run tpu.initialize_system
# eagerly. We need to wrap it in defun and trigger the rewrite passes on it.
if tpu_name not in _LOCAL_MASTERS:
# Explicitly place the tpu.initialize_system op on the first worker to
# avoid the "output node matches multiple devices" error.
job = "{}/replica:0/task:0".format(cluster_resolver.get_job_name())
@function.defun
def _tpu_init_fn():
return tpu.initialize_system(job=job)
# The TPU_SYSTEM device must match the device used in tpu.initialize_system
# exactly, otherwise you can get errors if there are multiple TPU_SYSTEM
# devices available.
with ops.device(tpu._tpu_system_device_name(job)): # pylint: disable=protected-access
output = _tpu_init_fn()
# Clear out the eager context caches since the memory is invalid now.
logging.info("Clearing out eager caches")
context.context()._clear_caches() # pylint: disable=protected-access
serialized_topology = output.numpy()
# TODO(b/134094971): Remove this when lazy tensor copy in multi-device
# function has been implemented.
context.context().mirroring_policy = context.MIRRORING_ALL
else:
master = cluster_resolver.master()
cluster_spec = cluster_resolver.cluster_spec()
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
if cluster_spec:
session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
with ops.Graph().as_default():
with session_lib.Session(config=session_config, target=master) as sess:
serialized_topology = sess.run(tpu.initialize_system())
logging.info("Finished initializing TPU system.")
tpu_topology = topology.Topology(serialized=serialized_topology)
_INITIALIZED_TPU_SYSTEMS[tpu_name] = tpu_topology
return tpu_topology
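# A minimal usage sketch (not part of the original module). The TPU name is
# hypothetical; with a real Cloud TPU this initializes the system and returns
# its Topology.
def _example_initialize_tpu_system():
  resolver = TPUClusterResolver(tpu='my-tpu')  # hypothetical TPU name
  tpu_topology = initialize_tpu_system(resolver)
  return tpu_topology.mesh_shape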
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_strategy_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU embedding APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import math
import re
import six
from tensorflow.core.protobuf.tpu import optimization_parameters_pb2
from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2 as elc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util.tf_export import tf_export
TRAINING = elc.TPUEmbeddingConfiguration.TRAINING
INFERENCE = elc.TPUEmbeddingConfiguration.INFERENCE
# TODO(shizhiw): a more future-proof way is to have optimization_parameter such
# as AdagradParameters etc instead of learning_rate.
class TableConfig(
collections.namedtuple('TableConfig', [
'vocabulary_size', 'dimension', 'initializer', 'combiner',
'hot_id_replication', 'learning_rate', 'learning_rate_key',
'optimization_parameters',
])):
"""Embedding table configuration."""
def __new__(cls,
vocabulary_size,
dimension,
initializer=None,
combiner='mean',
hot_id_replication=False,
learning_rate=None,
learning_rate_key=None,
optimization_parameters=None):
"""Embedding table configuration.
Args:
vocabulary_size: Number of vocabulary (/rows) in the table.
dimension: The embedding dimension.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and standard
deviation `1/sqrt(dimension)`.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn', 'sum' and None are
supported, with 'mean' the default. 'sqrtn' often achieves good
accuracy, in particular with bag-of-words columns. For more information,
see `tf.nn.embedding_lookup_sparse`. None is only valid for dense rather
than sparse tensors.
hot_id_replication: If true, enables hot id replication, which can make
embedding lookups faster if there are some hot rows in the table.
learning_rate: float, static learning rate for this table. If
learning_rate and learning_rate_key are both `None`, global
static learning rate as specified in `optimization_parameters` in
`TPUEmbedding` constructor will be used. `learning_rate_key` must be
`None` if `learning_rate` is not `None`.
learning_rate_key: string, use dynamic learning rate of
`learning_rates[learning_rate_key]` for this table, where
`learning_rates` is the second argument of
`generate_send_gradients_op()`. If learning_rate and learning_rate_key
are both `None`, global static learning rate as specified in
`optimization_parameters` in `TPUEmbedding` constructor will be used.
`learning_rate` must be `None` if `learning_rate_key` is not `None`.
optimization_parameters: `AdagradParameters`, `AdamParameters`,
`StochasticGradientDescentParameters`. Specifies the table-level optimizer.
If it's `None` global optimizer in `TPUEmbedding` constructor is used.
Returns:
`TableConfig`.
Raises:
ValueError: if `vocabulary_size` is not a positive integer.
ValueError: if `dimension` is not a positive integer.
ValueError: if `initializer` is specified and is not callable.
ValueError: if `combiner` is not supported.
ValueError: if `learning_rate` and `learning_rate_key` are both not
`None`.
"""
if not isinstance(vocabulary_size, int) or vocabulary_size < 1:
raise ValueError('Invalid vocabulary_size {}.'.format(vocabulary_size))
if not isinstance(dimension, int) or dimension < 1:
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
if combiner not in ('mean', 'sum', 'sqrtn', None):
raise ValueError('Invalid combiner {}'.format(combiner))
if learning_rate is not None and learning_rate_key is not None:
raise ValueError('At most one of learning_rate and learning_rate_key '
'can be set; got {} and {}'
.format(learning_rate, learning_rate_key))
if optimization_parameters is not None:
if not isinstance(optimization_parameters, _OptimizationParameters):
raise ValueError('`optimization_parameters` must inherit from '
'`_OptimizationParameters`. '
'`type(optimization_parameters)`={}'.format(
type(optimization_parameters)))
return super(TableConfig,
cls).__new__(cls, vocabulary_size, dimension, initializer,
combiner, hot_id_replication, learning_rate,
learning_rate_key, optimization_parameters)
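# A minimal illustrative sketch (not part of the original module): a typical
# TableConfig using the default truncated-normal initializer and a per-table
# Adagrad optimizer. The vocabulary size and dimension are hypothetical.
def _example_table_config():
  return TableConfig(
      vocabulary_size=1000,
      dimension=16,
      combiner='mean',
      optimization_parameters=AdagradParameters(learning_rate=0.1))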
class FeatureConfig(
collections.namedtuple(
'FeatureConfig',
['table_id', 'max_sequence_length', 'weight_key'])):
"""Feature configuration."""
def __new__(cls,
table_id,
max_sequence_length=0,
weight_key=None):
"""Feature configuration.
Args:
table_id: Name of the table the feature uses for embedding lookups.
max_sequence_length: If positive, the feature is a sequence feature with
the corresponding maximum sequence length. If the sequence is longer
than this, it will be truncated. If 0, the feature is not a sequence
feature.
weight_key: If using weights for the combiner, this key specifies which
input feature contains the weights.
Returns:
`FeatureConfig`.
Raises:
ValueError: if `max_sequence_length` is not a non-negative integer.
"""
if not isinstance(max_sequence_length, int) or max_sequence_length < 0:
raise ValueError('Invalid max_sequence_length {}.'.format(
max_sequence_length))
return super(FeatureConfig, cls).__new__(cls, table_id, max_sequence_length,
weight_key)
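# A minimal illustrative sketch (not part of the original module): two features
# sharing one table, one of them a sequence feature truncated at length 10.
# The feature and table names are hypothetical.
def _example_feature_configs():
  return {
      'watched': FeatureConfig('video'),
      'watch_history': FeatureConfig('video', max_sequence_length=10),
  }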
class EnqueueData(
collections.namedtuple(
'EnqueueData',
['embedding_indices', 'sample_indices', 'aggregation_weights'])):
"""Data to be enqueued through generate_enqueue_ops()."""
def __new__(cls,
embedding_indices,
sample_indices=None,
aggregation_weights=None):
"""Data to be enqueued through generate_enqueue_ops().
Args:
embedding_indices: A rank 1 Tensor of indices into the embedding tables. It
corresponds to sp_ids.values in embedding_lookup_sparse(). Both int32
and int64 are allowed and will be converted to int32 internally.
sample_indices: A rank 2 Tensor specifying the training example to which
the corresponding embedding_indices and aggregation_weights values
belong. It corresponds to sp_ids.indices in embedding_lookup_sparse().
If it is None, we assume each embedding_indices belongs to a different
sample. Both int32 and int64 are allowed and will be converted to int32
internally.
aggregation_weights: A rank 1 Tensor containing aggregation weights.
It corresponds to sp_weights.values in embedding_lookup_sparse(). If it
is None, we assume all weights are 1. Both float32 and float64 are
allowed and will be converted to float32 internally.
Returns:
An EnqueueData tuple.
"""
return super(EnqueueData, cls).__new__(cls, embedding_indices,
sample_indices, aggregation_weights)
@staticmethod
def from_sparse_tensor(sp_tensor, weights=None):
return EnqueueData(
sp_tensor.values,
sp_tensor.indices,
aggregation_weights=weights.values if weights is not None else None)
def get_enqueue_datas_list_from_sparse_tensors_list(sp_tensors_list):
"""Convenient function for generate_enqueue_ops().
Args:
sp_tensors_list: a list of dictionaries mapping from strings of feature
names to SparseTensor. Each dictionary is for one TPU core. Dictionaries
for the same host should be contiguous in the list.
Returns:
enqueue_datas_list: a list of dictionaries mapping from strings
of feature names to EnqueueData. Each dictionary is for one
TPU core. Dictionaries for the same host should be contiguous
in the list.
"""
enqueue_datas_list = []
for sp_tensors in sp_tensors_list:
enqueue_datas = collections.OrderedDict(
(k, EnqueueData.from_sparse_tensor(v))
for k, v in six.iteritems(sp_tensors))
enqueue_datas_list.append(enqueue_datas)
return enqueue_datas_list
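# A minimal illustrative sketch (not part of the original module): building the
# per-core enqueue data from a SparseTensor. The feature name and values are
# hypothetical, and the sparse_tensor import is local because the module does
# not otherwise need it.
def _example_enqueue_datas_list():
  from tensorflow.python.framework import sparse_tensor  # illustrative only
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 0], [1, 0], [1, 1]], values=[3, 7, 9], dense_shape=[2, 2])
  # One dict per TPU core; here a single core with a single feature.
  return get_enqueue_datas_list_from_sparse_tensors_list([{'watched': sp}])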
AdamSlotVariableNames = collections.namedtuple(
'AdamSlotVariableNames', ['m', 'v'])
AdagradSlotVariableName = collections.namedtuple(
'AdagradSlotVariableName', ['accumulator'])
AdamSlotVariables = collections.namedtuple(
'AdamSlotVariables', ['m', 'v'])
AdagradSlotVariable = collections.namedtuple(
'AdagradSlotVariable', ['accumulator'])
VariablesAndOps = collections.namedtuple(
'VariablesAndOps',
['embedding_variables_by_table', 'slot_variables_by_table',
'load_ops', 'retrieve_ops']
)
class _OptimizationParameters(object):
"""Parameters common to all optimizations."""
def __init__(self, learning_rate, use_gradient_accumulation,
clip_weight_min, clip_weight_max):
self.learning_rate = learning_rate
self.use_gradient_accumulation = use_gradient_accumulation
self.clip_weight_min = clip_weight_min
self.clip_weight_max = clip_weight_max
@tf_export(v1=['tpu.experimental.AdagradParameters'])
class AdagradParameters(_OptimizationParameters):
"""Optimization parameters for Adagrad with TPU embeddings.
Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the
`optimization_parameters` argument to set the optimizer and its parameters.
See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec`
for more details.
```
estimator = tf.estimator.tpu.TPUEstimator(
...
embedding_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
...
optimization_parameters=tf.tpu.experimental.AdagradParameters(0.1),
...))
```
"""
def __init__(self,
learning_rate,
initial_accumulator=0.1,
use_gradient_accumulation=True,
clip_weight_min=None,
clip_weight_max=None):
"""Optimization parameters for Adagrad.
Args:
learning_rate: used for updating embedding table.
initial_accumulator: initial accumulator for Adagrad.
use_gradient_accumulation: setting this to `False` makes embedding
gradients calculation less accurate but faster. Please see
`optimization_parameters.proto` for details.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
"""
super(AdagradParameters,
self).__init__(learning_rate, use_gradient_accumulation,
clip_weight_min, clip_weight_max)
if initial_accumulator <= 0:
raise ValueError('Adagrad initial_accumulator must be positive')
self.initial_accumulator = initial_accumulator
@tf_export(v1=['tpu.experimental.AdamParameters'])
class AdamParameters(_OptimizationParameters):
"""Optimization parameters for Adam with TPU embeddings.
Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the
`optimization_parameters` argument to set the optimizer and its parameters.
See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec`
for more details.
```
estimator = tf.estimator.tpu.TPUEstimator(
...
embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
...
optimization_parameters=tf.tpu.experimental.AdamParameters(0.1),
...))
```
"""
def __init__(self,
learning_rate,
beta1=0.9,
beta2=0.999,
epsilon=1e-08,
lazy_adam=True,
sum_inside_sqrt=True,
use_gradient_accumulation=True,
clip_weight_min=None,
clip_weight_max=None):
"""Optimization parameters for Adam.
Args:
learning_rate: a floating point value. The learning rate.
beta1: A float value.
The exponential decay rate for the 1st moment estimates.
beta2: A float value.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability.
lazy_adam: Use lazy Adam instead of Adam. Lazy Adam trains faster.
Please see `optimization_parameters.proto` for details.
sum_inside_sqrt: This improves training speed. Please see
`optimization_parameters.proto` for details.
use_gradient_accumulation: setting this to `False` makes embedding
gradients calculation less accurate but faster. Please see
`optimization_parameters.proto` for details.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
"""
super(AdamParameters,
self).__init__(learning_rate, use_gradient_accumulation,
clip_weight_min, clip_weight_max)
if beta1 < 0. or beta1 >= 1.:
raise ValueError('beta1 must be between 0. and 1; got {}.'.format(beta1))
if beta2 < 0. or beta2 >= 1.:
raise ValueError('beta2 must be between 0. and 1; got {}.'.format(beta2))
if epsilon <= 0.:
raise ValueError('epsilon must be positive; got {}.'.format(epsilon))
if not use_gradient_accumulation and not lazy_adam:
raise ValueError(
'When disabling Lazy Adam, gradient accumulation must be used.')
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.lazy_adam = lazy_adam
self.sum_inside_sqrt = sum_inside_sqrt
@tf_export(v1=['tpu.experimental.StochasticGradientDescentParameters'])
class StochasticGradientDescentParameters(_OptimizationParameters):
"""Optimization parameters for stochastic gradient descent for TPU embeddings.
Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the
`optimization_parameters` argument to set the optimizer and its parameters.
See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec`
for more details.
```
estimator = tf.estimator.tpu.TPUEstimator(
...
embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
...
optimization_parameters=(
tf.tpu.experimental.StochasticGradientDescentParameters(0.1))))
```
"""
def __init__(self, learning_rate, clip_weight_min=None,
clip_weight_max=None):
"""Optimization parameters for stochastic gradient descent.
Args:
learning_rate: a floating point value. The learning rate.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
"""
super(StochasticGradientDescentParameters,
self).__init__(learning_rate, False, clip_weight_min, clip_weight_max)
DeviceConfig = collections.namedtuple('DeviceConfig',
['num_hosts', 'num_cores', 'job_name'])
class TPUEmbedding(object):
"""API for using TPU for embedding.
Example:
```
table_config_video = tpu_embedding.TableConfig(
vocabulary_size=8, dimension=4,
initializer=initializer, combiner='mean')
table_config_user = tpu_embedding.TableConfig(
vocabulary_size=4, dimension=2,
initializer=initializer, combiner='mean')
table_to_config_dict = {'video': table_config_video,
'user': table_config_user}
feature_to_config_dict = {'watched': tpu_embedding.FeatureConfig('video'),
'favorited': tpu_embedding.FeatureConfig('video'),
'friends': tpu_embedding.FeatureConfig('user')}
batch_size = 4
num_hosts = 1
optimization_parameters = tpu_embedding.AdagradParameters(1., 1.)
mode = tpu_embedding.TRAINING
embedding = tpu_embedding.TPUEmbedding(
table_to_config_dict, feature_to_config_dict,
batch_size, num_hosts, mode, optimization_parameters)
batch_size_per_core = embedding.batch_size_per_core
sparse_features_list = []
for host in hosts:
with ops.device(host):
for _ in range(embedding.num_cores_per_host):
sparse_features = {}
sparse_features['watched'] = sparse_tensor.SparseTensor(...)
sparse_features['favorited'] = sparse_tensor.SparseTensor(...)
sparse_features['friends'] = sparse_tensor.SparseTensor(...)
sparse_features_list.append(sparse_features)
enqueue_ops = embedding.generate_enqueue_ops(sparse_features_list)
embedding_variables_and_ops = embedding.create_variables_and_ops()
def computation():
activations = embedding.get_activations()
loss = compute_loss(activations)
base_optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=1)
cross_shard_optimizer = tpu_optimizer.CrossShardOptimizer(
base_optimizer)
train_op = cross_shard_optimizer.minimize(loss)
gradients = (
tpu_embedding_gradient.get_gradients_through_compute_gradients(
cross_shard_optimizer, loss, activations))
send_gradients_op = embedding.generate_send_gradients_op(gradients)
with ops.control_dependencies([train_op, send_gradients_op]):
loss = array_ops.identity(loss)
loss = tpu.shard(computation,
num_shards=embedding.num_cores)
with self.test_session() as sess:
sess.run(tpu.initialize_system(embedding_config=
embedding.config_proto))
sess.run(variables.global_variables_initializer())
sess.run(embedding_variables_and_ops.load_ops())
sess.run(enqueue_ops)
loss_val = sess.run(loss)
```
"""
# TODO(shizhiw): Consider adding a field to FeatureConfig that indicates that
# the feature should not be used to update embedding table (cr/204852758,
# cr/204940540). Also, this can support different combiners for different
# features within the same table.
# TODO(shizhiw, b/118512626): Remove `batch_size` from `__init__` and move it
# to `FeatureConfig`?
# TODO(shizhiw): will it be cleaner to make `table_to_config_dict` and
# `feature_to_config_dict` lists of `TableSpec` and `FeatureSpec`
# respectively?
# TODO(shizhiw): Consider adding `input_fn` as an option to remove boilerplate
# for-loops around construction of inputs.
# `optimization_parameter` applies to all tables. If the need arises,
# we can add `optimization_parameters` to `TableConfig` to override this
# global setting.
def __init__(self,
table_to_config_dict,
feature_to_config_dict,
batch_size,
mode,
master=None,
optimization_parameters=None,
cluster_def=None,
pipeline_execution_with_tensor_core=False,
partition_strategy='div',
device_config=None):
"""API for using TPU for embedding lookups.
Args:
table_to_config_dict: A dictionary mapping from string of table name to
`TableConfig`. Table refers to an embedding table, e.g. `params`
argument to `tf.nn.embedding_lookup_sparse()`.
feature_to_config_dict: A dictionary mapping from string of feature name
to `FeatureConfig`. Feature refers to ids to lookup in embedding table,
e.g. `sp_ids` argument to `tf.nn.embedding_lookup_sparse()`.
batch_size: An `int` representing the global batch size.
mode: `TRAINING` or `INFERENCE`.
master: A `string` representing the TensorFlow master to use.
optimization_parameters: `AdagradParameters`, `AdamParameters`,
`StochasticGradientDescentParameters`. Must be set in training unless
all tables specify their own optimizers, and must be `None` in
inference mode.
cluster_def: A ClusterDef object describing the TPU cluster.
pipeline_execution_with_tensor_core: setting this to `True` makes training
faster, but trained model will be different if step N and step N+1
involve the same set of embedding IDs. Please see
`tpu_embedding_configuration.proto` for details.
partition_strategy: A string, either 'mod' or 'div', specifying how to map
the lookup id to the embedding tensor. For more information see
`tf.nn.embedding_lookup_sparse`.
device_config: A DeviceConfig instance, used when `master` and
`cluster_def` are both `None`.
Raises:
ValueError: if any input is invalid.
"""
if partition_strategy not in ('div', 'mod'):
raise ValueError(
'Invalid partition_strategy {}'.format(partition_strategy))
self._partition_strategy = partition_strategy
_validate_table_to_config_dict(table_to_config_dict)
# Avoid nondeterminism from `Dict` iteration order by using `OrderedDict`.
self._table_to_config_dict = _create_ordered_dict(table_to_config_dict)
_validate_feature_to_config_dict(table_to_config_dict,
feature_to_config_dict)
self._feature_to_config_dict = _create_ordered_dict(feature_to_config_dict)
self._table_to_features_dict, self._table_to_num_features_dict = (
_create_table_to_features_and_num_features_dicts(
self._feature_to_config_dict))
self._combiners = _create_combiners(self._table_to_config_dict,
self._table_to_features_dict)
self._batch_size = batch_size
if master is None and cluster_def is None:
if device_config is None:
raise ValueError('When master and cluster_def are both None, '
'device_config must be set but is not.')
if device_config.num_cores % device_config.num_hosts:
raise ValueError('num_hosts ({}) should divide num_cores ({}) '
'but does not.'.format(device_config.num_hosts,
device_config.num_cores))
self._num_hosts = device_config.num_hosts
self._num_cores = device_config.num_cores
self._num_cores_per_host = self._num_cores // self._num_hosts
self._hosts = [
'{}/replica:0/task:{}/device:CPU:0'.format(device_config.job_name, i)
for i in range(self._num_hosts)
]
else:
tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata( # pylint: disable=protected-access
master,
cluster_def=cluster_def))
if tpu_system_metadata.num_cores == 0:
raise ValueError('TPUEmbedding needs TPUs, but master {} does not have '
'TPUs.'.format(master))
self._num_hosts = tpu_system_metadata.num_hosts
master_job_name = tpu_system_metadata_lib.master_job(master, cluster_def)
self._hosts = []
for device in tpu_system_metadata.devices:
if 'device:CPU:' in device.name and (
master_job_name is None or master_job_name in device.name):
self._hosts.append(device.name)
self._num_cores_per_host = tpu_system_metadata.num_of_cores_per_host
self._num_cores = tpu_system_metadata.num_cores
_validate_batch_size(self._batch_size, self._num_cores)
self._batch_size_per_core = self._batch_size // self._num_cores
# TODO(shizhiw): remove `mode`?
if mode == TRAINING:
_validate_optimization_parameters(optimization_parameters,
self._table_to_config_dict)
self._optimization_parameters = optimization_parameters
elif mode == INFERENCE:
if optimization_parameters is not None:
raise ValueError('`optimization_parameters` should be `None` '
'for inference mode.')
self._optimization_parameters = (
StochasticGradientDescentParameters(1.))
else:
raise ValueError('`mode` only supports {} and {}; got {}.'
.format(TRAINING, INFERENCE, mode))
self._mode = mode
# TODO(shizhiw): move `optimization_parameters` into `_optimizer_handler`
# and create special handler for inference that inherits from
# StochasticGradientDescentHandler with more user-friendly error message
# on get_slot().
self._optimizer_handler_dict = self._get_optimizer_handler_by_table()
self._pipeline_execution_with_tensor_core = (
pipeline_execution_with_tensor_core)
self._config_proto = self._create_config_proto()
@property
def hosts(self):
"""A list of device names for CPU hosts.
Returns:
A list of device names for CPU hosts.
"""
return copy.copy(self._hosts)
# TODO(shizhiw): change to num_tensor_cores_per_host to be more explicit and
# to be consistent with `tpu_embedding_configuration.proto`.
@property
def num_cores_per_host(self):
"""Number of TPU cores on a CPU host.
Returns:
Number of TPU cores on a CPU host.
"""
return self._num_cores_per_host
@property
def num_cores(self):
"""Total number of TPU cores on all hosts.
Returns:
Total number of TPU cores on all hosts.
"""
return self._num_cores
@property
def batch_size_per_core(self):
"""Batch size for each TPU core.
The sparse tensors in `sparse_features_list` to `generate_enqueue_ops`
must have batch dimension equal to this.
Returns:
Batch size for each TPU core.
"""
return self._batch_size_per_core
@property
def config_proto(self):
"""Create embedding config proto for `tpu.initialize_system()`.
Returns:
a `TPUEmbeddingConfiguration` proto describing the desired
configuration of the hardware embedding lookup tables, which
is passed to `tpu.initialize_system()`.
"""
return self._config_proto
@property
def table_to_config_dict(self):
return copy.copy(self._table_to_config_dict)
@property
def feature_to_config_dict(self):
return copy.copy(self._feature_to_config_dict)
@property
def table_to_features_dict(self):
return copy.copy(self._table_to_features_dict)
@property
def optimization_parameters(self):
return self._optimization_parameters
def _create_config_proto(self):
"""Create `TPUEmbeddingConfiguration`."""
self._learning_rate_keys = list(
set(c.learning_rate_key
for c in self._table_to_config_dict.values()
if c.learning_rate_key is not None))
config_proto = elc.TPUEmbeddingConfiguration()
for table in self._table_to_config_dict:
table_descriptor = config_proto.table_descriptor.add()
table_descriptor.name = table
table_config = self._table_to_config_dict[table]
# For small tables, we pad to the number of hosts so that at least one
# id will be assigned to each host.
table_descriptor.vocabulary_size = max(table_config.vocabulary_size,
len(self.hosts))
table_descriptor.dimension = table_config.dimension
table_descriptor.num_features = self._table_to_num_features_dict[table]
optimization_parameters = (
self._optimizer_handler_dict[table].get_optimization_parameters())
parameters = table_descriptor.optimization_parameters
if table_config.learning_rate:
parameters.learning_rate.constant = (table_config.learning_rate)
elif table_config.learning_rate_key:
parameters.learning_rate.dynamic.tag = (
self._learning_rate_keys.index(table_config.learning_rate_key))
else:
parameters.learning_rate.constant = (
optimization_parameters.learning_rate)
parameters.gradient_accumulation_status = (
optimization_parameters_pb2.GradientAccumulationStatus.ENABLED
if optimization_parameters.use_gradient_accumulation else
optimization_parameters_pb2.GradientAccumulationStatus.DISABLED)
if optimization_parameters.clip_weight_min is not None:
parameters.clipping_limits.lower.value = (
optimization_parameters.clip_weight_min)
if optimization_parameters.clip_weight_max is not None:
parameters.clipping_limits.upper.value = (
optimization_parameters.clip_weight_max)
if table_config.hot_id_replication:
parameters.hot_id_replication_configuration.status = (
optimization_parameters_pb2.HotIdReplicationConfiguration.ENABLED)
optimizer_handler = self._optimizer_handler_dict[table]
optimizer_handler.set_optimization_parameters(table_descriptor)
config_proto.mode = self._mode
config_proto.batch_size_per_tensor_core = self._batch_size_per_core
config_proto.num_hosts = self._num_hosts
config_proto.num_tensor_cores = self._num_cores
config_proto.sharding_strategy = (
elc.TPUEmbeddingConfiguration.DIV_DEFAULT
if self._partition_strategy == 'div' else
elc.TPUEmbeddingConfiguration.MOD)
config_proto.pipeline_execution_with_tensor_core = (
self._pipeline_execution_with_tensor_core)
return config_proto
def create_variables_and_ops(self, embedding_variable_name_by_table=None,
slot_variable_names_by_table=None):
"""Create embedding and slot variables, with ops to load and retrieve them.
Args:
embedding_variable_name_by_table: A dictionary mapping from string of
table name to string of embedding variable name. If `None`, the table
name itself is used as the embedding variable name.
slot_variable_names_by_table: A dictionary mapping from string of table
name to `AdamSlotVariableNames`, `AdagradSlotVariableName` etc. If
`None`, defaults from `get_default_slot_variable_names()` will be used.
Returns:
`tpu_embedding.VariablesAndOps` with:
A dictionary mapping from string of table name to embedding variables,
A dictionary mapping from string of table name to AdagradSlotVariable,
AdamSlotVariables etc with slot variables,
A function which returns a list of ops to load embedding and slot
variables from CPU to TPU.
A function which returns a list of ops to retrieve embedding and slot
variables from TPU to CPU.
"""
embedding_variables_by_table = {}
slot_variables_by_table = {}
load_op_fns = []
retrieve_op_fns = []
for table in self._table_to_config_dict:
if embedding_variable_name_by_table:
embedding_variable_name = embedding_variable_name_by_table[table]
else:
embedding_variable_name = table
if slot_variable_names_by_table:
slot_variable_names = slot_variable_names_by_table[table]
else:
optimizer_handler = self._optimizer_handler_dict[table]
slot_variable_names = (
optimizer_handler.get_default_slot_variable_names(table))
device_fn = _create_device_fn(self._hosts)
with ops.device(device_fn):
table_variables = _create_partitioned_variables(
name=embedding_variable_name,
num_hosts=self._num_hosts,
vocabulary_size=self._table_to_config_dict[table].vocabulary_size,
embedding_dimension=self._table_to_config_dict[table].dimension,
initializer=self._table_to_config_dict[table].initializer,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
embedding_variables_by_table[table] = table_variables
slot_variables_for_table, load_ops_fn, retrieve_ops_fn = (
self._optimizer_handler_dict[table].create_variables_and_ops(
table, slot_variable_names, self._num_hosts,
self._table_to_config_dict[table], table_variables)
)
slot_variables_by_table[table] = slot_variables_for_table
load_op_fns.append(load_ops_fn)
retrieve_op_fns.append(retrieve_ops_fn)
def load_ops():
"""Calls and returns the load ops for each embedding table.
Returns:
A list of ops to load embedding and slot variables from CPU to TPU.
"""
load_ops_list = []
for load_op_fn in load_op_fns:
load_ops_list.extend(load_op_fn())
return load_ops_list
def retrieve_ops():
"""Calls and returns the retrieve ops for each embedding table.
Returns:
A list of ops to retrieve embedding and slot variables from TPU to CPU.
"""
retrieve_ops_list = []
for retrieve_op_fn in retrieve_op_fns:
retrieve_ops_list.extend(retrieve_op_fn())
return retrieve_ops_list
return VariablesAndOps(embedding_variables_by_table,
slot_variables_by_table,
load_ops, retrieve_ops)
def generate_enqueue_ops(self, enqueue_datas_list):
"""Generate enqueue ops.
Args:
enqueue_datas_list: a list of dictionaries mapping from strings
of feature names to EnqueueData. Each dictionary is for one
TPU core. Dictionaries for the same host should be contiguous
in the list.
Returns:
Ops to enqueue to TPU for embedding.
"""
self._validate_generate_enqueue_ops_enqueue_datas_list(enqueue_datas_list)
return [
self._generate_enqueue_op(
enqueue_datas, device_ordinal=i % self._num_cores_per_host)
for i, enqueue_datas in enumerate(enqueue_datas_list)
]
def _validate_generate_enqueue_ops_enqueue_datas_list(self,
enqueue_datas_list):
"""Validate `enqueue_datas_list`."""
feature_set = set(self._feature_to_config_dict.keys())
contiguous_device = None
for i, enqueue_datas in enumerate(enqueue_datas_list):
used_feature_set = set(enqueue_datas.keys())
# Check features are valid.
missing_feature_set = feature_set - used_feature_set
if missing_feature_set:
raise ValueError('`enqueue_datas_list[{}]` is missing a feature that is '
'in `feature_to_config_dict`: {}.'.format(
i, missing_feature_set))
extra_feature_set = used_feature_set - feature_set
if extra_feature_set:
raise ValueError('`enqueue_datas_list[{}]` has a feature that is not '
'in `feature_to_config_dict`: {}.'.format(
i, extra_feature_set))
device = None
device_feature = None
for feature, enqueue_data in six.iteritems(enqueue_datas):
combiner = self._table_to_config_dict[
self._feature_to_config_dict[feature].table_id].combiner
if not isinstance(enqueue_data, EnqueueData):
raise ValueError('`enqueue_datas_list[{}]` has a feature that is '
'not mapped to `EnqueueData`. `feature`: {}'.format(
i, feature))
if enqueue_data.sample_indices is None and combiner:
raise ValueError('`enqueue_datas_list[{}]` has a feature whose '
'`EnqueueData` is missing `sample_indices` although its '
'table uses a combiner. `feature`: {}, combiner: {}.'.format(
i, feature, combiner))
if (enqueue_data.sample_indices is not None and
enqueue_data.sample_indices.op.device !=
enqueue_data.embedding_indices.op.device):
raise ValueError(
'Device of sample_indices does not agree with '
'that of embedding_indices for feature {}.'.format(feature))
if (enqueue_data.aggregation_weights is not None and
enqueue_data.aggregation_weights.op.device !=
enqueue_data.embedding_indices.op.device):
raise ValueError(
'Device of aggregation_weights does not agree with '
'that of embedding_indices for feature {}.'.format(feature))
# Check all features are on the same device.
if device is None:
device = enqueue_data.embedding_indices.op.device
device_feature = feature
else:
if device != enqueue_data.embedding_indices.op.device:
raise ValueError('Devices are different between features in '
'`enqueue_datas_list[{}]`; '
'devices: {}, {}; features: {}, {}.'.format(
i, device,
enqueue_data.embedding_indices.op.device,
feature, device_feature))
if i % self._num_cores_per_host:
if device != contiguous_device:
raise ValueError('We expect the `enqueue_datas` which are on the '
'same host to be contiguous in '
'`enqueue_datas_list`, '
'`enqueue_datas_list[{}]` is on device {}, '
'but is expected to be on device {}.'.format(
i, device, contiguous_device))
else:
contiguous_device = device
def _generate_enqueue_op(self, enqueue_datas, device_ordinal):
enqueue_data0 = list(enqueue_datas.values())[0]
with ops.colocate_with(enqueue_data0.embedding_indices):
(sample_indices_list, embedding_indices_list, aggregation_weights_list,
table_ids, max_sequence_lengths) = (
self._format_for_tpu_embedding_sparse_tensor_batch(enqueue_datas))
return tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch(
sample_indices_list,
embedding_indices_list,
aggregation_weights_list,
table_ids,
device_ordinal=device_ordinal,
combiners=self._combiners,
max_sequence_lengths=max_sequence_lengths)
def _format_for_tpu_embedding_sparse_tensor_batch(self, enqueue_datas):
"""Format sparse features for `enqueue_tpu_embedding_sparse_tensor_batch()`.
Args:
enqueue_datas: a `Dict` of tensors for embedding. Can be sparse or
dense.
Returns:
Arguments for `enqueue_tpu_embedding_sparse_tensor_batch()`.
"""
(sample_indices_list, embedding_indices_list, aggregation_weights_list,
table_ids, max_sequence_lengths) = [], [], [], [], []
for table_id, table in enumerate(self._table_to_features_dict):
features = self._table_to_features_dict[table]
for feature in features:
enqueue_data = enqueue_datas[feature]
sample_indices = (
enqueue_data.sample_indices
if enqueue_data.sample_indices is not None else array_ops.zeros(
(0,), dtype=dtypes.int32))
sample_indices_list.append(sample_indices)
aggregation_weights = (
enqueue_data.aggregation_weights if
enqueue_data.aggregation_weights is not None else array_ops.zeros(
(0,), dtype=dtypes.float32))
aggregation_weights_list.append(aggregation_weights)
embedding_indices_list.append(enqueue_data.embedding_indices)
table_ids.append(table_id)
max_sequence_lengths.append(
self._feature_to_config_dict[feature].max_sequence_length)
return (sample_indices_list, embedding_indices_list,
aggregation_weights_list, table_ids, max_sequence_lengths)
def get_activations(self):
"""Get activations for features.
This should be called within `computation` that is passed to
`tpu.replicate` and friends.
Returns:
A dictionary mapping from `String` of feature name to `Tensor`
of activation.
"""
recv_activations = tpu_ops.recv_tpu_embedding_activations(
num_outputs=len(self._table_to_config_dict),
config=self._config_proto.SerializeToString())
activations = collections.OrderedDict()
for table_id, table in enumerate(self._table_to_features_dict):
features = self._table_to_features_dict[table]
num_features = self._table_to_num_features_dict[table]
feature_index = 0
table_activations = array_ops.reshape(
recv_activations[table_id],
[self.batch_size_per_core, num_features, -1])
for feature in features:
seq_length = self._feature_to_config_dict[feature].max_sequence_length
if not seq_length:
activations[feature] = table_activations[:, feature_index, :]
feature_index = feature_index + 1
else:
activations[feature] = (
table_activations[:, feature_index:(feature_index+seq_length), :])
feature_index = feature_index + seq_length
return activations
def generate_send_gradients_op(self,
feature_to_gradient_dict,
learning_rates=None):
"""Send gradient to TPU embedding.
Args:
feature_to_gradient_dict: dict mapping feature names to gradient wrt
activations.
learning_rates: dict mapping from learning rate key to dynamic learning
rate. Defaults to `None`.
Returns:
SendTPUEmbeddingGradients Op.
Raises:
RuntimeError: If `mode` is not `TRAINING`.
"""
if self._mode != TRAINING:
raise RuntimeError('Only in training mode gradients need to '
'be sent to TPU embedding; got mode {}.'
.format(self._mode))
if learning_rates is None:
learning_rates = dict()
gradients = []
for table in self._table_to_features_dict:
features = self._table_to_features_dict[table]
table_gradients = []
for feature in features:
gradient = feature_to_gradient_dict[feature]
# Expand dims for non-sequence feature to match sequence features.
if gradient.shape.ndims == 2:
gradient = array_ops.expand_dims(gradient, 1)
table_gradients.append(gradient)
interleaved_table_grads = array_ops.reshape(
array_ops.concat(table_gradients, axis=1),
[-1, array_ops.shape(table_gradients[0])[-1]])
gradients.append(interleaved_table_grads)
return tpu_ops.send_tpu_embedding_gradients(
inputs=gradients,
learning_rates=[
learning_rates[tag] for tag in self._learning_rate_keys
],
config=self.config_proto.SerializeToString())
def _get_optimizer_handler_by_table(self):
optimizer_handlers = {}
for table, table_config in self.table_to_config_dict.items():
if table_config.optimization_parameters is not None:
optimizer = table_config.optimization_parameters
else:
optimizer = self._optimization_parameters
optimizer_handlers[table] = _get_optimization_handler(optimizer)
return optimizer_handlers
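# A minimal illustrative sketch (not part of the original module): constructing
# a TPUEmbedding without a live TPU by supplying a DeviceConfig. The table,
# feature, job name, and sizes are hypothetical; batch_size must be a multiple
# of num_cores.
def _example_tpu_embedding():
  table_to_config = {
      'video': TableConfig(vocabulary_size=1000, dimension=16),
  }
  feature_to_config = {
      'watched': FeatureConfig('video'),
  }
  return TPUEmbedding(
      table_to_config,
      feature_to_config,
      batch_size=128,
      mode=TRAINING,
      optimization_parameters=AdagradParameters(learning_rate=0.1),
      device_config=DeviceConfig(
          num_hosts=1, num_cores=8, job_name='/job:tpu_worker'))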
def _validate_table_to_config_dict(table_to_config_dict):
"""Validate `table_to_config_dict`."""
for k, v in six.iteritems(table_to_config_dict):
if not isinstance(v, TableConfig):
raise ValueError('Value of `table_to_config_dict` must be of type '
'`TableConfig`, got {} for {}.'.format(type(v), k))
def _validate_feature_to_config_dict(table_to_config_dict,
feature_to_config_dict):
"""Validate `feature_to_config_dict`."""
used_table_set = set([feature.table_id
for feature in feature_to_config_dict.values()])
table_set = set(table_to_config_dict.keys())
unused_table_set = table_set - used_table_set
if unused_table_set:
raise ValueError('`table_to_config_dict` specifies a table that is not '
'used in `feature_to_config_dict`: {}.'
.format(unused_table_set))
extra_table_set = used_table_set - table_set
if extra_table_set:
raise ValueError('`feature_to_config_dict` refers to a table that is not '
'specified in `table_to_config_dict`: {}.'
.format(extra_table_set))
def _validate_batch_size(batch_size, num_cores):
if batch_size % num_cores:
raise ValueError('`batch_size` is not a multiple of number of '
'cores. `batch_size`={}, `_num_cores`={}.'.format(
batch_size, num_cores))
def _validate_optimization_parameters(optimization_parameters,
table_to_config_dict):
"""Validate global optimization_parameters and per table optimizers.
If global optimizer is `None`, all table optimizers should be non `None`.
Args:
optimization_parameters: global optimizer provided in `TPUEmbedding`
constructor.
table_to_config_dict: A dictionary mapping from string of table name to
`TableConfig`.
"""
tbl_optimizer_missing = False
for _, table_config in table_to_config_dict.items():
if table_config.optimization_parameters is None:
tbl_optimizer_missing = True
break
if optimization_parameters:
if not isinstance(optimization_parameters, _OptimizationParameters):
raise ValueError('`optimization_parameters` must inherit from '
'`_OptimizationParameters`. '
'`type(optimization_parameters)`={}'.format(
type(optimization_parameters)))
else:
# Missing global optimization_parameters.
if tbl_optimizer_missing:
raise ValueError('`optimization_parameters` is missing.')
class _OptimizerHandler(object):
"""Interface class for handling optimizer specific logic."""
def __init__(self, optimization_parameters):
self._optimization_parameters = optimization_parameters
def get_optimization_parameters(self):
return self._optimization_parameters
def set_optimization_parameters(self, table_descriptor):
raise NotImplementedError()
def get_default_slot_variable_names(self, table):
raise NotImplementedError()
def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
table_config, table_variables):
raise NotImplementedError()
class _AdagradHandler(_OptimizerHandler):
"""Handles Adagrad specific logic."""
def __init__(self, optimization_parameters):
super(_AdagradHandler, self).__init__(optimization_parameters)
self._table_to_accumulator_variables_dict = {}
def set_optimization_parameters(self, table_descriptor):
table_descriptor.optimization_parameters.adagrad.SetInParent()
def get_default_slot_variable_names(self, table):
return AdagradSlotVariableName('{}/{}'.format(table, 'Adagrad'))
def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
table_config, table_variables):
accumulator_initializer = init_ops.constant_initializer(
self._optimization_parameters.initial_accumulator)
accumulator_variables = _create_partitioned_variables(
name=slot_variable_names.accumulator,
num_hosts=num_hosts,
vocabulary_size=table_config.vocabulary_size,
embedding_dimension=table_config.dimension,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
initializer=accumulator_initializer)
slot_variables = AdagradSlotVariable(accumulator_variables)
def load_ops_fn():
"""Returns the retrieve ops for AdaGrad embedding tables.
Returns:
A list of ops to load embedding and slot variables from CPU to TPU.
"""
load_op_list = []
for host_id, table_variable, accumulator_variable in (zip(
range(num_hosts), table_variables, accumulator_variables)):
with ops.colocate_with(table_variable):
load_parameters_op = (
tpu_ops.load_tpu_embedding_adagrad_parameters(
parameters=table_variable,
accumulators=accumulator_variable,
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
load_op_list.append(load_parameters_op)
return load_op_list
def retrieve_ops_fn():
"""Returns the retrieve ops for AdaGrad embedding tables.
Returns:
A list of ops to retrieve embedding and slot variables from TPU to CPU.
"""
retrieve_op_list = []
for host_id, table_variable, accumulator_variable in (zip(
range(num_hosts), table_variables, accumulator_variables)):
with ops.colocate_with(table_variable):
retrieved_table, retrieved_accumulator = (
tpu_ops.retrieve_tpu_embedding_adagrad_parameters(
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieve_parameters_op = control_flow_ops.group(
state_ops.assign(table_variable, retrieved_table),
state_ops.assign(accumulator_variable, retrieved_accumulator))
retrieve_op_list.append(retrieve_parameters_op)
return retrieve_op_list
return slot_variables, load_ops_fn, retrieve_ops_fn
class _AdamHandler(_OptimizerHandler):
"""Handles Adam specific logic."""
def __init__(self, optimization_parameters):
super(_AdamHandler, self).__init__(optimization_parameters)
self._table_to_m_variables_dict = {}
self._table_to_v_variables_dict = {}
def set_optimization_parameters(self, table_descriptor):
table_descriptor.optimization_parameters.adam.beta1 = (
self._optimization_parameters.beta1)
table_descriptor.optimization_parameters.adam.beta2 = (
self._optimization_parameters.beta2)
table_descriptor.optimization_parameters.adam.epsilon = (
self._optimization_parameters.epsilon)
table_descriptor.optimization_parameters.adam.use_non_lazy_adam = (
not self._optimization_parameters.lazy_adam)
table_descriptor.optimization_parameters.adam.use_sum_inside_sqrt = (
self._optimization_parameters.sum_inside_sqrt)
def get_default_slot_variable_names(self, table):
return AdamSlotVariableNames('{}/{}/m'.format(table, 'Adam'),
'{}/{}/v'.format(table, 'Adam'))
def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
table_config, table_variables):
m_initializer = init_ops.zeros_initializer()
m_variables = _create_partitioned_variables(
name=slot_variable_names.m,
num_hosts=num_hosts,
vocabulary_size=table_config.vocabulary_size,
embedding_dimension=table_config.dimension,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
initializer=m_initializer)
v_initializer = init_ops.zeros_initializer()
v_variables = _create_partitioned_variables(
name=slot_variable_names.v,
num_hosts=num_hosts,
vocabulary_size=table_config.vocabulary_size,
embedding_dimension=table_config.dimension,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
initializer=v_initializer)
slot_variables = AdamSlotVariables(m_variables, v_variables)
def load_ops_fn():
"""Returns the retrieve ops for AdaGrad embedding tables.
Returns:
A list of ops to load embedding and slot variables from CPU to TPU.
"""
load_op_list = []
for host_id, table_variable, m_variable, v_variable in (zip(
range(num_hosts), table_variables,
m_variables, v_variables)):
with ops.colocate_with(table_variable):
load_parameters_op = (
tpu_ops.load_tpu_embedding_adam_parameters(
parameters=table_variable,
momenta=m_variable,
velocities=v_variable,
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
load_op_list.append(load_parameters_op)
return load_op_list
def retrieve_ops_fn():
"""Returns the retrieve ops for Adam embedding tables.
Returns:
A list of ops to retrieve embedding and slot variables from TPU to CPU.
"""
retrieve_op_list = []
for host_id, table_variable, m_variable, v_variable in (zip(
range(num_hosts), table_variables,
m_variables, v_variables)):
with ops.colocate_with(table_variable):
retrieved_table, retrieved_m, retrieved_v = (
tpu_ops.retrieve_tpu_embedding_adam_parameters(
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieve_parameters_op = control_flow_ops.group(
state_ops.assign(table_variable, retrieved_table),
state_ops.assign(m_variable, retrieved_m),
state_ops.assign(v_variable, retrieved_v))
retrieve_op_list.append(retrieve_parameters_op)
return retrieve_op_list
return slot_variables, load_ops_fn, retrieve_ops_fn
class _StochasticGradientDescentHandler(_OptimizerHandler):
"""Handles stochastic gradient descent specific logic."""
def set_optimization_parameters(self, table_descriptor):
(table_descriptor.optimization_parameters.stochastic_gradient_descent
.SetInParent())
def get_default_slot_variable_names(self, table):
return None
def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
table_config, table_variables):
del table_config
def load_ops_fn():
"""Returns the retrieve ops for AdaGrad embedding tables.
Returns:
A list of ops to load embedding and slot variables from CPU to TPU.
"""
load_op_list = []
for host_id, table_variable in (zip(
range(num_hosts), table_variables)):
with ops.colocate_with(table_variable):
load_parameters_op = (
tpu_ops
.load_tpu_embedding_stochastic_gradient_descent_parameters(
parameters=table_variable,
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
load_op_list.append(load_parameters_op)
return load_op_list
def retrieve_ops_fn():
"""Returns the retrieve ops for SGD embedding tables.
Returns:
A list of ops to retrieve embedding and slot variables from TPU to CPU.
"""
retrieve_op_list = []
for host_id, table_variable in (zip(
range(num_hosts), table_variables)):
with ops.colocate_with(table_variable):
retrieved_table = (
tpu_ops
.retrieve_tpu_embedding_stochastic_gradient_descent_parameters(
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieve_parameters_op = control_flow_ops.group(
state_ops.assign(table_variable, retrieved_table))
retrieve_op_list.append(retrieve_parameters_op)
return retrieve_op_list
return None, load_ops_fn, retrieve_ops_fn
def _get_optimization_handler(optimization_parameters):
if isinstance(optimization_parameters, AdagradParameters):
return _AdagradHandler(optimization_parameters)
elif isinstance(optimization_parameters, AdamParameters):
return _AdamHandler(optimization_parameters)
elif isinstance(optimization_parameters, StochasticGradientDescentParameters):
return _StochasticGradientDescentHandler(optimization_parameters)
else:
raise NotImplementedError()
def _create_ordered_dict(d):
"""Create an OrderedDict from Dict."""
return collections.OrderedDict((k, d[k]) for k in sorted(d))
def _create_combiners(table_to_config_dict, table_to_features_dict):
"""Create a per feature list of combiners, ordered by table."""
combiners = []
for table in table_to_config_dict:
combiner = table_to_config_dict[table].combiner or 'sum'
combiners.extend([combiner] * len(table_to_features_dict[table]))
return combiners
def _create_table_to_features_and_num_features_dicts(feature_to_config_dict):
"""Create mapping from table to a list of its features."""
table_to_features_dict_tmp = {}
table_to_num_features_dict_tmp = {}
for feature, feature_config in six.iteritems(feature_to_config_dict):
if feature_config.table_id in table_to_features_dict_tmp:
table_to_features_dict_tmp[feature_config.table_id].append(feature)
else:
table_to_features_dict_tmp[feature_config.table_id] = [feature]
table_to_num_features_dict_tmp[feature_config.table_id] = 0
if feature_config.max_sequence_length == 0:
table_to_num_features_dict_tmp[feature_config.table_id] = (
table_to_num_features_dict_tmp[feature_config.table_id] + 1)
else:
table_to_num_features_dict_tmp[feature_config.table_id] = (
table_to_num_features_dict_tmp[feature_config.table_id] +
feature_config.max_sequence_length)
table_to_features_dict = collections.OrderedDict()
table_to_num_features_dict = collections.OrderedDict()
for table in sorted(table_to_features_dict_tmp):
table_to_features_dict[table] = sorted(table_to_features_dict_tmp[table])
table_to_num_features_dict[table] = table_to_num_features_dict_tmp[table]
return table_to_features_dict, table_to_num_features_dict
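# A brief illustration of the counting rule above (hypothetical configs): if
# table 't' is shared by feature 'f1' with max_sequence_length=0 and feature
# 'f2' with max_sequence_length=3, then table_to_features_dict['t'] is
# ['f1', 'f2'] and table_to_num_features_dict['t'] is 1 + 3 = 4.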
def _create_device_fn(hosts):
"""Create device_fn() to use with _create_partitioned_variables()."""
def device_fn(op):
"""Returns the `device` for `op`."""
part_match = re.match(r'.*/part_(\d+)(/|$)', op.name)
dummy_match = re.match(r'.*dummy_(\d+).*', op.name)
if not part_match and not dummy_match:
raise RuntimeError(
'Internal Error: Expected {} to contain /part_* or dummy_*'.format(
op.name))
if part_match:
idx = int(part_match.group(1))
else:
idx = int(dummy_match.group(1))
device = hosts[idx]
logging.debug('assigning %s to %s.', op, device)
return device
return device_fn
def _create_partitioned_variables(name,
num_hosts,
vocabulary_size,
embedding_dimension,
initializer,
collections=None): # pylint: disable=redefined-outer-name
"""Creates ParitionedVariables based on `num_hosts` for `table`."""
num_slices = min(vocabulary_size, num_hosts)
var_list = list(
variable_scope.get_variable(
name,
shape=(vocabulary_size, embedding_dimension),
partitioner=partitioned_variables.fixed_size_partitioner(num_slices),
dtype=dtypes.float32,
initializer=initializer,
collections=collections,
trainable=False))
if vocabulary_size >= num_hosts:
return var_list
# For padded part, define the dummy variable to be loaded into TPU system.
for idx in range(num_hosts - vocabulary_size):
var_list.append(
variable_scope.get_variable(
'dummy_{}_{}'.format(vocabulary_size + idx, name),
shape=(1, embedding_dimension),
dtype=dtypes.float32,
initializer=initializer,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
trainable=False))
return var_list
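# A minimal sketch (defined but not executed on import) of how the two helpers
# above fit together; the host device strings below are hypothetical examples.
def _example_partitioned_table_setup():
  """Creates per-host shards for a small table and places them via device_fn."""
  hosts = ['/job:worker/task:0/device:CPU:0',
           '/job:worker/task:1/device:CPU:0']
  # device_fn routes each '/part_<i>' (or 'dummy_<i>') variable op to hosts[i].
  device_fn = _create_device_fn(hosts)
  with ops.device(device_fn):
    # vocabulary_size >= num_hosts, so one slice per host is created; a real
    # caller would pass a proper initializer instead of None.
    return _create_partitioned_variables(
        name='example_table',
        num_hosts=len(hosts),
        vocabulary_size=8,
        embedding_dimension=4,
        initializer=None)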
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_embedding.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Utilities to handle tensor tracer parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import re
from tensorflow.python.platform import tf_logging as logging
TRACE_MODE_NAN_INF = 'nan-inf'
TRACE_MODE_PART_TENSOR = 'part-tensor'
TRACE_MODE_FULL_TENSOR = 'full-tensor'
TRACE_MODE_FULL_IF_NAN = 'trace-back-if-nan'
TRACE_MODE_NORM = 'norm'
TRACE_MODE_MAX_ABS = 'max-abs'
TRACE_MODE_SUMMARY = 'summary'
# Summary mode collects a finite set of signatures for each traced tensor
# (such as norm, max, min, mean) and dumps them using TensorBoard summaries.
TRACE_MODE_FULL_TENSOR_SUMMARY = 'full_tensor_summary'
# Full tensor summary mode dumps the whole values of the traced tensors,
# without any processing, using TensorBoard summaries.
_FLAG_NAME_TRACE_STACK_SIZE = 'trace_stack_size'
_SUBMODE_BRIEF = 'brief'
_SUBMODE_DETAILED = 'detailed'
_FLAGS_ENV_VAR = 'TENSOR_TRACER_FLAGS'
_FLAG_SINGLE_QUOTE_PAT = re.compile(r"\s*--([^=]+)='([^']*)'")
_FLAG_DOUBLE_QUOTE_PAT = re.compile(r'\s*--([^=]+)="([^"]*)"')
_FLAG_NO_QUOTE_PAT = re.compile(r'\s*--([^=]+)=(\S*)')
_FLAG_NO_EQUAL_PAT = re.compile(r'\s*--([^=]+)\s*')
_FLAG_NAME_ENABLE = 'enable'
_FLAG_NAME_TRACE_MODE = 'trace_mode'
_FLAG_NAME_USE_COMPACT_TRACE = 'compact_trace'
_FLAG_NAME_TRACE_SCALAR_OPS = 'trace_scalar'
_FLAG_NAME_TRACE_BEFORE_OPS = 'trace_before_included_ops'
_FLAG_NAME_TRACE_AFTER_OPS = 'trace_after_included_ops'
_FLAG_NAME_SUBMODE = 'submode'
_FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS = 'include_less_interesting_ops'
_FLAG_NAME_EXCLUDED_OPNAMES = 'excluded_opnames'
_FLAG_NAME_EXCLUDED_OPTYPES = 'excluded_optypes'
_FLAG_NAME_INCLUDED_OPNAMES = 'included_opnames'
_FLAG_NAME_INCLUDED_OPTYPES = 'included_optypes'
_FLAG_NAME_INCLUDED_CORES = 'included_cores'
_FLAG_NAME_TRACE_DIR = 'trace_dir'
_FLAG_NAME_REPORT_FILE = 'report_file'
_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR = 'use_test_undeclared_outputs_dir'
_FLAG_NAME_OP_RANGE = 'op_range'
# Folder to dump the pre (before tensor tracer updates) and post graphs (after
# tensor tracer updates).
_FLAG_DUMP_BEFORE_AFTER_GRAPHS = 'dump_graphs'
_OP_RANGE_PAT = re.compile(r'(\d+):(\d+)')
_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR = 'TEST_UNDECLARED_OUTPUTS_DIR'
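# An illustrative helper (a sketch; the flag values are made-up examples)
# showing the grammar the patterns above accept: flags are space separated,
# each of the form --name=value with a single-quoted, double-quoted, or
# unquoted value, or just --name for boolean-style flags.
def _example_flag_strings():
  """Returns which sample flag string is matched by which pattern above."""
  samples = {
      '--trace_mode="norm"': _FLAG_DOUBLE_QUOTE_PAT,
      "--included_opnames='dense.*'": _FLAG_SINGLE_QUOTE_PAT,
      '--trace_stack_size=2': _FLAG_NO_QUOTE_PAT,
      '--enable': _FLAG_NO_EQUAL_PAT,
  }
  return {flag: bool(pattern.match(flag)) for flag, pattern in samples.items()}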
class TTParameters(object):
"""A class that handles the parameters of Tensor Tracer."""
def __init__(self, env=None):
if env:
self._env = env
else:
self._env = os.environ
self._validate_flag_names()
self.trace_mode = self._get_trace_mode()
self.submode = self._get_submode()
self.trace_dir = self._get_trace_dir()
self.report_file_path = self._get_report_filepath()
self.op_range = self._get_op_range()
self.excluded_opname_re_list = self._flag_value_to_re_list(
_FLAG_NAME_EXCLUDED_OPNAMES)
self.excluded_optype_re_list = self._flag_value_to_re_list(
_FLAG_NAME_EXCLUDED_OPTYPES)
self.included_opname_re_list = self._flag_value_to_re_list(
_FLAG_NAME_INCLUDED_OPNAMES)
self.included_optype_re_list = self._flag_value_to_re_list(
_FLAG_NAME_INCLUDED_OPTYPES)
self.is_conditional_trace = self._is_conditional_trace_mode()
self.trace_scalar_ops = self.is_flag_on(_FLAG_NAME_TRACE_SCALAR_OPS)
self.use_compact_trace = self.is_flag_on(_FLAG_NAME_USE_COMPACT_TRACE)
# _trace_ops_before_included and _trace_ops_after_included denote the depth
# of tracing relative to the ops given in --included_opnames or
# --included_optypes.
# For example, in the below graph
# op1 --> op2 --> op3 --> op4 --> op5
# If --included_opnames=op3 then only op3 will be traced.
# If also --trace_before_included_ops=2 (_trace_ops_before_included), then
# op1 and op2 will be traced as they are at most 2 hops apart from an
# included op. Similarly, if --trace_after_included_ops=2, then op4 and op5
# will also be traced.
self.trace_ops_before_included = self._get_flag_int_value(
_FLAG_NAME_TRACE_BEFORE_OPS, 0)
self.trace_ops_after_included = self._get_flag_int_value(
_FLAG_NAME_TRACE_AFTER_OPS, 0)
self.trace_stack_size = self._get_flag_int_value(
_FLAG_NAME_TRACE_STACK_SIZE, 1)
_, self.graph_dump_path = self.get_flag_value(
_FLAG_DUMP_BEFORE_AFTER_GRAPHS)
self.included_cores = self._flag_value_as_int_list(
_FLAG_NAME_INCLUDED_CORES)
self.include_less_interesting_ops, _ = self.get_flag_value(
_FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS)
def _is_conditional_trace_mode(self):
return self.trace_mode == TRACE_MODE_FULL_IF_NAN
def _get_report_filepath(self):
"""Sets the path of the output report file."""
found, report_file_path = self.get_flag_value(
_FLAG_NAME_REPORT_FILE)
if found and report_file_path \
and self.use_test_undeclared_outputs_dir():
if os.path.isabs(report_file_path):
raise ValueError('If use_test_undeclared_outputs_dir is set, '
'report_file_path cannot be an absolute path (%s)'
% report_file_path)
outputs_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
report_file_path = os.path.join(outputs_dir, report_file_path)
return report_file_path
def _get_op_range(self):
"""Sets the index range of the Ops that we will consider tracing."""
found, op_range = self.get_flag_value(_FLAG_NAME_OP_RANGE)
if not found or not op_range:
op_range = (-1, -1) # this means including all ops.
return op_range
match = _OP_RANGE_PAT.match(op_range)
if not match:
op_range = (-1, -1) # this means including all ops.
return op_range
op_range = (int(match.group(1)), int(match.group(2)))
return op_range
def _get_trace_dir(self):
found, trace_dir = self.get_flag_value(_FLAG_NAME_TRACE_DIR)
if found and trace_dir \
and self.use_test_undeclared_outputs_dir():
raise ValueError('Cannot use --%s and --%s at the same time'
%(_FLAG_NAME_TRACE_DIR,
_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR))
if self.use_test_undeclared_outputs_dir():
trace_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
return trace_dir
def _get_trace_mode(self):
"""Checks if the given trace mode is valid."""
found, trace_mode = self.get_flag_value(_FLAG_NAME_TRACE_MODE)
if not found or not trace_mode:
trace_mode = TRACE_MODE_NORM
valid_trace_modes = [
TRACE_MODE_NAN_INF, TRACE_MODE_PART_TENSOR, TRACE_MODE_FULL_TENSOR,
TRACE_MODE_NORM, TRACE_MODE_MAX_ABS, TRACE_MODE_FULL_IF_NAN,
TRACE_MODE_SUMMARY, TRACE_MODE_FULL_TENSOR_SUMMARY
]
if trace_mode not in valid_trace_modes:
raise ValueError('Invalid trace mode "%s" given to the Tensor_Tracer. '
'Valid trace modes are: %s'%(trace_mode,
valid_trace_modes))
return trace_mode
def is_brief_mode(self):
return self.submode == _SUBMODE_BRIEF
def _get_submode(self):
"""Checks if the given submode is valid."""
found, submode = self.get_flag_value(_FLAG_NAME_SUBMODE)
if not found or not submode:
submode = _SUBMODE_DETAILED
if not submode:
return
valid_submodes = [_SUBMODE_DETAILED, _SUBMODE_BRIEF]
if submode not in valid_submodes:
raise ValueError('Invalid submode "%s" given to the Tensor_Tracer. '
'Valid submodes are: %s'%(submode,
valid_submodes))
return submode
@staticmethod
def match_next_flag(flags, pos):
"""Returns the match for the next TensorTracer flag.
Args:
flags: a string that contains the flags.
pos: where in flags to start the search.
Returns:
A pair where the first element is the regular-expression
match found and the second element indicates if the match
has a value.
"""
match = _FLAG_DOUBLE_QUOTE_PAT.match(flags, pos)
if match:
return match, True
match = _FLAG_SINGLE_QUOTE_PAT.match(flags, pos)
if match:
return match, True
match = _FLAG_NO_QUOTE_PAT.match(flags, pos)
if match:
return match, True
match = _FLAG_NO_EQUAL_PAT.match(flags, pos)
if match:
# The flag is found but is not given a value.
return match, False
# The flag is not found.
return None, False
def _validate_flag_names(self):
"""Validates if the TensorTrace flags passed are valid."""
valid_flag_names = [
_FLAG_NAME_ENABLE, _FLAG_NAME_TRACE_MODE, _FLAG_NAME_USE_COMPACT_TRACE,
_FLAG_NAME_TRACE_SCALAR_OPS, _FLAG_NAME_TRACE_BEFORE_OPS,
_FLAG_NAME_TRACE_AFTER_OPS, _FLAG_NAME_TRACE_STACK_SIZE,
_FLAG_NAME_SUBMODE, _FLAG_NAME_EXCLUDED_OPNAMES,
_FLAG_NAME_EXCLUDED_OPTYPES, _FLAG_NAME_INCLUDED_OPNAMES,
_FLAG_NAME_INCLUDED_OPTYPES, _FLAG_NAME_TRACE_DIR,
_FLAG_NAME_INCLUDED_CORES, _FLAG_NAME_REPORT_FILE,
_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR,
_FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS, _FLAG_NAME_OP_RANGE,
_FLAG_DUMP_BEFORE_AFTER_GRAPHS
]
tensor_tracer_flags = self._env.get(_FLAGS_ENV_VAR)
if not tensor_tracer_flags:
return
pos = 0
while True:
match, _ = TTParameters.match_next_flag(tensor_tracer_flags, pos)
if not match:
break
flag_name = match.group(1)
if flag_name not in valid_flag_names:
raise ValueError(
'The flag name "%s" passed via the environment variable "%s" '
'is invalid. Valid flag names are:'
'\n%s'%(flag_name, _FLAGS_ENV_VAR, valid_flag_names))
pos = match.end()
def _flag_value_as_int_list(self, wanted_flag_name):
"""Returns the integer list of a TensorTracer flag.
Args:
wanted_flag_name: the name of the flag we are looking for.
Returns:
The value of the flag as a list of integers, or an empty list if the flag
is not set or cannot be parsed.
"""
int_list = []
found, flag_value = self.get_flag_value(wanted_flag_name)
if found:
try:
integer_values = flag_value.split(',')
int_list = [int(int_val) for int_val in integer_values]
except ValueError:
logging.warning('Cannot convert %s to int for flag %s', flag_value,
wanted_flag_name)
return int_list
def _get_flag_int_value(self, wanted_flag_name, default_value):
"""Returns the int value of a TensorTracer flag.
Args:
wanted_flag_name: the name of the flag we are looking for.
default_value: the default value for the flag, if not provided.
Returns:
The value of the flag as an int, or default_value if the flag is not set
or cannot be parsed.
"""
flag_int_value = default_value
found, flag_value = self.get_flag_value(wanted_flag_name)
if found:
try:
flag_int_value = int(flag_value)
except ValueError:
logging.warning('Cannot convert %s to int for flag %s' % (
flag_value, wanted_flag_name))
return flag_int_value
def get_flag_value(self, wanted_flag_name):
"""Returns the value of a TensorTracer flags.
Args:
wanted_flag_name: the name of the flag we are looking for.
Returns:
A pair where the first element indicates if the flag is
found and the second element is the value of the flag.
Raises:
RuntimeError: If supposedly dead code is reached.
"""
tensor_tracer_flags = self._env.get(_FLAGS_ENV_VAR)
if not tensor_tracer_flags:
return False, None
pos = 0
while True:
match, has_value = TTParameters.match_next_flag(
tensor_tracer_flags, pos)
if not match:
return False, None
flag_name = match.group(1)
if has_value:
flag_value = match.group(2)
else:
flag_value = None
if flag_name == wanted_flag_name:
return True, flag_value
pos = match.end()
raise RuntimeError('Should not reach here.')
def _flag_value_to_re_list(self, flag_name):
"""Converts list of strings to compiled RE."""
re_list = []
found, flag_value = self.get_flag_value(flag_name)
if not found or not flag_value:
return re_list
list_of_values = flag_value.split()
for v in list_of_values:
r = re.compile(v)
re_list.append(r)
return re_list
def is_flag_on(self, flag_name):
"""Returns True if the given flag is on."""
found, flag_value = self.get_flag_value(flag_name)
if not found:
return False
if flag_value is None:
return True
# Depends on the flag value.
flag_value = flag_value.lower()
enabled = flag_value in ['1', 't', 'true', 'y', 'yes']
return enabled
def is_enabled(self):
"""Returns True if TensorTracer is enabled."""
if self.is_flag_on(_FLAG_NAME_ENABLE):
logging.info('Tensor Tracer is enabled with flags %s.' %
self._env.get(_FLAGS_ENV_VAR))
return True
else:
return False
def use_test_undeclared_outputs_dir(self):
"""Decides the output directory of the report and trace files.
Args:
None.
Returns:
True if the output files should be written to the
test-undeclared-outputs-directory defined via an
env variable.
"""
return self.is_flag_on(_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)
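# A minimal usage sketch, assuming a hand-built environment dictionary rather
# than os.environ; the flag string below is only an example value.
def _example_tt_parameters():
  """Builds TTParameters from an explicit env dict (illustrative values only)."""
  env = {_FLAGS_ENV_VAR: '--enable=1 --trace_mode=norm --trace_stack_size=2'}
  params = TTParameters(env=env)
  # With these flags: params.trace_mode == TRACE_MODE_NORM,
  # params.trace_stack_size == 2, and params.is_enabled() is True.
  return params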
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tensor_tracer_flags.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for TPU InfeedQueue methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu_feed
class InfeedTest(test.TestCase):
def testConstructor(self):
"""Tests that the constructor can be called with different arguments."""
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
self.assertEqual(i.number_of_tuple_elements, 2)
self.assertEqual(i.tuple_types, None)
self.assertEqual(i.tuple_shapes, None)
self.assertEqual(i.number_of_shards, None)
i = tpu_feed.InfeedQueue(
tuple_types=[dtypes.float32, dtypes.int32, dtypes.int32])
self.assertEqual(i.number_of_tuple_elements, 3)
self.assertEqual(i.tuple_types,
[dtypes.float32, dtypes.int32, dtypes.int32])
self.assertEqual(i.tuple_shapes, None)
self.assertEqual(i.number_of_shards, None)
i = tpu_feed.InfeedQueue(tuple_shapes=[[1], [2, 3]])
self.assertEqual(i.number_of_tuple_elements, 2)
self.assertEqual(i.tuple_types, None)
self.assertEqual(i.tuple_shapes, [[1], [2, 3]])
self.assertEqual(i.number_of_shards, None)
i = tpu_feed.InfeedQueue(shard_dimensions=[1, 0, 7])
self.assertEqual(i.number_of_tuple_elements, 3)
self.assertEqual(i.tuple_types, None)
self.assertEqual(i.tuple_shapes, None)
self.assertEqual([p.shard_dimension
for p in i.sharding_policies], [1, 0, 7])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue()
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(
number_of_tuple_elements=2, tuple_types=[dtypes.float32])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2, tuple_shapes=[[1]])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2, shard_dimensions=[1])
with self.assertRaises(ValueError):
i = tpu_feed.InfeedQueue(tuple_shapes=[[1], [2, 3]], shard_dimensions=[1])
def testModification(self):
"""Tests modification of the queue post-construction."""
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
i.set_tuple_types([dtypes.float32, dtypes.int32])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
i.set_tuple_types([dtypes.float32, dtypes.float32])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.float32])
with self.assertRaises(ValueError):
i.set_tuple_types([dtypes.float32])
i.set_tuple_shapes([[1], [2, 3]])
self.assertEqual(i.tuple_shapes, [[1], [2, 3]])
i.set_tuple_shapes([[1, 2], [3, 4]])
self.assertEqual(i.tuple_shapes, [[1, 2], [3, 4]])
with self.assertRaises(ValueError):
i.set_tuple_shapes([[1, 2]])
i.set_number_of_shards(2)
self.assertEqual(i.number_of_shards, 2)
i.set_number_of_shards(3)
self.assertEqual(i.number_of_shards, 3)
t1 = constant_op.constant(1, dtypes.int32, shape=[6])
t2 = constant_op.constant(2.0, dtypes.float32, shape=[3, 18])
i.set_configuration_from_input_tensors([t1, t2])
self.assertEqual(i.tuple_shapes, [[6], [3, 18]])
self.assertEqual(i.tuple_types, [dtypes.int32, dtypes.float32])
i.set_configuration_from_sharded_input_tensors([[t2, t1], [t2, t1]])
self.assertEqual(i.number_of_shards, 2)
self.assertEqual(i.tuple_shapes, [[6, 18], [12]])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
i.set_shard_dimensions([1, 0])
i.set_number_of_shards(3)
with self.assertRaises(ValueError):
i.set_number_of_shards(4)
def testFreezing(self):
"""Tests freezing the queue."""
i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
t1 = constant_op.constant(1, dtypes.int32, shape=[2])
t2 = constant_op.constant(2.0, dtypes.float32, shape=[2, 4])
i.set_configuration_from_sharded_input_tensors([[t2, t1], [t2, t1]])
self.assertEqual(i.number_of_shards, 2)
self.assertEqual(i.tuple_shapes, [[4, 4], [4]])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
self.assertEqual(i.shard_dimensions, [0, 0])
i.freeze()
i.set_number_of_shards(2)
i.set_tuple_shapes([[4, 4], [4]])
i.set_tuple_types([dtypes.float32, dtypes.int32])
i.set_shard_dimensions([0, 0])
with self.assertRaises(ValueError):
i.set_number_of_shards(1)
with self.assertRaises(ValueError):
i.set_tuple_shapes([[8, 8], [8]])
with self.assertRaises(ValueError):
i.set_tuple_types([dtypes.int32, dtypes.float32])
with self.assertRaises(ValueError):
i.set_shard_dimensions([1, 0])
self.assertEqual(i.number_of_shards, 2)
self.assertEqual(i.tuple_shapes, [[4, 4], [4]])
self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
self.assertEqual(i.shard_dimensions, [0, 0])
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_infeed_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""A utility to trace tensor values on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import sys
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.tpu import tensor_tracer_flags
from tensorflow.python.tpu import tensor_tracer_report
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import training_util
_DEVICE_TYPE_TPU = 'tpu'
_DEVICE_TYPE_CPU = 'cpu'
_TRACE_MODE_PART_TENSOR_SIZE = 3
_REASON_OUTSIDE_OP_RANGE = 'not-traced-outside-op-range'
_REASON_UNSAFE_OP = 'not-traced-unsafe-op'
_REASON_WHILELOOP_OP = 'not-traced-special-whileloop-op'
_REASON_UNSAFE_SCALAR = 'not-traced-unsafe-scalar'
_REASON_SKIP_SCALAR = 'not-traced-scalar'
_REASON_LESS_INTERESTING_OP = 'not-traced-less-interesting-op'
_REASON_DEVICE_MISMATCH = 'not-traced-device-mismatch'
_REASON_DYNAMIC_SHAPE = 'not-traced-dynamic-shape'
_REASON_SCALAR_GET_TRACED = 'traced-scalar'
_REASON_TENSOR_GET_TRACED = 'traced-tensor'
_REASON_USER_INCLUDED = 'traced-user-included'
_REASON_USER_EXCLUDED = 'not-traced-user-excluded'
_REASON_NOT_EXECUTED = 'not-traced-not-in-exec-path'
_REASON_NON_NUMERIC_TENSOR = 'not-traced-non-numeric-tensor'
_REASON_FEEDS_WHILELOOP_OP = 'not-traced-feeds-special-whileloop-op'
_OUTPUT_STREAM_ESCAPE = 'file://'
_TENSOR_TRACER_COLLECTION = 'tensor_tracer_variables'
_TRACE_FILE_NAME = 'trace.all'
_COMPACT_TRACE_FILE_PREFIX = 'compact_trace.'
_COMPACT_TRACE_ENTRY_INIT_VALUE = -1.0
_TENSOR_TRACER_STORAGE = 'tensor_tracer_storage'
_TT_SNAPSHOT = 'tensor_tracer_snapshot'
_REPLICA_ID_TAG = '#replica-id: '
_TT_SUMMARY_NORM = 'tensor_tracer_norm'
_TT_SUMMARY_MAX = 'tensor_tracer_max'
_TT_SUMMARY_MIN = 'tensor_tracer_min'
_TT_SUMMARY_MEAN = 'tensor_tracer_mean'
_TT_SUMMARY_VAR = 'tensor_tracer_var'
_TT_SUMMARY_SIZE = 'tensor_tracer_size'
_TT_SUMMARY_TAG = 'tensor_tracer_summary'
_TT_TENSORBOARD_PLUGIN_NAME = 'tensor_tracer'
_TT_HOSTCALL_KEY = 'tensor_tracer_host_call'
_TT_EVENT_FILE_SUFFIX = '.tensor_tracer'
_TT_SUMMARY_MAX_QUEUE = 100
def read_tensor_tracer_event_file(event_file):
"""Reads the event file written by tensor tracer.
Args:
event_file: Path to the event file that contains only tensor tracer events.
Returns:
An event dictionary in the form of
{step_number: {tensor_name: tensor_content}}
Raises:
ValueError: If an unexpected trace is found.
"""
event_dict = {}
for trace_event in summary_iterator.summary_iterator(event_file):
# First event is an event with file_version: "brain.Event:2"
if not trace_event.HasField('summary'):
continue
step = trace_event.step
if step not in event_dict:
event_dict[step] = {}
if len(trace_event.summary.value) != 1:
raise ValueError('Single step contains %d summary values,'
' expected 1.' % len(trace_event.summary.value))
tensor_value = trace_event.summary.value[0]
tensor_name = tensor_value.tag
real_shape = [d.size for d in tensor_value.tensor.tensor_shape.dim]
tensor_content = np.frombuffer(
tensor_value.tensor.tensor_content,
dtypes.DType(tensor_value.tensor.dtype).as_numpy_dtype()
).reshape(real_shape)
event_dict[step][tensor_name] = tensor_content
return event_dict
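# A usage sketch, assuming a hypothetical event-file path produced by the
# summary trace modes; each step maps a tensor tag to its numpy content, e.g.:
#   events = read_tensor_tracer_event_file('/tmp/tt/events.out.tensor_tracer')
#   step0_values = events[0]  # {tensor_name: numpy array}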
def tensor_tracepoint(tensor, checkpoint_name):
"""Adds a checkpoint with the given checkpoint name for the given tensor.
The tensor will be added to the list of tensors that will be traced by the
tensor tracer.
Args:
tensor: the tensor object for which the tracing is requested.
checkpoint_name: a string name for the checkpoint. This name has to be a
unique name if used within model comparison. The tensors that have the same
checkpoint identifier are compared in model comparison.
Returns:
The provided tensor.
"""
tensor.graph.get_collection(_TENSOR_TRACER_COLLECTION)
tensor.graph.add_to_collection(_TENSOR_TRACER_COLLECTION,
(tensor, checkpoint_name))
return tensor
def keras_layer_tracepoint(layer, checkpoint_name):
"""An interface for adding the tensor outputs of a keras layer.
Encapsulates tensor_tracepoint.
Args:
layer: A keras layer.
checkpoint_name: a string name for the checkpoint. This name has to be a
unique name if used within model comparison. The tensors that have the same
checkpoint identifier are compared in model comparison.
Returns:
The provided layer.
"""
try:
outputs = layer.output
if tensor_util.is_tensor(outputs):
tensor_tracepoint(outputs, '%s' % (checkpoint_name))
else:
idx = 0
for output_tensor in outputs:
if tensor_util.is_tensor(output_tensor):
tensor_tracepoint(output_tensor, '%s_%d' % (checkpoint_name, idx))
idx += 1
except AttributeError:
pass
except RuntimeError:
pass
return layer
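# A brief usage sketch (the names below are hypothetical): checkpoints added
# this way are stored in the _TENSOR_TRACER_COLLECTION graph collection and
# picked up when TensorTracer instruments the graph, e.g.:
#   logits = tensor_tracepoint(logits, 'logits_checkpoint')
#   dense_layer = keras_layer_tracepoint(dense_layer, 'dense_block_1')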
def _trace_files_need_precreated(output_dir):
"""Return True if trace files must be pre-created by users."""
if not output_dir.startswith('/'):
return False
if len(output_dir) < 5:
return False
if output_dir[2] != 'n':
return False
if output_dir[3] != 's':
return False
if output_dir[1] != 'c':
return False
if output_dir[4] != '/':
return False
return True
class TensorTracer(object):
"""A software construct for tracing tensor values in a TF graph on TPU.
This utility is disabled by default. It can be enabled by setting
the TENSOR_TRACER_FLAGS env variable as:
export TENSOR_TRACER_FLAGS="--enable=1"
If it is enabled, it will trace the output tensor values of
selected Ops in the graph. It has two outputs: (1) the traces and (2)
a report. The traces are dumped to a specified local file on the TPU
host. The report is printed to the log.info of the TPU job.
By passing options via the env variable, users can change:
(1) the trace mode (e.g., detecting NaN/Inf, printing partial or
full tensor values)
(2) which Ops to be traced (via op.name or op.type)
(3) output trace file path.
"""
# The set of graphs that are rewritten by tensor tracer.
_traced_graphs = set()
@staticmethod
def is_enabled():
"""Returns True if TensorTracer is enabled."""
return tensor_tracer_flags.TTParameters().is_enabled()
@staticmethod
def check_device_type(device_type):
"""Checks if the given device type is valid."""
if device_type not in (_DEVICE_TYPE_TPU, _DEVICE_TYPE_CPU):
raise ValueError('Invalid device_type "%s"'%device_type)
@staticmethod
def check_trace_mode(device_type, trace_mode):
"""Checks if the given trace mode work on the given device type.
Args:
device_type: Device type, TPU, GPU, CPU.
trace_mode: Tensor tracer trace mode.
Raises:
ValueError: If the given trace mode is not supported for the device.
"""
if trace_mode in (tensor_tracer_flags.TRACE_MODE_SUMMARY,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY):
if device_type != _DEVICE_TYPE_TPU:
raise ValueError('Device_type "%s" is not yet supported for '
'trace mode "%s"' % (device_type, trace_mode))
@staticmethod
def loop_cond_op(op):
return op.type in ('LoopCond', 'RefLoopCond')
@staticmethod
def while_loop_op(op):
"""Returns true if op is one of the special ops of in a while loop.
Args:
op: A tf.Operation.
Returns:
True if the given op is one of [Switch, Merge, Enter, Exit,
NextIteration, LoopCond], which are all building blocks for TF while
loops.
"""
return (control_flow_util.IsLoopSwitch(op) or
control_flow_util.IsLoopMerge(op) or
control_flow_util.IsLoopEnter(op) or
control_flow_util.IsLoopExit(op) or
TensorTracer.loop_cond_op(op) or
op.type in ('RefNextIteration', 'NextIteration'))
@staticmethod
def unsafe_op(op):
"""Returns True if this op is not safe to be traced."""
if control_flow_util.IsInCond(op):
return True
# Reasons for not including the following op types:
# Assign: causes incorrect results with CPU tracing.
if op.type == 'Assign':
return True
return False
@staticmethod
def device_mismatch(device_type, op):
if device_type == _DEVICE_TYPE_TPU:
# pylint: disable=protected-access
return tpu._TPU_REPLICATE_ATTR not in op.node_def.attr
# pylint: enable=protected-access
return False
@staticmethod
def unsafe_scalar_trace(op):
"""Return true if scalar output tensor from Op is not safe to be traced."""
# Tracing the following causes a cycle in the graph on TPU.
if op.type in ('LoopCond', 'Enter', 'Merge', 'Const',
'Switch', 'Less', 'ReadVariableOp'):
return True
# Tracing the following will cause a casting issue
# with the norm tracing mode, or other compilation issues on CPU.
if op.type in ('VarHandleOp', 'IteratorToStringHandle',
'IteratorGetNext', 'OneShotIterator',
'IteratorV2', 'MakeIterator',
'BatchDatasetV2', 'MapDataset',
'FixedLengthRecordDataset', 'TakeDataset', 'ZipDataset',
'Placeholder', 'PlaceholderWithDefault', 'StridedSlice'):
return True
return False
def _less_interesting_op(self, op):
"""Returns True if the given op is not an interesting one to be traced."""
# If flag is set to include less interesting ops, then include everything.
if self._parameters.include_less_interesting_ops:
return False
# Following ops are highly unlikely to cause bugs.
return op.type in ('Const', 'Identity', 'Cast', 'Shape')
@staticmethod
def reason(op_idx, details):
"""Returns reason why the Op at op_idx is traced or not."""
return '%d %s'%(op_idx, details)
def __init__(self):
"""Initializes a TensorTracer.
Sets the various member fields from the flags (if given) or the defaults.
"""
self._replica_id = None
self._tt_config = tensor_tracer_report.TensorTracerConfig()
self._parameters = tensor_tracer_flags.TTParameters()
self._included_op_full_names = set()
self._host_call_fn = {}
self._cache_variables = {}
def _get_all_cache_variables(self):
return self._cache_variables
def _create_or_get_tensor_values_cache(self, cache_name, graph=None,
shape=None, dtype=dtypes.float32,
num_signatures=None):
"""Creates a variable as the cache to store intermediate tensor values.
Args:
cache_name: Name to be given to the cache (an instance of tf.variable).
graph: Tensorflow graph.
shape: A list of dimensions.
dtype: Data type of created cache.
num_signatures: Number of signatures (not used by this function).
Returns:
A ref to newly created or existing cache with the given dimensions.
Raises:
ValueError: If missing a parameter to create the cache.
"""
def _escape_namescopes(variable_name):
# TODO(deveci): This might cause name collisions as in "foo/bar/mytensor"
# and "foo_bar/mytensor".
return variable_name.replace('/', '_').replace(':', '_')
if cache_name not in self._cache_variables:
if graph is None:
raise ValueError('Graph must be provided at cache creation.')
if shape is None:
raise ValueError('shape must be provided at cache creation.')
graph = graph or ops.get_default_graph()
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
self._cache_variables[cache_name] = variable_scope.get_variable(
_TT_SNAPSHOT + '_' + _escape_namescopes(cache_name),
shape=shape, dtype=dtype,
initializer=init_ops.constant_initializer(
_COMPACT_TRACE_ENTRY_INIT_VALUE),
trainable=False,
use_resource=True,
collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])
return self._cache_variables[cache_name]
def _add_replica_id_to_graph(self):
"""Adds nodes for computing the replica ID to the graph."""
if self._tt_config.num_replicas:
with ops.control_dependencies(None):
# Uses None as dependency to run outside of TPU graph rewrites.
self._replica_id = tpu_ops.tpu_replicated_input(
list(range(self._tt_config.num_replicas)),
name='tt_replica_id')
else:
self._replica_id = 'unknown'
def _inside_op_range(self, idx):
"""Return True if the given index is inside the selected range."""
if idx < self._parameters.op_range[0]:
return False
return (self._parameters.op_range[1] < 0 or
idx <= self._parameters.op_range[1])
def _is_user_included_op(self, op):
"""Checks whether the op is included in the tensor tracer flags.
Args:
op: tf Operation
Returns:
True, if the op is included.
An op is included if:
- Its op name is given in included_opnames
- Its op type is given in included_optypes
- The op is at most _trace_ops_before_included hops before an included op
- The op is at most _trace_ops_after_included hops after an included op
"""
def _is_op_or_any_neighbor_included(op, check_before=0, check_after=0):
"""Helper function to check if op is included or not."""
if op.name in self._included_op_full_names:
return True
for opname_re in self._parameters.included_opname_re_list:
if opname_re.match(op.name):
self._included_op_full_names.add(op.name)
return True
for optype_re in self._parameters.included_optype_re_list:
if optype_re.match(op.type):
self._included_op_full_names.add(op.name)
return True
if check_after > 0:
for out_tensor in op.outputs:
for consumer in out_tensor.consumers():
if _is_op_or_any_neighbor_included(consumer, check_after - 1, 0):
self._included_op_full_names.add(op.name)
return True
if check_before > 0:
for input_tensor in op.inputs:
if _is_op_or_any_neighbor_included(input_tensor.op,
0,
check_before - 1):
self._included_op_full_names.add(op.name)
return True
return False
# check_after and check_before are swapped below, as the call below checks
# the distance from an arbitrary op to the included ops.
return _is_op_or_any_neighbor_included(
op, self._parameters.trace_ops_after_included,
self._parameters.trace_ops_before_included)
def _is_user_excluded_op(self, op):
for opname_re in self._parameters.excluded_opname_re_list:
if opname_re.match(op.name):
return True
for optype_re in self._parameters.excluded_optype_re_list:
if optype_re.match(op.type):
return True
return False
def _signature_types(self):
"""Returns a dictionary holding the order of signatures in the cache for the selected trace mode."""
if self._parameters.trace_mode in set([
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS]):
return {self._parameters.trace_mode: 0}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
return {_TT_SUMMARY_NORM: 0, _TT_SUMMARY_MAX: 1, _TT_SUMMARY_MIN: 2,
_TT_SUMMARY_MEAN: 3, _TT_SUMMARY_VAR: 4, _TT_SUMMARY_SIZE: 5}
return {}
def _num_signature_dimensions(self):
return len(self._signature_types())
def _use_tensor_values_cache(self):
"""Returns True if immediate tensors should be first saved to a cache."""
if self._parameters.trace_mode not in set([
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS,
tensor_tracer_flags.TRACE_MODE_SUMMARY
]):
return False
if (self._parameters.trace_dir and
_trace_files_need_precreated(self._parameters.trace_dir)):
return True
return self._parameters.use_compact_trace
def _use_tensor_buffer(self):
"""Returns true if the whole tensor needs to be cached/buffered in memory."""
return (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)
def _save_tensor_value_to_cache_op(self, cache_idx, updates):
"""Returns an op that will save the given updates to an entry in the cache.
Args:
cache_idx: The cache index of the tensor within the cache.
updates: A dictionary of the signature updates.
Returns:
Cache update operation.
"""
# state_ops.scatter_update allows updates only along the first dimension.
# Make a compact array by concatenating different signatures, and update
# them all together.
sorted_update = []
signature_indices = self._signature_types()
for _, val in sorted(updates.items(),
key=lambda item: signature_indices[item[0]]):
sorted_update.append(val)
cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG)
indices = constant_op.constant([cache_idx])
updates = array_ops.concat(sorted_update, axis=0)
updates = array_ops.reshape(updates, [1, self._num_signature_dimensions()])
return state_ops.scatter_update(cache, indices, updates).op
def _snapshot_tensor(self, tensor):
"""Creates a new tf.Variable and a new tf.Operation that assigns the value of the tensor to this variable.
Args:
tensor: tensor whose values will be stored in a new tf.Variable.
Returns:
An assignment operation.
"""
snapshot_variable = self._create_or_get_tensor_values_cache(
tensor.name, tensor.op.graph,
tensor.shape.as_list(), tensor.dtype)
return state_ops.assign(snapshot_variable, tensor).op
def _preprocess_traced_tensor(self, tensor):
"""Computes NAN/Norm/Max on TPUs before sending to CPU.
Args:
tensor: The tensor to be traced.
Returns:
A dict mapping signature names to tensors, used as input to the trace function.
Raises:
RuntimeError: If the trace mode is invalid.
"""
def _detect_nan_inf(tensor):
"""Trace function for detecting any NaN/Inf in the tensor."""
if tensor.dtype.is_floating:
mask = math_ops.reduce_any(
gen_math_ops.logical_or(
gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))
output_tensor = control_flow_ops.cond(mask,
lambda: constant_op.constant(1.0),
lambda: constant_op.constant(0.0))
else:
output_tensor = constant_op.constant(0.0)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _compute_signature(tensor, tf_op, cast_to_f32=True):
if cast_to_f32:
tensor = math_ops.cast(tensor, dtypes.float32)
output_tensor = tf_op(tensor)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _show_size(tensor):
# Returns the size of the tensor as a signature.
# Not all sizes are known at compile time; also, different replicas
# sometimes get tensors of different sizes. Collect the size here so it can
# be used when merging per-replica data.
tsize = _compute_signature(tensor, array_ops.size, cast_to_f32=False)
# Cast to float32, so that it can be placed into same cache with other
# signatures.
return math_ops.cast(tsize, dtypes.float32)
def _show_max(tensor, cast_to_f32=True):
# returns -inf for empty tensor
return _compute_signature(tensor, math_ops.reduce_max, cast_to_f32)
def _show_min(tensor, cast_to_f32=True):
# returns inf for empty tensor
return _compute_signature(tensor, math_ops.reduce_min, cast_to_f32)
def _show_norm(tensor, cast_to_f32=True):
# returns 0 for empty tensor
return _compute_signature(tensor, linalg_ops.norm, cast_to_f32)
def _show_mean_and_variance(tensor, cast_to_f32=True):
if cast_to_f32:
tensor = math_ops.cast(tensor, dtypes.float32)
# returns nan for empty tensor
mean, var = nn_impl.moments(array_ops.reshape(tensor, [-1]), axes=[0])
# The shape has to be 1. Set it if it does not have the information.
mean = array_ops.reshape(mean, [1])
var = array_ops.reshape(var, [1])
return mean, var
def _show_max_abs(tensor):
tensor = math_ops.cast(tensor, dtypes.float32)
output_tensor = math_ops.reduce_max(math_ops.abs(tensor))
zero = constant_op.constant(0, dtypes.float32)
output_tensor = gen_math_ops.maximum(zero, output_tensor)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _detect_inf_nan_producer(tensor):
"""Checks if the tensor is the first NaN/Inf tensor in the computation path."""
if tensor.op.inputs:
inp_check = [
_detect_nan_inf(inp_tensor) for inp_tensor in tensor.op.inputs
]
is_any_input_inf_nan = math_ops.add_n(inp_check)
else:
is_any_input_inf_nan = constant_op.constant(0.0, dtypes.float32)
is_current_tensor_inf_nan = _detect_nan_inf(tensor)
# An op is a NaN/INF producer only when all of its inputs are nan/inf free
# (is_any_input_inf_nan = 0) and its own output has nan/inf
# (is_current_tensor_inf_nan = 1). The difference below is 1 only for such
# producer ops.
is_nan_producer = is_current_tensor_inf_nan - is_any_input_inf_nan
is_nan_producer = math_ops.reduce_any(is_nan_producer > 0)
return is_nan_producer
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_IF_NAN):
return {self._parameters.trace_mode: _detect_inf_nan_producer(tensor)}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:
return {self._parameters.trace_mode: _detect_nan_inf(tensor)}
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_PART_TENSOR):
return {self._parameters.trace_mode: tensor}
if (self._parameters.trace_mode in (
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)):
return {self._parameters.trace_mode: tensor}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NORM:
return {self._parameters.trace_mode: _show_norm(tensor)}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_MAX_ABS:
return {self._parameters.trace_mode: _show_max_abs(tensor)}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
tensor = math_ops.cast(tensor, dtypes.float32)
tsize = _show_size(tensor)
tnorm = _show_norm(tensor, cast_to_f32=False)
tmax = _show_max(tensor, cast_to_f32=False)
tmin = _show_min(tensor, cast_to_f32=False)
tmean, tvar = _show_mean_and_variance(tensor, cast_to_f32=False)
return {_TT_SUMMARY_NORM: tnorm, _TT_SUMMARY_MAX: tmax,
_TT_SUMMARY_MIN: tmin, _TT_SUMMARY_MEAN: tmean,
_TT_SUMMARY_VAR: tvar, _TT_SUMMARY_SIZE: tsize}
raise RuntimeError(
'Tensor trace fun for %s is not yet implemented'
% self._parameters.trace_mode)
def _make_tensor_trace_fun(self, tensor_name, tensor_trace_order):
"""Makes the tensor tracing function called by outside compilation.
Args:
tensor_name: name of the tensor being traced.
tensor_trace_order: TensorTraceOrder object holding tensorname to id map.
Returns:
A function to be passed as the first argument to outside compilation.
Raises:
RuntimeError: If the trace mode is invalid.
"""
def _print_tensor(tensor_name, num_elements, tensor, output_tensor):
"""Prints a tensor value to a file.
Args:
tensor_name: name of the tensor being traced.
num_elements: number of elements to print (-1 means print all).
tensor: the tensor to be returned.
output_tensor: the tensor to be printed.
Returns:
The same tensor passed via the "tensor" argument.
Raises:
ValueError: If tensor_name is not already in
tensor_trace_order.tensorname_idx_map.
"""
if self._parameters.is_brief_mode():
if tensor_name not in tensor_trace_order.tensorname_idx_map:
raise ValueError(
'Tensor name %s is not in the tensorname_idx_map'%tensor_name)
msg = '%d'%tensor_trace_order.tensorname_idx_map[tensor_name]
else:
msg = '"%s"'%tensor_name
if self._parameters.trace_dir:
output_path = os.path.join(self._parameters.trace_dir, _TRACE_FILE_NAME)
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
return logging_ops.print_v2(msg, array_ops.shape(output_tensor),
'@', self._replica_id,
'\n', output_tensor, '\n',
summarize=num_elements,
output_stream=output_stream)
def _show_part_tensor(tensor):
"""Trace function for printing part of the tensor."""
return _print_tensor(tensor_name, _TRACE_MODE_PART_TENSOR_SIZE,
tensor, tensor)
def _show_full_tensor(tensor):
"""Trace function for printing the entire tensor."""
return _print_tensor(tensor_name, -1, tensor, tensor)
def _show_full_tensors(tensor):
"""Prints the full tensor values for the tensors that are _trace_stack_size hops away from a given tensor."""
def _get_distance_k_tensors(k_before=0):
"""Returns the tensors that are at most k_before hops away from the tensor."""
if k_before < 0:
return []
visited_tensors = {tensor: 0}
visitor_queue = [tensor]
head = 0
while head < len(visitor_queue):
current_tensor = visitor_queue[head]
head += 1
distance = visited_tensors[current_tensor]
if distance == k_before:
break
for input_tensor in current_tensor.op.inputs:
if input_tensor in visited_tensors:
continue
visitor_queue.append(input_tensor)
visited_tensors[input_tensor] = distance + 1
return visitor_queue
tensors_to_print = _get_distance_k_tensors(
self._parameters.trace_stack_size)
print_ops = [_print_tensor(t.name, -1, t, t) for t in tensors_to_print]
with ops.control_dependencies(print_ops):
return constant_op.constant(True)
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_IF_NAN):
return _show_full_tensors
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_PART_TENSOR):
return _show_part_tensor
# The input tensor has a shape of "[1]" for TRACE_MODE_NAN_INF,
# TRACE_MODE_NORM, and TRACE_MODE_MAX_ABS, as related computations are
# performed within TPUs and only their results are transferred to CPU.
# Simply, print the full tensor for these trace modes.
if self._parameters.trace_mode in (
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR,
tensor_tracer_flags.TRACE_MODE_MAX_ABS,
tensor_tracer_flags.TRACE_MODE_SUMMARY
):
return _show_full_tensor
raise RuntimeError('Tensor trace fun for %s is not yet implemented'
%self._parameters.trace_mode)
def _skip_op(self, op_id, op, ops_in_exec_path, report_handler):
"""Returns True if we should not trace Op.
Args:
op_id: Topological index of the op.
op: tf.Operation
ops_in_exec_path: Set of operations that are in the execution path.
report_handler: An instance of tensor_tracer_report.TTReportHandle.
Returns:
True if the op should not be traced, false otherwise.
"""
if TensorTracer.while_loop_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_WHILELOOP_OP))
return True
if TensorTracer.unsafe_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_UNSAFE_OP))
return True
if TensorTracer.device_mismatch(self._tt_config.device_type, op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_DEVICE_MISMATCH))
return True
if op not in ops_in_exec_path:
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_NOT_EXECUTED))
return True
if not self._inside_op_range(op_id):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_OUTSIDE_OP_RANGE))
return True
if self._less_interesting_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_LESS_INTERESTING_OP))
return True
if self._is_user_included_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
return False
if self._is_user_excluded_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
return True
return False
def _skip_tensor(self, op_id, out_tensor, report_handler):
"""Returns True if we should not trace out_tensor.
Args:
op_id: Topological index of the op producing tensor.
out_tensor: tf.Tensor
report_handler: An instance of tensor_tracer_report.TTReportHandle.
Returns:
True if the tensor should not be traced, false otherwise.
"""
# Skips a tensor if the tensor has a non-numeric type.
# Note: we cannot use check_ops.is_numeric_tensor(out_tensor)
# because it also excludes tensors with dtypes bool and
# float32_ref, which we actually want to trace.
non_numeric_tensor_types = set([dtypes.variant, dtypes.resource,
dtypes.string])
if out_tensor.dtype in non_numeric_tensor_types:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_NON_NUMERIC_TENSOR))
return True
# Skip a tensor if it feeds a special while loop op.
if [consumer for consumer in out_tensor.consumers() if
TensorTracer.while_loop_op(consumer)]:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_FEEDS_WHILELOOP_OP))
return True
if self._is_user_included_op(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
return False
if self._is_user_excluded_op(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
return True
if not out_tensor.get_shape().is_fully_defined():
# If trace mode is nan-inf, norm or max, then the tensor will be reduced
# to a scalar before the outside compilation call.
if self._parameters.trace_mode in (
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS,
tensor_tracer_flags.TRACE_MODE_SUMMARY
):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
return False
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_DYNAMIC_SHAPE))
return True
rank = len(out_tensor.shape)
if rank < 1:
# scalar
if self._parameters.trace_scalar_ops:
if TensorTracer.unsafe_scalar_trace(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_UNSAFE_SCALAR))
return True
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_SCALAR_GET_TRACED))
return False
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_SKIP_SCALAR))
return True
else:
# tensor
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
return False
def _filter_execution_path_operations(self, operations, fetches):
"""Returns the set of ops in the execution path to compute given fetches."""
# If no fetch provided, then return all operations.
if fetches is None:
return set(operations)
# Convert to list, if a single element is provided.
if not isinstance(fetches, (list, tuple)):
fetches = [fetches]
# If a tensor is given as fetch, convert it to op.
op_fetches = []
for fetch in fetches:
if isinstance(fetch, ops.Operation):
op_fetches.append(fetch)
elif isinstance(fetch, ops.Tensor):
op_fetches.append(fetch.op)
else:
raise RuntimeError('Given fetch:%s is neither a tensor nor an op.'
%fetch)
execution_path_operations = set(op_fetches)
traverse_stack = list(op_fetches)
while True:
if not traverse_stack:
break
head_op = traverse_stack.pop()
input_ops = [tensor_input.op for tensor_input in head_op.inputs]
input_ops.extend(head_op.control_inputs)
for input_op in input_ops:
if input_op not in execution_path_operations:
# Filter out loop condition operations, tracing them causes a cycle.
# Trace only the loop-body.
if TensorTracer.loop_cond_op(input_op):
continue
execution_path_operations.add(input_op)
traverse_stack.append(input_op)
return execution_path_operations
def _determine_and_instrument_traced_tensors(self, graph_order,
ops_in_exec_path,
tensor_trace_points,
report_handler):
"""Determines the tensors to trace and instruments the trace details.
Args:
graph_order: graph_order tuple containing graph (tf.graph), operations
(list of operations), op_to_idx (op id mapping), tensors (list of
tensors), tensor_to_idx (tensor id mapping), contains_cycle (whether
there is a cycle in the graph), topological_order_or_cycle (list of ops
in topological order or list of ops creating a cycle).
ops_in_exec_path: Set of ops in the execution path.
tensor_trace_points: Collection of programmatic tensor trace points.
report_handler: An instance of tensor_tracer_report.TTReportHandle.
Returns:
List of tensors to be traced.
"""
traced_tensors = []
checkpoint_operations = set([tensor.op
for (tensor, _) in tensor_trace_points])
for op_id, op in enumerate(graph_order.operations):
if checkpoint_operations and op not in checkpoint_operations:
continue
if self._skip_op(op_id, op, ops_in_exec_path, report_handler):
continue
for i in range(len(op.outputs)):
out_tensor = op.outputs[i]
if not self._skip_tensor(op_id, out_tensor, report_handler):
traced_tensors.append(out_tensor)
return traced_tensors
def _check_trace_files(self):
"""Checks if any requirements for trace files are satisfied."""
if not self._parameters.trace_dir:
# traces will be written to stderr. No need to check trace files.
return
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
# Output files are handled by tf.summary operations, no need to precreate
# them.
return
if _trace_files_need_precreated(self._parameters.trace_dir):
for replica_id in range(0, self._tt_config.num_replicas):
trace_file_path = os.path.join(
self._parameters.trace_dir,
_COMPACT_TRACE_FILE_PREFIX) + '%d'%replica_id
if not gfile.Exists(trace_file_path):
raise RuntimeError(
'%s must be pre-created with the '
'appropriate properties.'%trace_file_path)
else:
if not gfile.Exists(self._parameters.trace_dir):
gfile.MkDir(self._parameters.trace_dir)
if not gfile.Exists(self._parameters.trace_dir):
raise RuntimeError('Failed to create %s'%self._parameters.trace_dir)
def _determine_trace_and_create_report(self, graph, ops_in_exec_path):
"""Work needs to be done prior to TPU or CPU tracing.
Args:
graph: tf.graph
ops_in_exec_path: Set of operations in the execution path.
Returns:
An instance of tensor_tracer_report.TensorTraceOrder, containing list of
tensors to be traced with their topological order information.
"""
self._check_trace_files()
graph_order = tensor_tracer_report.sort_tensors_and_ops(graph)
tensor_trace_points = graph.get_collection(_TENSOR_TRACER_COLLECTION)
report_handler = tensor_tracer_report.TTReportHandle()
traced_tensors = self._determine_and_instrument_traced_tensors(
graph_order, ops_in_exec_path, tensor_trace_points, report_handler)
tensor_trace_order = tensor_tracer_report.TensorTraceOrder(graph_order,
traced_tensors)
num_signatures = self._num_signature_dimensions()
if num_signatures:
self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG,
graph,
[len(traced_tensors),
num_signatures])
if self._parameters.trace_mode in (
tensor_tracer_flags.TRACE_MODE_SUMMARY,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY):
report_proto = report_handler.create_report_proto(self._tt_config,
self._parameters,
tensor_trace_order,
tensor_trace_points,
self._signature_types())
report_handler.write_report_proto(report_proto, self._parameters)
else:
report_handler.create_report(self._tt_config, self._parameters,
tensor_trace_order, tensor_trace_points)
return tensor_trace_order
def _create_host_call(self):
return self._parameters.trace_mode in (
tensor_tracer_flags.TRACE_MODE_SUMMARY,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)
def _generate_flush_cache_op(self, num_replicas, on_tpu):
"""Generates an Op that will flush the cache to file.
Args:
num_replicas: total number of replicas.
on_tpu: if the graph is executed on TPU.
Returns:
The Op to flush the cache to file.
"""
def _flush_fun(cache, replica_id):
"""Flushes the cache to a file corresponding to replica_id."""
def _f(file_index):
"""Generates a func that flushes the cache to a file."""
def _print_cache():
"""Flushes the cache to a file."""
replica_str = ('%d' % file_index)
if self._parameters.trace_dir:
output_path = (os.path.join(self._parameters.trace_dir,
_COMPACT_TRACE_FILE_PREFIX)
+ replica_str)
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
new_step_line = _REPLICA_ID_TAG + replica_str
print_ops = []
for i in range(self._num_signature_dimensions()):
print_ops.append(logging_ops.print_v2(
new_step_line, '\n',
cache[:, i], '\n',
summarize=-1,
output_stream=output_stream))
with ops.control_dependencies(print_ops):
return constant_op.constant(0).op
return _print_cache
def _eq(file_index):
return math_ops.equal(replica_id, file_index)
flush_op_cases = {}
for i in range(num_replicas):
flush_op_cases[_eq(i)] = _f(i)
# Each replica needs to determine where to write its output.
# To do this, we check if replica_id is 0, then 1, ..., and then
# num_replicas - 1 statically; and return the corresponding static file
# name. We cannot simply set the file name in python, as replica_id is
# only known during tf runtime, and we cannot create dynamic filenames.
return control_flow_ops.case(flush_op_cases, exclusive=True)
cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG)
if on_tpu:
flush_op = tpu.outside_compilation(_flush_fun,
cache.value(), self._replica_id)
else:
flush_op = _flush_fun(cache.value(), self._replica_id)
with ops.control_dependencies([flush_op]):
reset_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE,
dtype=cache.dtype,
shape=cache.shape)
assign_op = state_ops.assign(cache, reset_value).op
with ops.control_dependencies([assign_op]):
return constant_op.constant(0).op
def _flush_tensor_values_cache(self, tensor_fetches, op_fetches, on_tpu):
"""Flushes the intermediate tensor values in the graph to the cache.
Args:
tensor_fetches: list of tensor results returned by the model_fn.
op_fetches: list of ops that are returned by the model_fn, e.g., train_op.
on_tpu: if the graph is executed on TPU.
Returns:
An identical copy of tensor_fetches.
"""
# Add a dependency to op and tensor fetches to make sure that all tracing
# ops are executed before flushing trace results.
with ops.control_dependencies(op_fetches +
[tensor.op for tensor in tensor_fetches]):
flush_cache_op = self._generate_flush_cache_op(
self._tt_config.num_replicas, on_tpu)
return control_flow_ops.tuple(tensor_fetches,
control_inputs=[flush_cache_op])
def _process_tensor_fetches(self, tensor_fetches):
"""Check that tensor_fetches is not empty and have valid tensors."""
# If none or empty list.
if tensor_fetches is None:
raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
'None.')
if not isinstance(tensor_fetches, (list, tuple)):
tensor_fetches = [tensor_fetches]
elif not tensor_fetches:
raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
'empty list.')
fetches = []
for fetch in tensor_fetches:
if isinstance(fetch, ops.Tensor):
fetches.append(fetch)
else:
raise RuntimeError('Given tensor_fetch:%s is not a tensor.' % fetch)
return fetches
def _process_op_fetches(self, op_fetches):
"""Check that op_fetches have valid ops."""
if op_fetches is None:
return []
if not isinstance(op_fetches, (list, tuple)):
op_fetches = [op_fetches]
fetches = []
for fetch in op_fetches:
if isinstance(fetch, ops.Operation):
fetches.append(fetch)
elif isinstance(fetch, ops.Tensor):
fetches.append(fetch.op)
else:
logging.warning('Ignoring the given op_fetch:%s, which is not an op.' %
fetch)
return fetches
def _convert_fetches_to_input_format(self, input_fetches, current_fetches):
"""Changes current_fetches' format, so that it matches input_fetches."""
if isinstance(input_fetches, ops.Tensor):
if len(current_fetches) != 1:
raise RuntimeError('Tensor tracer input/output fetches do not match.')
return current_fetches[0]
else:
if len(input_fetches) != len(current_fetches):
raise RuntimeError('Tensor tracer input/output fetches do not match.')
elif isinstance(input_fetches, tuple):
return tuple(current_fetches)
else:
return current_fetches
def _get_op_control_flow_context(self, op):
"""Returns the control flow of the given op.
Args:
op: tf.Operation for which the control flow context is requested.
Returns:
op_control_flow_context: the control flow context of the given op. If the
operation type is LoopExit, returns the outer control flow context.
"""
# pylint: disable=protected-access
op_control_flow_context = op._control_flow_context
# pylint: enable=protected-access
if control_flow_util.IsLoopExit(op):
op_control_flow_context = op_control_flow_context.outer_context
return op_control_flow_context
def _prepare_host_call_fn(self, processed_t_fetches, op_fetches):
"""Creates a host call function that will write the cache as tb summary.
Args:
processed_t_fetches: List of tensors provided to session.run.
op_fetches: List of operations provided to session.run.
Raises:
ValueError if trace_dir is not set.
"""
if self._parameters.trace_dir is None:
raise ValueError('Provide a trace_dir for tensor tracer in summary mode. '
'--trace_dir=/model/dir')
def _write_cache(step, **kwargs):
"""Writes the given caches as tensor summary.
Args:
step: Step tensor with dimension [num_cores].
**kwargs: The dictionary of tensors that need to be written as
summaries. Key and value pairs within kwargs correspond to the tag
name and the tensor content that will be written using summary.write.
The trace_modes that use this function are:
- summary: In summary mode, kwargs includes a single (tag, content)
pair which are, _TT_SUMMARY_TAG and a tf.float32 signature_cache
variable. The dimension of the signature_cache is:
num_cores x num_traced_tensors x num_signatures.
- full_tensor_summary: kwargs will include all traced tensors. Tag
and content correspond to the name of the tensor, and its actual
content.
Returns:
A tf.Operation that needs to be executed for the host call dependencies.
"""
# TODO(deveci): Parametrize max_queue, so that flushing op can be called
# less frequently.
# Setting max_queue to 100 appears to be safe even when the number of
# iterations is much lower, as the destructor of the writer will flush it.
summary_write_ops = []
with summary.create_file_writer_v2(
self._parameters.trace_dir,
filename_suffix=_TT_EVENT_FILE_SUFFIX,
max_queue=_TT_SUMMARY_MAX_QUEUE).as_default():
summary_metadata = summary_pb2.SummaryMetadata(
plugin_data=summary_pb2.SummaryMetadata.PluginData(
plugin_name=_TT_TENSORBOARD_PLUGIN_NAME))
for key, value in kwargs.items():
summary_write_ops.append(summary.write(
_TT_SUMMARY_TAG + '/' + key, value, metadata=summary_metadata,
step=step[0]))
return control_flow_ops.group(summary_write_ops)
step = array_ops.reshape(training_util.get_or_create_global_step(), [1])
self._host_call_fn = {}
host_call_deps = op_fetches + [tensor.op for tensor in processed_t_fetches]
caches_to_write = {}
with ops.control_dependencies(host_call_deps):
all_caches = self._get_all_cache_variables()
for cache_name, cache_variable in all_caches.items():
# Increase the cache rank by 1, so that when host call concatenates
# tensors from different replicas, we can identify them with [core_id].
new_cache_shape = [1]
new_cache_shape.extend(cache_variable.shape.as_list())
cache = array_ops.reshape(cache_variable.value(), new_cache_shape)
caches_to_write[cache_name] = cache
# Add step to parameter dictionary.
caches_to_write['step'] = step
# Other options without adding step to parameter dictionary are
# * host_call_fn = (_write_cache(step, caches_to_write)) : fails as it
# considers caches_to_write as a single parameter, rather than keyword
# parameters.
# * host_call_fn = (_write_cache(step, **caches_to_write)) : fails with
# a syntax error.
self._host_call_fn[_TT_HOSTCALL_KEY] = (_write_cache, caches_to_write)
def host_call_deps_and_fn(self):
return self._host_call_fn
def _trace_execution(self, graph,
tensor_fetches,
op_fetches=None,
on_tpu=True):
"""Commong tracing function for both CPU and TPUs.
The caller function should set device_type, num_replicas,
num_replicas_per_host, num_hosts and replica_id before calling
_trace_execution.
Args:
graph: the graph of Ops executed on the TPU.
tensor_fetches: a list, tuple, or a single object of tensor fetches
returned by model_fn given to session.run. The function must be provided
with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
on_tpu: True if executing on TPU.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If tensor_fetches is None or empty.
"""
def _cast_unsupported_dtypes(tensor):
"""Casts tensor to a supported type."""
if tensor.dtype.__eq__(dtypes.int64):
# outside-compilation doesn't support int64 input yet.
return math_ops.cast(tensor, dtypes.int32)
if tensor.dtype.__eq__(dtypes.bfloat16) or tensor.dtype.__eq__(
dtypes.float16):
# Since host can't handle bf16, convert tensor to f32.
return math_ops.cast(tensor, dtypes.float32)
return tensor
TensorTracer.check_device_type(self._tt_config.device_type)
TensorTracer.check_trace_mode(self._tt_config.device_type,
self._parameters.trace_mode)
# Check in_tensor_fetches, and op_fetches and convert them to lists.
processed_t_fetches = self._process_tensor_fetches(tensor_fetches)
op_fetches = self._process_op_fetches(op_fetches)
all_fetches = op_fetches + [tensor.op for tensor in processed_t_fetches]
# Filter out the operations that won't be executed.
# if fetches=None, then ops_in_exec_path = set(operations)
exec_op_set = self._filter_execution_path_operations(graph.get_operations(),
all_fetches)
# Write report file, and determine the traced tensors.
tensor_trace_order = self._determine_trace_and_create_report(
graph, exec_op_set)
tensor_fetch_set = set(processed_t_fetches)
tracing_ops = []
# pylint: disable=protected-access
current_control_flow_context = graph._get_control_flow_context()
# pylint: enable=protected-access
sorted_exec_op_list = list(exec_op_set)
sorted_exec_op_list.sort(key=lambda op: op.name)
# Trace ops only if they are in the execution path.
for op in sorted_exec_op_list:
for i in range(len(op.outputs)):
out_tensor = op.outputs[i]
tensor_name = out_tensor.name
if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:
continue
# Create the list of consumers before calling _preprocess_traced_tensor.
# Otherwise, adding the control input below will introduce a cycle in the
# graph.
consumers = out_tensor.consumers()
# Not all consumers may be in the exec path. Filter out the consumers
# to keep the graph simpler.
consumers = [cop for cop in consumers if cop in exec_op_set]
# If there is no consumer of the tensor, there is no need to trace it;
# unless the tensor itself is one of the fetches.
is_a_fetched_tensor = out_tensor in tensor_fetch_set
if (not consumers) and (not is_a_fetched_tensor):
continue
op_control_flow_context = self._get_op_control_flow_context(op)
# pylint: disable=protected-access
graph._set_control_flow_context(op_control_flow_context)
# pylint: enable=protected-access
processed_tensors = self._preprocess_traced_tensor(out_tensor)
if on_tpu:
for signature in processed_tensors.keys():
processed_tensors[signature] = _cast_unsupported_dtypes(
processed_tensors[signature])
if self._use_tensor_values_cache():
# Use a small cache to store the characteristics of the tensor.
cache_idx = tensor_trace_order.tensorname_to_cache_idx[tensor_name]
trace_op = self._save_tensor_value_to_cache_op(cache_idx,
processed_tensors)
elif self._use_tensor_buffer():
if len(processed_tensors) != 1:
raise RuntimeError('Multiple stats are only allowed in compact '
'mode.')
processed_out_tensor = six.next(six.itervalues(processed_tensors))
# Store the whole tensor in a buffer.
trace_op = self._snapshot_tensor(processed_out_tensor)
else:
def tpu_wrap_trace_fn(tensor, out_tensor_name):
"""Wraps the trace_fn with outside compilation if on TPUs."""
tensor_trace_fn = self._make_tensor_trace_fun(out_tensor_name,
tensor_trace_order)
if on_tpu:
return tpu.outside_compilation(tensor_trace_fn, tensor)
else:
return tensor_trace_fn(tensor)
def conditional_trace_fn(predicate_tensor, out_tensor, trace_fn,
out_tensor_name):
"""Creates a cond op that traces the out_tensor if predicate is satisfied."""
return control_flow_ops.cond(
predicate_tensor, lambda: trace_fn(out_tensor, out_tensor_name),
lambda: constant_op.constant(False)).op
if len(processed_tensors) != 1:
raise RuntimeError('Multiple stats are only allowed in compact '
'mode.')
# Collecting multiple statistics is only supported in the summary
# mode that uses the compact format (self._use_tensor_values_cache = true).
# Non-compact mode currently allows a single stat per tensor.
processed_out_tensor = six.next(six.itervalues(processed_tensors))
if self._parameters.is_conditional_trace:
trace_op = conditional_trace_fn(processed_out_tensor, out_tensor,
tpu_wrap_trace_fn, tensor_name)
elif self._parameters.included_cores:
should_print = constant_op.constant(False)
for core in self._parameters.included_cores:
should_print = gen_math_ops.logical_or(
should_print, gen_math_ops.equal(self._replica_id, core))
trace_op = conditional_trace_fn(should_print, processed_out_tensor,
tpu_wrap_trace_fn, tensor_name)
else:
trace_op = tpu_wrap_trace_fn(processed_out_tensor, tensor_name)
if is_a_fetched_tensor:
tracing_ops.append(trace_op)
continue
# Add it to all consumers, as some consumers may not be executed if they
# are in a control flow.
for consumer_op in consumers:
# pylint: disable=protected-access
consumer_op._add_control_input(trace_op)
# pylint: enable=protected-access
# pylint: disable=protected-access
graph._set_control_flow_context(current_control_flow_context)
# pylint: enable=protected-access
if tracing_ops:
# If we are tracing a fetched tensor, its dependency is stored in
# tracing_ops.
processed_t_fetches = control_flow_ops.tuple(processed_t_fetches,
control_inputs=tracing_ops)
if self._use_tensor_values_cache() or self._use_tensor_buffer():
if self._create_host_call() and on_tpu:
self._prepare_host_call_fn(processed_t_fetches, op_fetches)
else:
processed_t_fetches = self._flush_tensor_values_cache(
processed_t_fetches, op_fetches, on_tpu=on_tpu)
# processed_t_fetches is a list at this point. Convert it to the same
# format as given in tensor_fetches.
return self._convert_fetches_to_input_format(tensor_fetches,
processed_t_fetches)
def trace_tpu(self, graph,
tensor_fetches,
op_fetches=None,
num_replicas=None,
num_replicas_per_host=None,
num_hosts=None):
"""Traces the tensors generated by TPU Ops in a TF graph.
Args:
graph: the graph of Ops executed on the TPU.
tensor_fetches: a list, tuple, or a single object of tensor fetches
returned by model_fn given to session.run. The function must be provided
with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
num_replicas: number of replicas used on the TPU.
num_replicas_per_host: number of replicas per TPU host.
num_hosts: total number of TPU hosts.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If num_replicas_per_host > 8.
RuntimeError: If tensor_fetches is None or empty.
"""
if graph in TensorTracer._traced_graphs:
logging.warning('Graph is already rewritten with tensor tracer, ignoring '
'multiple calls.')
return tensor_fetches
else:
TensorTracer._traced_graphs.add(graph)
self._tt_config.device_type = _DEVICE_TYPE_TPU
self._tt_config.num_replicas = num_replicas
self._tt_config.num_replicas_per_host = num_replicas_per_host
self._tt_config.num_hosts = num_hosts
if self._tt_config.num_replicas is not None:
if self._tt_config.num_replicas_per_host is None:
self._tt_config.num_replicas_per_host = 8
if self._tt_config.num_hosts is None:
self._tt_config.num_hosts = (
num_replicas // self._tt_config.num_replicas_per_host +
(num_replicas % self._tt_config.num_replicas_per_host > 0))
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_before_tt.pbtxt')
with graph.as_default():
self._add_replica_id_to_graph()
tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
on_tpu=True)
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_after_tt.pbtxt')
return tensor_fetches
def trace_cpu(self, graph, tensor_fetches, op_fetches=None):
"""Traces the tensors generated by CPU Ops in a TF graph.
Args:
graph: the graph of Ops executed on the CPU.
tensor_fetches: a list, tuple, or a single object of tensor fetches
returned by model_fn given to session.run. The function must be provided
with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If tensor_fetches is None or empty.
"""
if graph in TensorTracer._traced_graphs:
logging.warning('Graph is already rewritten with tensor tracer, ignoring '
'multiple calls.')
return tensor_fetches
else:
TensorTracer._traced_graphs.add(graph)
self._tt_config.device_type = _DEVICE_TYPE_CPU
self._tt_config.num_replicas = 1
self._tt_config.num_replicas_per_host = 1
self._tt_config.num_hosts = 1
self._replica_id = 0
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_before_tt.pbtxt')
with graph.as_default():
tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
on_tpu=False)
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_after_tt.pbtxt')
return tensor_fetches
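# A minimal usage sketch of the tracing entry points above: assuming a
# TensorTracer() instance can be built with its default, flag-driven
# parameters, trace_cpu() rewrites the graph so that the returned fetches
# carry the extra tracing dependencies. The tiny graph and the helper name
# below are illustrative only.
def _example_trace_cpu_usage():
  """Hypothetical helper showing how trace_cpu() wraps session.run fetches."""
  from tensorflow.python.client import session as session_lib
  graph = ops.Graph()
  with graph.as_default():
    x = array_ops.ones([4, 4])
    y = math_ops.matmul(x, x)
    tracer = TensorTracer()  # Parameters are read from the tensor tracer flags.
    traced_y = tracer.trace_cpu(graph, tensor_fetches=y)
  with session_lib.Session(graph=graph) as sess:
    return sess.run(traced_y)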
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tensor_tracer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the SessionRunHook for preemptible Cloud TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os
import threading
import time
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
class CloudTPUPreemptedHook(session_run_hook.SessionRunHook):
"""The SessionRunHook for preemptible Cloud TPUs.
This is an implementation of SessionRunHook for the pre-emptible Google Cloud
TPU service. It attempts to close the session if the TPU is preempted, and
exits the coordinator process if the session cannot be closed.
"""
def __init__(self, cluster):
self._cluster = cluster
def after_create_session(self, session, coord):
if tpu_cluster_resolver.is_running_in_gce():
self._tpu_poller = _TPUPollingThread(self._cluster, session)
self._tpu_poller.start()
def end(self, session):
self._tpu_poller.stop()
class _TPUPollingThread(threading.Thread):
"""A thread that polls the state of a TPU node.
When the node transitions into a TERMINAL state (PREEMPTED, TERMINATED)
that is considered unrecoverable by the underlying infrastructure, the thread
attempts to close the session, and exits the entire process if
session.close() hangs.
"""
def __init__(self, cluster, session):
super(_TPUPollingThread, self).__init__()
self.daemon = True
self._running = True
self._session_closed = False
self._cluster = cluster
self._session = session
self._interval = 30
# Some of the Google API libraries are quite chatty, so disable them.
for name in ['googleapiclient.discovery', 'oauth2client.client']:
_logging.getLogger(name).setLevel(_logging.WARNING)
def stop(self):
self._running = False
self._session_closed = True
self.join()
def run(self):
if not tpu_cluster_resolver.is_running_in_gce():
logging.warning(
'TPUPollingThread is running in a non-GCE environment, exiting...')
self._running = False
return
while self._running:
response = self._cluster._fetch_cloud_tpu_metadata() # pylint: disable=protected-access
logging.warning(
'TPUPollingThread found TPU %s in state %s, and health %s.',
self._cluster._tpu, response['state'], # pylint: disable=protected-access
response.get('health', 'UNKNOWN'))
if 'state' in response and response['state'] in [
'TERMINATED', 'PREEMPTED'
]:
logging.warning(
'TPU node %s reached an unrecoverable state %s, '
'terminating training.',
self._cluster._tpu, # pylint: disable=protected-access
response['state'])
os._exit(1) # pylint: disable=protected-access
time.sleep(self._interval)
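# A brief usage sketch: the hook is attached to an Estimator-style training
# call so that preemption of the Cloud TPU tears the session down cleanly.
# The TPU name and the estimator/input_fn arguments are illustrative
# assumptions only.
def _example_preempted_hook_usage(estimator, input_fn):
  """Hypothetical helper wiring CloudTPUPreemptedHook into estimator.train()."""
  cluster = tpu_cluster_resolver.TPUClusterResolver(tpu='my-tpu')
  hook = CloudTPUPreemptedHook(cluster)
  # The hook starts a polling thread after session creation and stops it when
  # training ends.
  estimator.train(input_fn=input_fn, max_steps=1000, hooks=[hook])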
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/preempted_hook.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow_estimator.python.estimator.tpu.util import *
# pylint: enable=wildcard-import,unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU Feature Column Library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_function
# pylint: disable=protected-access
_TPU_FC_TO_SCOPE = '_tpu_feature_column_scope'
_SUPPORTED_SEQUENCE_COLUMNS = (fc._SequenceCategoricalColumn,
fc_lib.SequenceCategoricalColumn)
_SUPPORTED_CATEGORICAL_COLUMNS_V2 = (fc_lib.IdentityCategoricalColumn,
fc_lib.VocabularyFileCategoricalColumn,
fc_lib.VocabularyListCategoricalColumn,
fc_lib.WeightedCategoricalColumn,
fc_lib.SequenceCategoricalColumn)
_SUPPORTED_CATEGORICAL_COLUMNS = (fc._IdentityCategoricalColumn,
fc._VocabularyFileCategoricalColumn,
fc._VocabularyListCategoricalColumn,
fc._WeightedCategoricalColumn,
fc._SequenceCategoricalColumn
) + _SUPPORTED_CATEGORICAL_COLUMNS_V2
_SEQUENCE_FEATURE_LENGTH_POSTFIX = '_seq_length_'
def embedding_column(categorical_column,
dimension,
combiner='mean',
initializer=None,
max_sequence_length=0,
learning_rate_fn=None):
"""TPU embedding_column for `tf.feature_column.embedding_column`.
Note that the interface for TPU embedding_column is different from the non-TPU
version. The following args available for the non-TPU version are NOT
supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable.
Args:
categorical_column: A categorical_column returned from
categorical_column_with_identity, weighted_categorical_column,
categorical_column_with_vocabulary_file,
categorical_column_with_vocabulary_list,
sequence_categorical_column_with_identity,
sequence_categorical_column_with_vocabulary_file,
sequence_categorical_column_with_vocabulary_list
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row for a non-sequence column. For more information, see
`tf.feature_column.embedding_column`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
max_sequence_length: A non-negative integer specifying the max sequence
length. Any sequence shorter than this will be padded with 0 embeddings
and any sequence longer will be truncated. This must be positive for
sequence features and 0 for non-sequence features.
learning_rate_fn: A function that takes global step and returns learning
rate for the embedding table.
Returns:
A _TPUEmbeddingColumn.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if `initializer` is specified but not callable.
"""
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS):
raise TypeError(
'categorical_column for tpu '
' embedding_column must be type %s, got %s.' % (' or '.join([
cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS
]), type(categorical_column)))
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
embedding_shape = categorical_column._num_buckets, dimension # pylint: disable=protected-access
def _creator(weight_collections, scope):
embedding_column_layer = fc._EmbeddingColumnLayer(
embedding_shape=embedding_shape,
initializer=initializer,
weight_collections=weight_collections,
trainable=True,
name='embedding_column_layer')
return embedding_column_layer(None, scope=scope) # pylint: disable=not-callable
column = _TPUEmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
layer_creator=_creator,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn)
# For Embedding column, the initializer is hidden inside the creator Fn, which
# is not accessible later. So, we attach it to a special field. Also note
# that non-TPU Embedding column and non-TPU shared Embedding column handle the
# initializer differently. See shared_embedding_columns for details.
column._tpu_initializer = initializer
return column
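# A short usage sketch for the column defined above: a supported identity
# categorical column is wrapped into a TPU embedding column. The feature name
# and sizes are illustrative assumptions, and it presumes
# fc_lib.categorical_column_with_identity is available.
def _example_embedding_column_usage():
  """Hypothetical helper building a 16-dimensional TPU embedding column."""
  watched_video_id = fc_lib.categorical_column_with_identity(
      'watched_video_id', num_buckets=1000)
  # Rows containing several ids are reduced with the 'mean' combiner.
  return embedding_column(watched_video_id, dimension=16, combiner='mean')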
def shared_embedding_columns(categorical_columns,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
max_sequence_lengths=None,
learning_rate_fn=None):
"""List of dense columns that convert from sparse, categorical input.
Note that the interface for TPU embedding_column is different from the non-TPU
version. The following args available for the non-TPU version are NOT
supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable.
Args:
categorical_columns: A list of categorical_columns returned from
categorical_column_with_identity, weighted_categorical_column,
categorical_column_with_vocabulary_file,
categorical_column_with_vocabulary_list,
sequence_categorical_column_with_identity,
sequence_categorical_column_with_vocabulary_file,
sequence_categorical_column_with_vocabulary_list
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row for a non-sequence column. For more information, see
`tf.feature_column.embedding_column`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
shared_embedding_collection_name: Optional name of the collection where
shared embedding weights are added. If not given, a reasonable name will
be chosen based on the names of `categorical_columns`. This is also used
in `variable_scope` when creating shared embedding weights.
max_sequence_lengths: A list of non-negative integers, either None or
empty or the same length as the argument categorical_columns. Entries
corresponding to non-sequence columns must be 0 and entries corresponding
to sequence columns specify the max sequence length for the column. Any
sequence shorter than this will be padded with 0 embeddings and any
sequence longer will be truncated.
learning_rate_fn: A function that takes global step and returns learning
rate for the embedding table.
Returns:
A list of _TPUSharedEmbeddingColumn.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if `initializer` is specified but not callable.
ValueError: if `max_sequence_lengths` is specified and not the same length
as `categorical_columns`.
ValueError: if `max_sequence_lengths` is positive for a non-sequence column
or 0 for a sequence column.
"""
for categorical_column in categorical_columns:
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS):
raise TypeError(
'categorical_column for tpu '
' shared_embedding_columns must be type %s, got %s.' % (' or '.join([
cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS
]), type(categorical_column)))
if not max_sequence_lengths:
max_sequence_lengths = [0] * len(categorical_columns)
if len(max_sequence_lengths) != len(categorical_columns):
raise ValueError('max_sequence_lengths and categorical_columns must be of '
'the same length. len(max_sequence_lengths)={} '
'len(categorical_columns)={}.'.format(
len(max_sequence_lengths), len(categorical_columns)))
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. ')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
num_buckets = sorted_columns[0]._num_buckets # pylint: disable=protected-access
for c in sorted_columns[1:]:
if num_buckets != c._num_buckets: # pylint: disable=protected-access
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same number of buckets. Given column: {} with buckets: {} does '
'not match column: {} with buckets: {}'.format(
sorted_columns[0], num_buckets, c, c._num_buckets)) # pylint: disable=protected-access
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
tpu_columns = []
# Create the state (_SharedEmbeddingColumnLayer) here.
for categorical_column, max_sequence_length in zip(
categorical_columns, max_sequence_lengths):
column = _TPUSharedEmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
shared_embedding_collection_name=shared_embedding_collection_name,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn)
tpu_columns.append(column)
return tpu_columns
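# A short usage sketch for shared_embedding_columns: two identity columns over
# the same id space share a single embedding table. The feature names and
# sizes are illustrative assumptions.
def _example_shared_embedding_usage():
  """Hypothetical helper building two columns that share one embedding table."""
  watched = fc_lib.categorical_column_with_identity(
      'watched_video_id', num_buckets=1000)
  impressed = fc_lib.categorical_column_with_identity(
      'impression_video_id', num_buckets=1000)
  # All shared columns must have the same number of buckets.
  return shared_embedding_columns([watched, impressed], dimension=16)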
class _TPUBaseEmbeddingColumn(object):
"""Base class for TPU Embedding Column."""
def __init__(self,
categorical_column,
max_sequence_length=0,
learning_rate_fn=None):
self._tpu_categorical_column = categorical_column
self._max_sequence_length = max_sequence_length
self._learning_rate_fn = learning_rate_fn
if (self.is_sequence_column() and max_sequence_length < 1):
raise ValueError('max_sequence_length must be greater than 0 for '
'sequence columns. Got max_sequence_length={} for '
'sequence column {}.'.format(max_sequence_length,
categorical_column.name))
if (not self.is_sequence_column() and max_sequence_length != 0):
raise ValueError('Non zero max_seq_length={} specified for non '
'sequence column {}.'.format(max_sequence_length,
categorical_column.name))
def get_combiner(self):
"""Returns the embedding combiner."""
raise NotImplementedError('not implemented')
def get_embedding_table_size(self):
"""Returns the embedding table size, tuple of vocab size and dimension."""
raise NotImplementedError('not implemented')
def get_feature_key_name(self):
"""Returns the feature key name in the features dict."""
raise NotImplementedError('not impl')
def get_weight_key_name(self):
"""Return the key name for weights."""
raise NotImplementedError('not impl')
def get_embedding_var_name(self):
"""Returns the embedding variable name.
Feature key name and embedding variable name usually have a one-to-one
mapping. But for shared embedding columns, it is a many-to-one mapping.
"""
raise NotImplementedError('not impl')
def get_initializer(self):
"""Returns the initializer."""
raise NotImplementedError('not impl')
def is_categorical_column_weighted(self):
"""Check if the categorical column of the embedding column is weighted."""
raise NotImplementedError('not impl')
def is_sequence_column(self):
return isinstance(self._tpu_categorical_column, _SUPPORTED_SEQUENCE_COLUMNS)
def get_max_sequence_length(self):
return self._max_sequence_length
def get_learning_rate_fn(self):
return self._learning_rate_fn
def get_sequence_length_feature_key_name(self):
"""Get the key for the associated sequence length feature."""
return get_sequence_length_feature_key_name_from_feature_key_name(
self.get_feature_key_name())
class _TPUEmbeddingColumn(_TPUBaseEmbeddingColumn, fc._EmbeddingColumn):
"""Core Embedding Column."""
def __new__(cls,
categorical_column,
dimension,
combiner='mean',
layer_creator=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=0,
learning_rate_fn=None):
# Note, args ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable
# are not supported on TPU. They are solely for matching the signature of
# __new__ of parent class fc._EmbeddingColumn.
return fc._EmbeddingColumn.__new__(
cls,
categorical_column,
dimension,
combiner=combiner,
layer_creator=layer_creator,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable)
def __init__(self,
categorical_column,
dimension,
combiner='mean',
layer_creator=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=0,
learning_rate_fn=None):
_TPUBaseEmbeddingColumn.__init__(
self,
categorical_column,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn)
self._key = None
def get_combiner(self):
return self.combiner
def get_embedding_table_size(self):
"""Returns num_ids and width."""
return (self.categorical_column._num_buckets, self.dimension)
def get_feature_key_name(self):
"""get_feature_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.categorical_column.name
return self.categorical_column.name
def get_weight_key_name(self):
"""get_weight_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.weight_feature_key
return None
def get_embedding_var_name(self):
"""get_embedding_var_name."""
return self.categorical_column.name
def get_initializer(self):
return self._tpu_initializer
def is_categorical_column_weighted(self):
"""Check if the categorical column of the embedding column is weighted."""
if isinstance(
self.categorical_column,
(
fc._WeightedCategoricalColumn, # pylint: disable=protected-access
fc_lib.WeightedCategoricalColumn)):
return True
return False
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc._EmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc._EmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
# TPU mode
# Get the embeddings from the LazyBuilder.
tensor = inputs.get(self.get_feature_key_name())
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(self.get_embedding_var_name(),
'embedding_weights')
return tensor
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc._EmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc._EmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
tensor = inputs.get(self.get_feature_key_name())
tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())
# inputs is a _LazyBuilder and for rank 1 tensors, it calls expand_dims(-1).
# We need to undo this to match the standard CPU sequence embedding.
tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(self.get_embedding_var_name(),
'embedding_weights')
return fc._SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
class _TPUSharedEmbeddingColumn(_TPUBaseEmbeddingColumn,
fc._SharedEmbeddingColumn):
"""Core Shared Embedding Column."""
def __new__(cls,
categorical_column,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=0,
learning_rate_fn=None):
return fc._SharedEmbeddingColumn.__new__(
cls,
categorical_column,
dimension,
combiner=combiner,
initializer=initializer,
shared_embedding_collection_name=shared_embedding_collection_name,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable)
def __init__(self,
categorical_column,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
max_sequence_length=0,
learning_rate_fn=None):
_TPUBaseEmbeddingColumn.__init__(
self,
categorical_column,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn)
self._key = None
def get_combiner(self):
return self.combiner
def get_embedding_table_size(self):
"""Returns num_ids and width."""
return (self.categorical_column._num_buckets, self.dimension)
def get_feature_key_name(self):
"""get_feature_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.categorical_column.name
return self.categorical_column.name
def get_weight_key_name(self):
"""get_weight_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.weight_feature_key
return None
def get_embedding_var_name(self):
"""get_embedding_var_name."""
return self.shared_embedding_collection_name
def get_initializer(self):
return self.initializer
def is_categorical_column_weighted(self):
"""Check if the categorical column of the embedding column is weighted."""
if isinstance(
self.categorical_column,
(
fc._WeightedCategoricalColumn, # pylint: disable=protected-access
fc_lib.WeightedCategoricalColumn)):
return True
return False
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc._SharedEmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc._SharedEmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
# TPU mode
# Get the embeddings from the LazyBuilder.
tensor = inputs.get(self.get_feature_key_name())
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(
self.get_embedding_var_name(),
'embedding_weights',
is_shared_embedding=True)
return tensor
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc._SharedEmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc._SharedEmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
tensor = inputs.get(self.get_feature_key_name())
tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(
self.get_embedding_var_name(),
'embedding_weights',
is_shared_embedding=True)
return fc._SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
def _record_variable_scope_and_name(embedding_var_name,
embedding_var_name_in_fc,
is_shared_embedding=False):
"""Add embedding variable name and scope to collection."""
g = ops.get_default_graph()
collection = g.get_collection_ref(_TPU_FC_TO_SCOPE)
if not collection:
collection.append({})
var_def_dict = collection[0]
captured_scope = variable_scope.get_variable_scope()
captured_scope_name = captured_scope.name
if embedding_var_name in var_def_dict:
if (var_def_dict[embedding_var_name][0] != captured_scope_name
and not is_shared_embedding):
raise ValueError(
'For embedding var name {}, the variable scope name is different, '
'got {}; expected {}'.format(embedding_var_name,
captured_scope_name,
var_def_dict[embedding_var_name][0]))
if var_def_dict[embedding_var_name][1] != embedding_var_name_in_fc:
raise ValueError(
'For embedding var name {}, the embedding name is different, '
'got {}; expected {}'.format(embedding_var_name,
embedding_var_name_in_fc,
var_def_dict[embedding_var_name][1]))
else:
var_def_dict[embedding_var_name] = (captured_scope_name,
embedding_var_name_in_fc)
def _is_running_on_cpu():
"""Returns True if the current context is CPU model."""
return tpu_function.get_tpu_context().number_of_shards is None
def get_sequence_length_feature_key_name_from_feature_key_name(feature_name):
"""Gets the name of the sequence length feature from that of the base feature.
Args:
feature_name: The feature key of a sequence column.
Returns:
A string which is the feature key for the associated feature length column.
"""
return feature_name + _SEQUENCE_FEATURE_LENGTH_POSTFIX
def split_sequence_columns(feature_columns):
"""Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns.
For use in a TPUEstimator model_fn function. E.g.
def model_fn(features):
sequence_columns, feature_columns = (
tf.tpu.feature_column.split_sequence_columns(feature_columns))
input = tf.feature_column.input_layer(
features=features, feature_columns=feature_columns)
sequence_features, sequence_lengths = (
tf.contrib.feature_column.sequence_input_layer(
features=features, feature_columns=sequence_columns))
Args:
feature_columns: A list of _TPUEmbeddingColumns to split.
Returns:
Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the
second is the non-sequence columns.
"""
sequence_columns = []
non_sequence_columns = []
for column in feature_columns:
if not isinstance(column, (_TPUEmbeddingColumn, _TPUSharedEmbeddingColumn)):
raise TypeError(
'column must be a _TPUEmbeddingColumn or _TPUSharedEmbeddingColumn '
'but got %s instead.' % (type(column)))
if column.is_sequence_column():
sequence_columns.append(column)
else:
non_sequence_columns.append(column)
return sequence_columns, non_sequence_columns
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/feature_column.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for topology.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.tpu import topology
class TopologyTest(test.TestCase):
def testSerialization(self):
"""Tests if the class is able to generate serialized strings."""
original_topology = topology.Topology(
mesh_shape=[1, 1, 2],
device_coordinates=[[[0, 0, 0], [0, 0, 1]]],
)
serialized_str = original_topology.serialized()
new_topology = topology.Topology(serialized=serialized_str)
# Make sure the topology recovered from serialized str is same as the
# original topology.
self.assertAllEqual(
original_topology.mesh_shape, new_topology.mesh_shape)
self.assertAllEqual(
original_topology.device_coordinates, new_topology.device_coordinates)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/topology_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of Cloud TPU helper functions for data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import functional_ops
def _TextLineDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TextLineDataset(filename, buffer_size=buffer_size)
return dataset
def _TFRecordDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
_FILETYPE_MAP = {
'tfrecord': _TFRecordDataset,
'textline': _TextLineDataset,
'text': _TextLineDataset,
}
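# Besides the string keys above, StreamingFilesDataset (defined below) also
# accepts a single-argument callable as `filetype`. A small sketch of such a
# reader for fixed-length records; the 16-byte record size is a hypothetical
# value.
def _example_fixed_length_reader(filename):
  """Hypothetical reader mapping a filename to a dataset of 16-byte records."""
  return readers.FixedLengthRecordDataset(filename, record_bytes=16)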
def StreamingFilesDataset(files,
filetype=None,
file_reader_job=None,
worker_job=None,
num_epochs=None,
filename_shuffle_buffer_size=None,
num_parallel_reads=None,
batch_transfer_size=None,
sloppy=None):
"""StreamingFilesDataset constructs a dataset to stream from workers (GCE VM).
Because Cloud TPUs are allocated over the network, a Cloud TPU cannot read
files local to your GCE VM. In order to train using files stored on your local
VM (e.g. on local SSD for extreme performance), use the StreamingFilesDataset
helper to generate a dataset to feed your Cloud TPU with files from your GCE
VM.
The resulting dataset may return an OutOfRangeError if there are no files
found as a result of the fileglob expansion.
Note: StreamingFilesDataset assumes that the session is using a
TPUClusterResolver and therefore has a worker and a coordinator job. File
loading will be done on the coordinator job.
Args:
files: A string glob to match files, or a `tf.data.Dataset` generating file
names.
filetype: A string (one of 'tfrecord', or 'textline') or a single-argument
TensorFlow function that when given a filename returns a dataset.
file_reader_job: An optional string that corresponds to the job that should
perform the file reads.
worker_job: An optional string that corresponds to the job that should
process the tensors (i.e. your GPU or TPU worker).
num_epochs: The number of epochs through the training set that should be
generated. By default, it will repeat infinitely.
filename_shuffle_buffer_size: An optional integer whose value controls the
shuffling of the file names. If you would like to read from the files in
the same order, set to 0 or False.
num_parallel_reads: An optional integer controlling the number of files to
read from concurrently. (Set to 1 for no parallelism.)
batch_transfer_size: An optional integer controlling the batching used to
amortize the remote function invocation overhead. Set to a very large
number to increase throughput. Set to a very small number to reduce memory
consumption. Set to False to skip batching.
sloppy: (Optional.) If `False`, read input data while maintaining a
deterministic order. (This may have significant performance impacts.)
sloppy defaults to True.
Returns:
A `tf.data.Dataset` with an infinite stream of elements generated by a
parallel interleaving of the set of files matched (or generated) by `files`,
whose element type is that of the dataset produced by `filetype`.
Raises:
ValueError: if any argument is not of the expected type.
"""
if filetype is None:
filetype = 'tfrecord'
if isinstance(filetype, str):
if filetype not in _FILETYPE_MAP:
raise ValueError('Unexpected filetype: %s' % filetype)
reader_fn = _FILETYPE_MAP[filetype]
elif callable(filetype):
reader_fn = filetype
else:
raise ValueError('filetype should be a string or a callable')
file_reader_job = file_reader_job or 'coordinator'
worker_job = worker_job or 'worker'
if filename_shuffle_buffer_size is None:
filename_shuffle_buffer_size = 4096
num_parallel_reads = num_parallel_reads or 8
if batch_transfer_size is None:
batch_transfer_size = 256
if sloppy is None:
sloppy = True
if file_reader_job == 'coordinator':
file_reader_device = '/job:coordinator/task:0'
else:
file_reader_device = '/job:%s' % file_reader_job
with ops.device(file_reader_device):
if isinstance(files, str):
source_dataset = dataset_ops.Dataset.list_files(files)
elif isinstance(files, dataset_ops.DatasetV2):
source_dataset = files
else:
raise ValueError('files was not a string or a dataset: %s' % files)
if filename_shuffle_buffer_size:
source_dataset = source_dataset.shuffle(
buffer_size=filename_shuffle_buffer_size)
source_dataset = source_dataset.apply(
interleave_ops.parallel_interleave(
reader_fn, cycle_length=num_parallel_reads, sloppy=sloppy))
source_dataset = source_dataset.repeat(num_epochs)
if batch_transfer_size:
source_dataset = source_dataset.batch(batch_transfer_size)
source_dataset = source_dataset.prefetch(1)
source_iterator = dataset_ops.make_one_shot_iterator(source_dataset)
source_handle = source_iterator.string_handle()
@function.Defun(dtypes.string)
def LoadingFunc(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(source_dataset),
dataset_ops.get_legacy_output_shapes(source_dataset))
return remote_iterator.get_next()
def MapFn(unused_input):
source_dataset_output_types = dataset_ops.get_legacy_output_types(
source_dataset)
if isinstance(source_dataset_output_types, dtypes.DType):
output_types = [source_dataset_output_types]
elif isinstance(source_dataset_output_types, (list, tuple)):
output_types = source_dataset_output_types
else:
raise ValueError('source dataset has invalid output types')
remote_calls = functional_ops.remote_call(
args=[source_handle],
Tout=output_types,
f=LoadingFunc,
target='/job:%s/replica:0/task:0/cpu:0' % file_reader_job)
if len(remote_calls) == 1:
return remote_calls[0]
else:
return remote_calls
with ops.device('/job:%s' % worker_job):
output_dataset = dataset_ops.Dataset.range(2).repeat().map(
MapFn, num_parallel_calls=4 if sloppy else None)
output_dataset = output_dataset.prefetch(1)
if batch_transfer_size:
# Undo the batching used during the transfer.
output_dataset = output_dataset.apply(batching.unbatch()).prefetch(1)
return output_dataset
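# A minimal usage sketch: the glob below points at hypothetical TFRecord files
# on the coordinator VM's local disk, and the job names match the defaults
# documented above.
def _example_streaming_files_usage():
  """Hypothetical helper streaming local TFRecords to the worker job."""
  return StreamingFilesDataset(
      '/mnt/local_ssd/train-*.tfrecord',  # Hypothetical local file glob.
      filetype='tfrecord',
      file_reader_job='coordinator',
      worker_job='worker')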
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/datasets.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Ops related to Tensor Processing Units."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU datasets tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.lib.io import python_io
from tensorflow.python.platform import test
from tensorflow.python.tpu import datasets
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
_NUM_FILES = 10
_NUM_ENTRIES = 20
class DatasetsTest(test.TestCase):
def setUp(self):
super(DatasetsTest, self).setUp()
self._coord = server_lib.Server.create_local_server()
self._worker = server_lib.Server.create_local_server()
self._cluster_def = cluster_pb2.ClusterDef()
worker_job = self._cluster_def.job.add()
worker_job.name = 'worker'
worker_job.tasks[0] = self._worker.target[len('grpc://'):]
coord_job = self._cluster_def.job.add()
coord_job.name = 'coordinator'
coord_job.tasks[0] = self._coord.target[len('grpc://'):]
session_config = config_pb2.ConfigProto(cluster_def=self._cluster_def)
self._sess = session.Session(self._worker.target, config=session_config)
self._worker_device = '/job:' + worker_job.name
def testTextLineDataset(self):
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'text_line.%d.txt' % i)
contents = []
for j in range(_NUM_ENTRIES):
contents.append(compat.as_bytes('%d: %d' % (i, j)))
with open(filename, 'wb') as f:
f.write(b'\n'.join(contents))
all_contents.extend(contents)
dataset = datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), 'text_line.*.txt'), filetype='text')
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testTFRecordDataset(self):
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'tf_record.%d' % i)
writer = python_io.TFRecordWriter(filename)
for j in range(_NUM_ENTRIES):
record = compat.as_bytes('Record %d of file %d' % (j, i))
writer.write(record)
all_contents.append(record)
writer.close()
dataset = datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), 'tf_record*'), filetype='tfrecord')
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testTFRecordDatasetFromDataset(self):
filenames = []
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'tf_record.%d' % i)
filenames.append(filename)
writer = python_io.TFRecordWriter(filename)
for j in range(_NUM_ENTRIES):
record = compat.as_bytes('Record %d of file %d' % (j, i))
writer.write(record)
all_contents.append(record)
writer.close()
filenames = dataset_ops.Dataset.from_tensor_slices(filenames)
dataset = datasets.StreamingFilesDataset(filenames, filetype='tfrecord')
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testArbitraryReaderFunc(self):
def MakeRecord(i, j):
return compat.as_bytes('%04d-%04d' % (i, j))
record_bytes = len(MakeRecord(10, 200))
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'fixed_length.%d' % i)
with open(filename, 'wb') as f:
for j in range(_NUM_ENTRIES):
record = MakeRecord(i, j)
f.write(record)
all_contents.append(record)
def FixedLengthFile(filename):
return readers.FixedLengthRecordDataset(filename, record_bytes)
dataset = datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), 'fixed_length*'),
filetype=FixedLengthFile)
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testArbitraryReaderFuncFromDatasetGenerator(self):
def my_generator():
yield (1, [1] * 10)
def gen_dataset(dummy):
return dataset_ops.Dataset.from_generator(
my_generator, (dtypes.int64, dtypes.int64),
(tensor_shape.TensorShape([]), tensor_shape.TensorShape([10])))
dataset = datasets.StreamingFilesDataset(
dataset_ops.Dataset.range(10), filetype=gen_dataset)
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = self._sess.run(get_next)
self.assertIsInstance(retrieved_values, (list, tuple))
self.assertEqual(len(retrieved_values), 2)
self.assertEqual(retrieved_values[0], 1)
self.assertItemsEqual(retrieved_values[1], [1] * 10)
def testUnexpectedFiletypeString(self):
with self.assertRaises(ValueError):
datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), '*'), filetype='foo')
def testUnexpectedFiletypeType(self):
with self.assertRaises(ValueError):
datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), '*'), filetype=3)
def testUnexpectedFilesType(self):
with self.assertRaises(ValueError):
datasets.StreamingFilesDataset(123, filetype='tfrecord')
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/datasets_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Helper library for functions used during TPU compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import threading
class TpuContext(threading.local):
"""A context object holding state about the TPU computation being built."""
def __init__(self):
"""Creates a new TpuContext."""
self._number_of_shards = None
@property
def number_of_shards(self):
return self._number_of_shards
def set_number_of_shards(self, number_of_shards):
self._number_of_shards = number_of_shards
# The Tpu context holds the number of shards when a sharded computation is
# being built, or None if no computation is being built.
_current_tpu_context = TpuContext()
@contextlib.contextmanager
def tpu_shard_context(number_of_shards):
if _current_tpu_context.number_of_shards is not None:
raise NotImplementedError("tpu_shard_context cannot be nested.")
try:
_current_tpu_context.set_number_of_shards(number_of_shards)
yield
finally:
_current_tpu_context.set_number_of_shards(None)
def get_tpu_context():
return _current_tpu_context
# Decorator for the TPU computation function passed to tpu.rewrite(). If the
# function contains an embedded training loop, trace tools will generate step
# markers for each iteration.
def on_device_training_loop(func):
# Value for this attribute is from xla.DebugOptions.StepMarkerLocation.
setattr(func, "step_marker_location", "STEP_MARK_AT_TOP_LEVEL_WHILE_LOOP")
return func
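# A minimal sketch (not part of the original module) of how the shard context is
# typically consumed: code built inside tpu_shard_context can read the shard
# count via get_tpu_context(). The helper name below is hypothetical.
def _example_shard_context_usage():
  """Illustrates reading the shard count inside a tpu_shard_context."""
  with tpu_shard_context(8):
    assert get_tpu_context().number_of_shards == 8
  # Outside the context the shard count is reset to None.
  assert get_tpu_context().number_of_shards is None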
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_function.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Library for constructing a training loop, suitable for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu_function
def while_loop(condition, body, inputs=None, infeed_queue=None, name=None):
"""Builds a training loop for TPUs.
The set of loop-carried tensors corresponds to `inputs`. Both
`condition` and `body` take the current value of the loop-carried
  tensors. `body` additionally takes a tuple of infeed from
  `infeed_queue` if `infeed_queue` is not None. `condition` must return a
single boolean value that determines whether iteration
continues. `body` must return an updated list of values for the
loop-carried tensors.
Args:
condition: a Python function that builds the loop condition.
body: a Python function that builds the loop body.
inputs: a list of initial values passed into the training loop, or
None (equivalent to an empty list).
infeed_queue: if not None, the infeed queue from which to append a tuple
of arguments as inputs to condition.
name: (Deprecated) Does nothing.
Returns:
The final values of the loop-carried tensors.
Raises:
TypeError: if body or condition has the wrong signature.
"""
del name
# Converts inputs to Tensors.
inputs = [] if inputs is None else [ops.convert_to_tensor(x) for
x in inputs]
input_types = [x.dtype for x in inputs]
input_arity = len(inputs)
body_arg_error = xla.check_function_argument_count(
body, input_arity, infeed_queue)
if body_arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied loop body function cannot be called with the specified "
"inputs. You specified %d inputs: %s, but the loop body needs %s" % (
input_arity, str([i.name for i in inputs]), body_arg_error))
else:
raise TypeError(
"Supplied loop body function cannot be called with the specified "
"inputs. You specified %d inputs: %s and %d additional inputs from "
"infeed, but the computation needs %s" % (input_arity, str(
[i.name for i in inputs]), infeed_queue.number_of_tuple_elements,
body_arg_error))
condition_arg_error = xla.check_function_argument_count(
condition, input_arity, None)
if condition_arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied loop condition function cannot be called with the "
"specified inputs. You specified %d inputs: %s, but the loop "
"condition needs %s" % (input_arity, str([i.name for i in inputs]),
condition_arg_error))
else:
raise TypeError(
"Supplied loop condition function cannot be called with the "
"specified inputs. You specified %d inputs: %s, but the loop "
"condition needs %s. Note that infeed is not passed to the loop "
"condition." % (input_arity, str([i.name for i in inputs]),
condition_arg_error))
def condition_wrapper(*inputs):
# Discards the dummy output added for arity-0 loops.
if input_arity == 0:
inputs = []
return condition(*inputs)
def body_wrapper(*inputs):
"""Wrapper around `body` that handles infeed queues and control deps."""
inputs = list(inputs)
# Discards the dummy output added for arity-0 loops.
if input_arity == 0:
inputs = []
# Runs `body` with the dequeue_ops appended.
if infeed_queue:
number_of_shards = tpu_function.get_tpu_context().number_of_shards
if number_of_shards is None:
raise ValueError("Can't build training loop with infeed when there is "
"no tpu_shard_context. Are you building a loop or "
"graph directly rather than from inside tpu.rewrite, "
"tpu.batch_parallel, tpu.shard, or tpu.replicate?")
infeed_queue.set_number_of_shards(number_of_shards)
dequeue_ops = [d for d in infeed_queue.generate_dequeue_op()]
else:
dequeue_ops = []
outputs = body(*(inputs + dequeue_ops))
# If the computation only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs
if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU training loop body must return zero or more Tensor values "
"followed by zero or more Operations.")
output_types = [op.dtype for op in output_tensors]
if input_types != output_types:
raise TypeError(
"Mismatch between input types and output types for training loop "
"body: {} vs {}".format(input_types, output_types))
# Add the dequeue operations to output_operations to ensure they are run
# by the loop, even if the programmer's loop body does not use them.
output_operations += dequeue_ops
# Add a dummy output, if needed.
if not output_tensors:
output_tensors = array_ops.constant(0)
if output_operations:
# TODO(phawkins): in principle this is too restrictive since it serializes
# the training loop steps. In practice it does not matter since this loop
# will be compiled by XLA.
output_tensors = control_flow_ops.tuple(output_tensors,
control_inputs=output_operations)
if tensor_tracer.TensorTracer.is_enabled():
num_replicas = tpu_function.get_tpu_context().number_of_shards
if num_replicas is None:
num_replicas = 1
tt = tensor_tracer.TensorTracer()
output_tensors = tt.trace_tpu(ops.get_default_graph(),
output_tensors, None,
num_replicas)
return output_tensors
# If the body has arity 0, add a dummy loop-carried value to which we can add
# control dependencies from any side-effecting operations.
if input_arity == 0:
inputs = [array_ops.constant(0)]
return control_flow_ops.while_loop(
condition_wrapper, body_wrapper, inputs, name="", parallel_iterations=1)
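# A minimal sketch (not part of the original module): a counter loop built with
# while_loop. Both `condition` and `body` receive the loop-carried tensor, and
# the body returns its updated value. The helper name is hypothetical.
def _example_while_loop_counter():
  """Builds a loop that increments a scalar tensor until it reaches 10."""
  def _condition(i):
    return i < 10
  def _body(i):
    return [i + 1]
  return while_loop(_condition, _body, inputs=[0])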
def repeat(n, body, inputs=None, infeed_queue=None, name=None):
"""Builds a training loop that executes a fixed number of iterations.
  The set of loop-carried tensors corresponds to `inputs`.
`body` must be a function that takes and returns the values of the
loop-carried tensors.
Args:
n: the number of loop iterations
body: a Python function that builds the loop body.
inputs: a list of initial values passed into the training loop or
None (equivalent to an empty list).
infeed_queue: if not None, the infeed queue from which to append a tuple
of arguments as inputs to condition.
name: (Deprecated) Does nothing.
Returns:
The final values of the loop-carried tensors.
Raises:
ValueError: if there is a type error.
"""
def _convert_to_list(xs):
if not isinstance(xs, (list, tuple)):
return [xs]
else:
return list(xs)
def cond(i, *args):
del args
return i < n
def body_wrapper(i, *args):
return [i + 1] + _convert_to_list(body(*args))
inputs = [0] if inputs is None else [0] + _convert_to_list(inputs)
outputs = while_loop(
cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name)
outputs = _convert_to_list(outputs)
if len(outputs) == 1:
# Returns the Op rather than an empty list.
return outputs[0].op
else:
return outputs[1:]
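# A minimal sketch (not part of the original module): using repeat to run a body
# a fixed number of times over a loop-carried accumulator. The helper name is
# hypothetical.
def _example_repeat_sum():
  """Adds 1 to an accumulator for 10 iterations; returns the final values."""
  def _body(total):
    return total + 1
  # repeat prepends an iteration counter internally; the body only sees `total`.
  return repeat(10, _body, inputs=[0])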
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/training_loop.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tpu_function helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu_sharding
class ShardingTest(test.TestCase):
def testFreeze(self):
"""Tests that freezing a policy applies default values."""
p1 = tpu_sharding.ShardingPolicy()
p1.freeze()
self.assertEqual(p1.number_of_shards,
tpu_sharding._DEFAULT_NUMBER_OF_SHARDS)
self.assertEqual(p1.shard_dimension, tpu_sharding._DEFAULT_SHARD_DIMENSION)
p2 = tpu_sharding.ShardingPolicy()
p2.set_number_of_shards(17)
p2.set_shard_dimension(23)
p2.freeze()
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 23)
def testFrozen(self):
"""Tests that frozen policies can't be changed."""
p1 = tpu_sharding.ShardingPolicy()
p1.freeze()
with self.assertRaises(ValueError):
p1.set_number_of_shards(17)
with self.assertRaises(ValueError):
p1.set_shard_dimension(22)
def testStr(self):
"""Tests the string representation."""
p1 = tpu_sharding.ShardingPolicy()
self.assertEqual(str(p1), "ShardingPolicy(unset)")
p1.set_number_of_shards(17)
self.assertEqual(str(p1), "ShardingPolicy(unset)")
p1.set_shard_dimension(8)
self.assertEqual(str(p1), "ShardingPolicy(17 shards dimension 8)")
def testMerge(self):
"""Tests that merging works."""
p1 = tpu_sharding.ShardingPolicy()
p1.set_number_of_shards(17)
p1.set_shard_dimension(23)
p2 = tpu_sharding.ShardingPolicy()
p2.merge(p1)
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 23)
p1 = tpu_sharding.ShardingPolicy()
p1.set_shard_dimension(12)
p2.merge(p1)
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 12)
p2.freeze()
p2.merge(p1)
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 12)
p1.set_number_of_shards(1)
with self.assertRaises(ValueError):
p2.merge(p1)
p1 = tpu_sharding.ShardingPolicy()
p1.set_number_of_shards(17)
p2.merge(p1)
p1.set_shard_dimension(2)
with self.assertRaises(ValueError):
p2.merge(p1)
def testGetShardedShape(self):
"""Tests getting a sharded shape."""
p = tpu_sharding.ShardingPolicy()
p.set_number_of_shards(3)
p.set_shard_dimension(1)
self.assertEqual(p.get_sharded_shape([4, 9]), [4, 3])
p.freeze()
with self.assertRaises(ValueError):
p.set_shard_dimension(0)
with self.assertRaises(ValueError):
_ = p.get_sharded_shape([4, 9], shard_index=4)
with self.assertRaises(ValueError):
_ = p.get_sharded_shape([4, 9], shard_index=-1)
with self.assertRaises(TypeError):
_ = p.get_sharded_shape("not_a_shape")
with self.assertRaises(ValueError):
_ = p.get_sharded_shape(tensor_shape.TensorShape(None))
with self.assertRaises(ValueError):
_ = p.get_sharded_shape([4, 10], shard_index=-1)
def testGetUnshardedShape(self):
"""Tests getting an unsharded shape."""
p = tpu_sharding.ShardingPolicy()
p.set_number_of_shards(2)
p.set_shard_dimension(1)
self.assertEqual(p.get_unsharded_shape([[4, 3], [4, 3]]), [4, 6])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[4, 3]])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[4, 3], [4, 3], [4, 3]])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[4, 3], [4, 2]])
with self.assertRaises(TypeError):
_ = p.get_unsharded_shape([[4, 3], "not_a_shape"])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([None, [4, 3]])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[2], [4, 3]])
def testScalar(self):
"""Tests sharding and unsharding scalars."""
p = tpu_sharding.ShardingPolicy()
p.freeze()
self.assertEqual(p.get_sharded_shape([]), [])
self.assertEqual(p.get_unsharded_shape([[]]), [])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_sharding_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow_estimator.python.estimator.tpu.tpu_config import *
# pylint: enable=wildcard-import,unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_config.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for bfloat16 helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.tpu import bfloat16
class BFloat16ScopeTest(test.TestCase):
def testScopeName(self):
"""Test if name for the variable scope is propogated correctly.
"""
with bfloat16.bfloat16_scope() as bf:
self.assertEqual(bf.name, "")
@test_util.run_deprecated_v1
def testRequestedDType(self):
"""Test if requested dtype is honored in the getter.
"""
with bfloat16.bfloat16_scope() as scope:
v1 = variable_scope.get_variable("v1", [])
self.assertEqual(v1.dtype.base_dtype, dtypes.float32)
v2 = variable_scope.get_variable("v2", [], dtype=dtypes.bfloat16)
self.assertEqual(v2.dtype.base_dtype, dtypes.bfloat16)
self.assertEqual([dtypes.float32, dtypes.float32],
[v.dtype.base_dtype for v in scope.global_variables()])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/bfloat16_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow_estimator.python.estimator.tpu.error_handling import *
# pylint: enable=wildcard-import,unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/error_handling.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Modules that need to be exported to the API.
List here the TPU modules that aren't included elsewhere, so that they can be scanned
for tf_export decorations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.tpu import bfloat16
from tensorflow.python.tpu import feature_column_v2
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding
from tensorflow.python.tpu import tpu_optimizer
# pylint: enable=unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/api.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU Feature Column Library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu.feature_column import _is_running_on_cpu
from tensorflow.python.tpu.feature_column import _record_variable_scope_and_name
from tensorflow.python.tpu.feature_column import _SUPPORTED_CATEGORICAL_COLUMNS_V2
from tensorflow.python.tpu.feature_column import _TPUBaseEmbeddingColumn
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
@tf_export(v1=['tpu.experimental.embedding_column'])
def embedding_column_v2(categorical_column,
dimension,
combiner='mean',
initializer=None,
max_sequence_length=0,
learning_rate_fn=None):
"""TPU version of `tf.compat.v1.feature_column.embedding_column`.
Note that the interface for `tf.tpu.experimental.embedding_column` is
different from that of `tf.compat.v1.feature_column.embedding_column`: The
following arguments are NOT supported: `ckpt_to_load_from`,
`tensor_name_in_ckpt`, `max_norm` and `trainable`.
Use this function in place of `tf.compat.v1.feature_column.embedding_column`
when you want to use the TPU to accelerate your embedding lookups via TPU
embeddings.
```
column = tf.feature_column.categorical_column_with_identity(...)
tpu_column = tf.tpu.experimental.embedding_column(column, 10)
...
def model_fn(features):
    dense_feature = tf.keras.layers.DenseFeatures(tpu_column)
embedded_feature = dense_feature(features)
...
estimator = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn,
...
embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
column=[tpu_column],
...))
```
Args:
categorical_column: A categorical column returned from
`categorical_column_with_identity`, `weighted_categorical_column`,
`categorical_column_with_vocabulary_file`,
`categorical_column_with_vocabulary_list`,
`sequence_categorical_column_with_identity`,
`sequence_categorical_column_with_vocabulary_file`,
`sequence_categorical_column_with_vocabulary_list`
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row for a non-sequence column. For more information, see
`tf.feature_column.embedding_column`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
    max_sequence_length: A non-negative integer specifying the max sequence
      length. Any sequence shorter than this will be padded with 0 embeddings
and any sequence longer will be truncated. This must be positive for
sequence features and 0 for non-sequence features.
learning_rate_fn: A function that takes global step and returns learning
rate for the embedding table.
Returns:
A `_TPUEmbeddingColumnV2`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if `initializer` is specified but not callable.
"""
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2):
raise TypeError(
'categorical_column for tpu '
' embedding_column must be type %s, got %s.' % (' or '.join([
cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2
]), type(categorical_column)))
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
column = _TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn)
return column
@tf_export(v1=['tpu.experimental.shared_embedding_columns'])
def shared_embedding_columns_v2(categorical_columns,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
max_sequence_lengths=None,
learning_rate_fn=None):
"""TPU version of `tf.compat.v1.feature_column.shared_embedding_columns`.
Note that the interface for `tf.tpu.experimental.shared_embedding_columns` is
different from that of `tf.compat.v1.feature_column.shared_embedding_columns`:
The following arguments are NOT supported: `ckpt_to_load_from`,
`tensor_name_in_ckpt`, `max_norm` and `trainable`.
Use this function in place of
  `tf.compat.v1.feature_column.shared_embedding_columns` when you want to use the
TPU to accelerate your embedding lookups via TPU embeddings.
```
column_a = tf.feature_column.categorical_column_with_identity(...)
column_b = tf.feature_column.categorical_column_with_identity(...)
tpu_columns = tf.tpu.experimental.shared_embedding_columns(
[column_a, column_b], 10)
...
def model_fn(features):
    dense_feature = tf.keras.layers.DenseFeatures(tpu_columns)
embedded_feature = dense_feature(features)
...
estimator = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn,
...
embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
column=tpu_columns,
...))
```
Args:
categorical_columns: A list of categorical columns returned from
`categorical_column_with_identity`, `weighted_categorical_column`,
`categorical_column_with_vocabulary_file`,
`categorical_column_with_vocabulary_list`,
`sequence_categorical_column_with_identity`,
`sequence_categorical_column_with_vocabulary_file`,
`sequence_categorical_column_with_vocabulary_list`
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row for a non-sequence column. For more information, see
`tf.feature_column.embedding_column`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
shared_embedding_collection_name: Optional name of the collection where
shared embedding weights are added. If not given, a reasonable name will
be chosen based on the names of `categorical_columns`. This is also used
in `variable_scope` when creating shared embedding weights.
    max_sequence_lengths: A list of non-negative integers, either None or
empty or the same length as the argument categorical_columns. Entries
corresponding to non-sequence columns must be 0 and entries corresponding
to sequence columns specify the max sequence length for the column. Any
      sequence shorter than this will be padded with 0 embeddings and any
sequence longer will be truncated.
learning_rate_fn: A function that takes global step and returns learning
rate for the embedding table.
Returns:
A list of `_TPUSharedEmbeddingColumnV2`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if `initializer` is specified but not callable.
ValueError: if `max_sequence_lengths` is specified and not the same length
as `categorical_columns`.
    ValueError: if `max_sequence_lengths` is positive for a non-sequence column
or 0 for a sequence column.
"""
for categorical_column in categorical_columns:
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2):
raise TypeError(
'categorical_column for tpu '
' shared_embedding_columns must be type %s, got %s.' % (' or '.join([
cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2
]), type(categorical_column)))
if not max_sequence_lengths:
max_sequence_lengths = [0] * len(categorical_columns)
if len(max_sequence_lengths) != len(categorical_columns):
raise ValueError('max_sequence_lengths and categorical_columns must be of '
'the same length. len(max_sequence_lengths)={} '
'len(categorical_columns)={}.'.format(
len(max_sequence_lengths), len(categorical_columns)))
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. ')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
num_buckets = sorted_columns[0]._num_buckets # pylint: disable=protected-access
for c in sorted_columns[1:]:
if num_buckets != c._num_buckets: # pylint: disable=protected-access
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same number of buckets. Given column: {} with buckets: {} does '
'not match column: {} with buckets: {}'.format(
sorted_columns[0], num_buckets, c, c._num_buckets)) # pylint: disable=protected-access
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
tpu_columns = []
column_creator = fc_lib.SharedEmbeddingColumnCreator(
dimension=dimension, initializer=initializer, ckpt_to_load_from=None,
tensor_name_in_ckpt=None, num_buckets=num_buckets, trainable=None,
name=shared_embedding_collection_name)
# Create the state (_SharedEmbeddingColumnLayer) here.
for categorical_column, max_sequence_length in zip(
categorical_columns, max_sequence_lengths):
column = _TPUSharedEmbeddingColumnV2(
categorical_column=categorical_column,
shared_embedding_column_creator=column_creator,
combiner=combiner,
initializer=initializer,
shared_embedding_collection_name=shared_embedding_collection_name,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn)
tpu_columns.append(column)
return tpu_columns
class _TPUEmbeddingColumnV2(_TPUBaseEmbeddingColumn, fc_lib.EmbeddingColumn):
"""Core Embedding Column."""
def __new__(cls,
categorical_column,
dimension,
combiner='mean',
initializer=None,
max_sequence_length=0,
learning_rate_fn=None):
return fc_lib.EmbeddingColumn.__new__(
cls,
categorical_column,
dimension,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True)
def __init__(self,
categorical_column,
dimension,
combiner='mean',
initializer=None,
max_sequence_length=0,
learning_rate_fn=None):
_TPUBaseEmbeddingColumn.__init__(
self,
categorical_column,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn)
self._key = None
def get_combiner(self):
return self.combiner
def get_embedding_table_size(self):
"""Returns num_ids and width."""
return (self.categorical_column._num_buckets, self.dimension)
def get_feature_key_name(self):
"""get_feature_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.categorical_column.name
return self.categorical_column.name
def get_weight_key_name(self):
"""get_weight_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.weight_feature_key
return None
def get_embedding_var_name(self):
"""get_embedding_var_name."""
return self.categorical_column.name
def get_initializer(self):
return self.initializer
def is_categorical_column_weighted(self):
"""Check if the categorical column of the embedding column is weighted."""
if isinstance(
self.categorical_column,
(
fc._WeightedCategoricalColumn, # pylint: disable=protected-access
fc_lib.WeightedCategoricalColumn)):
return True
return False
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
# TPU mode
# Get the embeddings from the LazyBuilder.
tensor = inputs.get(self.get_feature_key_name())
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(self.get_embedding_var_name(),
'embedding_weights')
return tensor
def create_state(self, state_manager):
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn.create_state(
self, state_manager)
    # create_state is called for the EmbeddingColumn to create its embedding
    # variables under feature column V2. Since we are on TPU at this point, just
    # record the variable scope here.
_record_variable_scope_and_name(self.get_embedding_var_name(),
'embedding_weights')
def get_dense_tensor(self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn.get_dense_tensor(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn.get_dense_tensor(
self, transformation_cache, state_manager)
# TPU mode
# Get the embeddings from the FeatureTransformationCache.
tensor = transformation_cache.get(self.get_feature_key_name(),
state_manager)
return tensor
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
tensor = inputs.get(self.get_feature_key_name())
tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())
# inputs is a _LazyBuilder and for rank 1 tensors, it calls expand_dims(-1).
# We need to undo this to match the standard CPU sequence embedding.
tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(self.get_embedding_var_name(),
'embedding_weights')
return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
tensor = transformation_cache.get(self.get_feature_key_name(),
state_manager)
tensor_lengths = transformation_cache.get(
self.get_sequence_length_feature_key_name(),
state_manager)
# FeatureTransformationCache expands rank 1 tensors (like sequence length)
# to rank 2. We need to undo this to match the standard CPU sequence
# embedding.
tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
class _TPUSharedEmbeddingColumnV2(_TPUBaseEmbeddingColumn,
fc_lib.SharedEmbeddingColumn):
"""Core Shared Embedding Column."""
def __new__(cls,
categorical_column,
shared_embedding_column_creator,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
max_sequence_length=0,
learning_rate_fn=None):
return fc_lib.SharedEmbeddingColumn.__new__(
cls,
categorical_column,
combiner=combiner,
shared_embedding_column_creator=shared_embedding_column_creator,
max_norm=None)
def __init__(self,
categorical_column,
shared_embedding_column_creator,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
max_sequence_length=0,
learning_rate_fn=None):
_TPUBaseEmbeddingColumn.__init__(
self,
categorical_column,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn)
self._initializer = initializer
self._shared_embedding_collection_name = shared_embedding_collection_name
def get_combiner(self):
return self.combiner
def get_embedding_table_size(self):
"""Returns num_ids and width."""
return (self.categorical_column._num_buckets,
self.shared_embedding_column_creator.dimension)
def get_feature_key_name(self):
"""get_feature_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.categorical_column.name
return self.categorical_column.name
def get_weight_key_name(self):
"""get_weight_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.weight_feature_key
return None
def get_embedding_var_name(self):
"""get_embedding_var_name."""
return self._shared_embedding_collection_name
def get_initializer(self):
return self._initializer
def is_categorical_column_weighted(self):
"""Check if the categorical column of the embedding column is weighted."""
if isinstance(
self.categorical_column,
(
fc._WeightedCategoricalColumn, # pylint: disable=protected-access
fc_lib.WeightedCategoricalColumn)):
return True
return False
def _get_dense_tensor_internal(
self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
self, transformation_cache, state_manager)
# TPU mode
# Get the embeddings from the FeatureTransformationCache.
tensor = transformation_cache.get(self.get_feature_key_name(),
state_manager)
# Add to collection for _create_tpu_embedding_variables_and_ops
# Note that in Feature Column V2, shared embeddings have no scope.
_record_variable_scope_and_name(
self.get_embedding_var_name(),
self.shared_embedding_column_creator._name,
is_shared_embedding=True)
return tensor
def get_sequence_dense_tensor(
self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
tensor = self._get_dense_tensor_internal(
transformation_cache, state_manager)
tensor_lengths = transformation_cache.get(
self.get_sequence_length_feature_key_name(),
state_manager)
# FeatureTransformationCache expands rank 1 tensors (like sequence length)
# to rank 2. We need to undo this to match the standard CPU sequence
# embedding.
tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
def split_sequence_columns_v2(feature_columns):
"""Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns.
For use in a TPUEstimator model_fn function. E.g.
def model_fn(features):
sequence_columns, feature_columns = (
tf.tpu.feature_column.split_sequence_columns(feature_columns))
input = tf.feature_column.input_layer(
features=features, feature_columns=feature_columns)
sequence_features, sequence_lengths = (
tf.contrib.feature_column.sequence_input_layer(
features=features, feature_columns=sequence_columns))
Args:
feature_columns: A list of _TPUEmbeddingColumns to split.
Returns:
Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the
second is the non-sequence columns.
"""
sequence_columns = []
non_sequence_columns = []
for column in feature_columns:
if not isinstance(column, (_TPUEmbeddingColumnV2,
_TPUSharedEmbeddingColumnV2)):
raise TypeError(
'column must be a _TPUEmbeddingColumnV2 or '
'_TPUSharedEmbeddingColumnV2 but got %s instead.' % (type(column)))
if column.is_sequence_column():
sequence_columns.append(column)
else:
non_sequence_columns.append(column)
return sequence_columns, non_sequence_columns
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/feature_column_v2.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Optimizer that implements cross-shard gradient reduction for TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import optimizer
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["tpu.CrossShardOptimizer"])
class CrossShardOptimizer(optimizer.Optimizer):
"""An optimizer that averages gradients across TPU shards."""
def __init__(self,
opt,
reduction=losses.Reduction.MEAN,
name="CrossShardOptimizer",
group_assignment=None):
"""Construct a new cross-shard optimizer.
Args:
opt: An existing `Optimizer` to encapsulate.
reduction: The reduction to apply to the shard losses.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "CrossShardOptimizer".
group_assignment: Optional 2d int32 lists with shape
        [num_groups, num_replicas_per_group] which describes how to apply
optimizer to subgroups.
Raises:
ValueError: If reduction is not a valid cross-shard reduction.
"""
if reduction not in (losses.Reduction.SUM, losses.Reduction.MEAN):
raise ValueError("Unsupported reduction: %s." % reduction)
if isinstance(opt, optimizer_v2.OptimizerV2):
raise TypeError(
"CrossShardOptimizer does not work with OptimizerV2. If you are "
"using TPUStrategy, OptimizerV2 will sum gradients across replicas."
"If you are using TPUEstimator, you may instead sum your gradients "
"with: grads = [tf.compat.v1.tpu.cross_replica_sum(g) for g in grads]"
". If you want to average your gradients, rescale your loss with: "
"loss /= global_batch_size")
super(CrossShardOptimizer, self).__init__(False, name)
self._opt = opt
self._reduction = reduction
self._group_assignment = group_assignment
def _verify_and_get_subgroup_size(self, group_assignment, num_shards):
"""Verify group_assignment and get the subgroup size".
Args:
group_assignment: list of group ids for applying the optimizer
to subgroups.
num_shards: The number of TPU shards.
Returns:
The size of one subgroup in group_assignment.
Raises:
ValueError: If group_assignment is invalid.
"""
if not group_assignment:
return None
if not (isinstance(group_assignment, list) and
all(isinstance(i, list) for i in group_assignment)):
raise ValueError("group_assignment must be a list of list. Got {}".format(
group_assignment))
replica_ids = set()
for g in group_assignment:
for i in g:
replica_ids.add(i)
if set(range(num_shards)) != replica_ids:
raise ValueError("group_assignment must be a permutation of range({0})."
" Got group_assignment={1}".format(
num_shards, group_assignment))
subgroup_size_list = [len(group) for group in group_assignment]
if all(subgroup_size_list[0] == size for size in subgroup_size_list):
return subgroup_size_list[0]
else:
raise ValueError("The size of each subgroup in group_assignment must "
"be equal. Got group_assignment={}".format(
self._group_assignment))
def compute_gradients(self, loss, var_list=None, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer. The
    gradients will be aggregated in apply_gradients() so that the user can
    modify the gradients, e.g. clipping with a per-replica global norm, if needed.
The global norm with aggregated gradients can be bad as one replica's huge
gradients can hurt the gradients from other replicas.
Args:
loss: A Tensor containing the value to minimize.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKey.TRAINABLE_VARIABLES`.
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
Raises:
ValueError: If not within a tpu_shard_context or group_assignment is
invalid.
"""
num_shards = tpu_function.get_tpu_context().number_of_shards
if num_shards is None:
logging.warning(
"CrossShardOptimizer should be used within a tpu_shard_context, but "
"got unset number_of_shards. Assuming 1.")
num_shards = 1
subgroup_size = self._verify_and_get_subgroup_size(self._group_assignment,
num_shards)
if num_shards > 1 and self._reduction == losses.Reduction.MEAN:
if self._group_assignment:
scale = 1.0 / subgroup_size
else:
scale = 1.0 / num_shards
loss *= scale
return self._opt.compute_gradients(loss, var_list=var_list, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
Calls tpu_ops.cross_replica_sum() to sum gradient contributions across
replicas, and then applies the real optimizer.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the Optimizer constructor.
Returns:
An `Operation` that applies the gradients. If `global_step` was not None,
that operation also increments `global_step`.
Raises:
ValueError: If the grads_and_vars is malformed.
"""
summed_grads_and_vars = []
for (grad, var) in grads_and_vars:
if grad is None:
summed_grads_and_vars.append((grad, var))
else:
with ops.colocate_with(grad):
summed_grads_and_vars.append((tpu_ops.cross_replica_sum(
grad, self._group_assignment), var))
return self._opt.apply_gradients(summed_grads_and_vars, global_step, name)
def get_slot(self, *args, **kwargs):
"""Return a slot named "name" created for "var" by the Optimizer.
This simply wraps the get_slot() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._opt.get_slot(*args, **kwargs)
def get_slot_names(self, *args, **kwargs):
"""Return a list of the names of slots created by the `Optimizer`.
This simply wraps the get_slot_names() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
A list of strings.
"""
return self._opt.get_slot_names(*args, **kwargs)
def variables(self):
"""Forwarding the variables from the underlying optimizer."""
return self._opt.variables()
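# A minimal sketch (not part of the original module): wrapping a standard TF1
# optimizer so that gradients are summed across shards with cross_replica_sum
# and the loss is rescaled for MEAN reduction. The helper name is hypothetical.
def _example_cross_shard_optimizer(learning_rate=0.1):
  """Returns a CrossShardOptimizer wrapping plain gradient descent."""
  from tensorflow.python.training import gradient_descent
  base_optimizer = gradient_descent.GradientDescentOptimizer(learning_rate)
  return CrossShardOptimizer(base_optimizer, reduction=losses.Reduction.MEAN)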
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_optimizer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.tpu.ops import tpu_ops
TPUPartitionedCall = tpu_ops.tpu_partitioned_call # pylint: disable=invalid-name
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/functional.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Helper library for sharding during TPU compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import tensor_shape
_DEFAULT_NUMBER_OF_SHARDS = 1
_DEFAULT_SHARD_DIMENSION = 0
# TODO(b/36777903) change other parts of tpu.py to use this class.
class ShardingPolicy(object):
"""An object use to hold the sharding policy for a Tensor.
"""
def __init__(self):
self._number_of_shards = None
self._shard_dimension = None
self._frozen = False
def __str__(self):
if self.number_of_shards is None or self.shard_dimension is None:
return "ShardingPolicy(unset)"
else:
return ("ShardingPolicy(%d shards dimension %d)" %
(self.number_of_shards, self.shard_dimension))
def _fill_default_values(self):
if self._number_of_shards is None:
self._number_of_shards = _DEFAULT_NUMBER_OF_SHARDS
if self._shard_dimension is None:
self._shard_dimension = tensor_shape.as_dimension(
_DEFAULT_SHARD_DIMENSION)
def freeze(self):
"""Prevents further modification to the sharding policy.
Any values that have not been set when freeze is called are set to
defaults. If the ShardingPolicy is already frozen, this is a NoOp.
"""
if not self._frozen:
self._fill_default_values()
self._frozen = True
@property
def number_of_shards(self):
"""Returns the number of shards in the policy or None if unspecified."""
return self._number_of_shards
def set_number_of_shards(self, number_of_shards):
"""Sets the number of shards for the current policy.
If the policy has been frozen then number_of_shards must match the
existing setting.
Args:
number_of_shards: The number of shards to use in the policy.
Raises:
ValueError: If the policy has been frozen and number_of_shards
differs from the frozen value; or number_of_shards <= 0.
"""
if self._frozen:
if self._number_of_shards != number_of_shards:
raise ValueError(
"Can't set sharding policy to use %d shards since it has been "
"frozen to use %d." % (number_of_shards, self._number_of_shards))
else:
if number_of_shards > 0:
self._number_of_shards = number_of_shards
else:
raise ValueError(
"Can't set sharding policy to use %s shards; value must be >0" %
str(number_of_shards))
@property
def shard_dimension(self):
"""Returns the shard dimension of the policy or None if unspecified."""
return self._shard_dimension
def set_shard_dimension(self, shard_dimension):
"""Sets the shard dimension for the current policy.
If the policy has been frozen then shard_dimension must match the
existing setting.
Args:
shard_dimension: The shard dimension to use in the policy.
Raises:
ValueError: If the policy has been frozen and shard_dimension
differs from the frozen value, or shard_dimension can't be
interpreted as a Dimension.
"""
if self._frozen:
if self._shard_dimension != shard_dimension:
raise ValueError(
"Can't set shard dimension to %d since it has been frozen to "
"use %d." % (shard_dimension, self._shard_dimension))
else:
self._shard_dimension = tensor_shape.as_dimension(shard_dimension)
def merge(self, other):
"""Merges the policy of another policy into the current policy.
Args:
other: The policy to merge into this one.
Raises:
ValueError: If this policy has been frozen and the merge conflicts with
the frozen policy.
"""
if other.number_of_shards is not None:
self.set_number_of_shards(other.number_of_shards)
if other.shard_dimension is not None:
self.set_shard_dimension(other.shard_dimension)
def get_sharded_shape(self, shape, shard_index=None):
"""Returns the shape of a shard of a full Tensor.
When given the shape of a 'full-size' Tensor, returns the shape of
the sub-Tensor after it has been sharded. Freezes the policy if it
has not yet been frozen.
Args:
shape: The shape of the full-size Tensor to be sharded.
shard_index: The index of the shard whose shape should be returned.
shard_index can be None for sharding policies that use the same
shape for every shard.
Returns:
The shape of the sharded version of the Tensor.
Raises:
ValueError: If shard_index is None when shards are of different
shapes; or shard_index is not None and
!(0<=shard_index<number_of_shards); or shape does not have at
least self.shard_dimension+1 dimensions; or the value of
shape's shard dimension is not a multiple of
self.number_of_shards
"""
if self._shard_dimension is None or self._number_of_shards is None:
# Don't raise an error if the config is unset.
return None
if shard_index is not None:
if shard_index < 0 or shard_index >= self.number_of_shards:
raise ValueError("shard_index %d, but must be in [0,%d)." %
(shard_index, self._number_of_shards))
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError("shape must be a specified shape not Unknown")
if ndims <= self._shard_dimension:
raise ValueError("shape %s does not contain shard_dimension %d" %
(shape.as_list(), self._shard_dimension))
dims = shape.as_list()
if dims[self._shard_dimension] is None:
raise ValueError("shape %s must have a fixed size for dimension %d "
"that is known at graph construction time." %
(shape.as_list(), self._shard_dimension))
if (dims[self._shard_dimension] % self._number_of_shards) != 0:
raise ValueError("shape %s cannot be sharded %d ways along dimension %d" %
(shape.as_list(), self._number_of_shards,
self._shard_dimension))
    dims[self._shard_dimension] //= self._number_of_shards
return tensor_shape.as_shape(dims)
def _unshard_shape(self, shape):
"""Return the unsharded shape that would generate a given sharded shape.
Args:
shape: the sharded shape to unshard
Returns:
The unsharded shape.
Raises:
ValueError: if shape is unknown or does not contain
self.shard_dimension
TypeError: if shape is not convertible to a TensorShape
"""
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError("shape must be a specified shape not Unknown")
if ndims <= self._shard_dimension:
raise ValueError("shape %s does not contain shard_dimension %d" %
(shape.as_list(), self._shard_dimension))
dims = shape.as_list()
dims[self._shard_dimension] *= self._number_of_shards
return tensor_shape.as_shape(dims)
def get_unsharded_shape(self, shapes):
"""Returns the shape of an unsharded Tensor given a list of shards.
When given a list of shapes of shards, returns the shape of the
unsharded Tensor that would generate the shards. Sets defaults for the
policy if number_of_shards or shard_dimension is None.
Args:
shapes: The shapes of the Tensor shards to be combined.
Returns:
The shape of the unsharded version of the Tensor.
Raises:
ValueError: if shapes is not a list of length
self.number_of_shards; or any element of shapes is not a valid
shape consistent with the sharding policy; or the list of
shapes is not a valid sharding of a full shape.
TypeError: if an element of shapes is not convertible to a
TensorShape
"""
self._fill_default_values()
if len(shapes) != self.number_of_shards:
raise ValueError(
"shapes is %s but must be a list of length number_of_shards=%d" % (
str(shapes), self.number_of_shards))
unsharded_shapes = [self._unshard_shape(s) for s in shapes]
for i in xrange(self.number_of_shards - 1):
if not unsharded_shapes[i].is_compatible_with(
unsharded_shapes[self.number_of_shards - 1]):
raise ValueError(
"sharded shapes %s are not consistent shards of a full shape "
"sharded %d ways along dimension %d" % (
str(shapes), self.number_of_shards, self.shard_dimension))
return unsharded_shapes[0]
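# The sketch below is an illustrative usage example added for documentation
# purposes only; it is not part of the TensorFlow API. It assumes a policy of
# four shards along dimension 0 and exercises the shard/unshard round trip.
def _example_sharding_policy_usage():
  """Illustrative sketch: shard an [8, 128] shape four ways along dimension 0."""
  policy = ShardingPolicy()
  policy.set_number_of_shards(4)
  policy.set_shard_dimension(0)
  policy.freeze()
  # Each shard of an [8, 128] Tensor has shape [2, 128].
  sharded = policy.get_sharded_shape(tensor_shape.TensorShape([8, 128]))
  # Recombining four such shards recovers the original [8, 128] shape.
  full = policy.get_unsharded_shape([sharded] * policy.number_of_shards)
  return sharded, full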
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_sharding.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf.tpu import dynamic_padding_pb2 as dynamic_padding
from tensorflow.python.compat import compat as api_compat
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
ops.NotDifferentiable("TPUReplicatedInput")
# Operations that indicate some error in the user's graph, e.g. a placeholder
# that's introduced outside of the infeed.
_BLACKLISTED_OPS = set([
"Placeholder",
])
# XLA doesn't currently support reading of intermediate tensors, thus some ops
# are not supported.
_UNSUPPORTED_OPS = set([
"AudioSummary",
"AudioSummaryV2",
"HistogramSummary",
"ImageSummary",
"MergeSummary",
"Print",
"ScalarSummary",
"TensorSummary",
"TensorSummaryV2",
])
# Ops which can be safely pruned from XLA compile if they have no consumers.
# These ops should also have no inputs.
_UNCONNECTED_OPS_TO_PRUNE = set(["Placeholder", "VarHandleOp"])
_MAX_WARNING_LINES = 5
_TPU_REPLICATE_ATTR = "_tpu_replicate"
_TPU_COMPILATION_STATUS_ATTR = "_tpu_compilation_status"
_OUTSIDE_COMPILATION_ATTR = "_xla_outside_compilation"
def _tpu_system_device_name(job):
"""Returns the device name for the TPU_SYSTEM device of `job`."""
if job is None:
return "/device:TPU_SYSTEM:0"
else:
return "/job:%s/device:TPU_SYSTEM:0" % job
@tf_export(v1=["tpu.initialize_system"])
def initialize_system(embedding_config=None, job=None):
"""Initializes a distributed TPU system for use with TensorFlow.
Args:
embedding_config: If not None, a `TPUEmbeddingConfiguration` proto
describing the desired configuration of the hardware embedding lookup
tables. If embedding_config is None, no hardware embeddings can be used.
job: The job (the XXX in TensorFlow device specification /job:XXX) that
contains the TPU devices that will be initialized. If job=None it is
assumed there is only one job in the TensorFlow flock, and an error will
be returned if this assumption does not hold.
Returns:
A serialized `TopologyProto` that describes the TPU system. Note:
the topology must be evaluated using `Session.run` before it can be used.
"""
config_string = ("" if embedding_config is None else
embedding_config.SerializeToString())
with ops.device(_tpu_system_device_name(job)):
return tpu_ops.configure_distributed_tpu(embedding_config=config_string)
def initialize_system_for_tpu_embedding(embedding_config, job=None):
"""Initializes a distributed TPU Embedding system for use with TensorFlow.
The following two are equivalent:
1. initialize_system() with embedding_config.
2. initialize_system() without embedding_config, then
initialize_system_for_tpu_embedding().
initialize_system() should not be called with embedding_config if
initialize_system_for_tpu_embedding() is meant to be called later.
Args:
embedding_config: a `TPUEmbeddingConfiguration` proto describing the desired
configuration of the hardware embedding lookup tables.
job: The job (the XXX in TensorFlow device specification /job:XXX) that
contains the TPU devices that will be initialized. If job=None it is
assumed there is only one job in the TensorFlow flock, and an error will
be returned if this assumption does not hold.
Returns:
A no-op.
"""
config_string = embedding_config.SerializeToString()
with ops.device(_tpu_system_device_name(job)):
return tpu_ops.configure_tpu_embedding(config=config_string)
@tf_export(v1=["tpu.shutdown_system"])
def shutdown_system(job=None):
"""Shuts down a running a distributed TPU system.
Args:
job: The job (the XXX in TensorFlow device specification /job:XXX) that
contains the TPU devices that will be shutdown. If job=None it is
assumed there is only one job in the TensorFlow flock, and an error will
be returned if this assumption does not hold.
"""
with ops.device(_tpu_system_device_name(job)):
shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu()
return shutdown_distributed_tpu
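# The sketch below is an illustrative example added for documentation purposes
# only; it is not part of the TensorFlow API. It merely constructs the
# initialization and shutdown ops; both must be evaluated with `Session.run`
# against a TPU worker to take effect.
def _example_system_lifecycle():
  """Illustrative sketch: bracket TPU work with initialize/shutdown ops."""
  topology_op = initialize_system()
  shutdown_op = shutdown_system()
  return topology_op, shutdown_op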
@tf_export(v1=["tpu.core"])
def core(num):
"""Returns the device name for a core in a replicated TPU computation.
Args:
num: the virtual core number within each replica to which operators should
be assigned.
Returns:
A device name, suitable for passing to `tf.device()`.
"""
return "device:TPU_REPLICATED_CORE:{}".format(num)
def _enclosing_tpu_context_and_graph():
"""Returns the TPUReplicateContext and its associated graph."""
graph = ops.get_default_graph()
while graph is not None:
# pylint: disable=protected-access
context_ = graph._get_control_flow_context()
# pylint: enable=protected-access
while context_ is not None:
if isinstance(context_, TPUReplicateContext):
return context_, graph
context_ = context_.outer_context
graph = getattr(graph, "outer_graph", None)
raise ValueError("get_replicated_var_handle() called without "
"TPUReplicateContext. This shouldn't happen. Please file "
"a bug.")
class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside a TPU computation.
The primary role of `TPUReplicateContext` is to mark operators inside a
tpu.replicate() computation with the attribute "_tpu_replicate=XYZ", where XYZ
is a unique name.
We use a `ControlFlowContext` to perform the annotation since it integrates
with Tensorflow constructs like ResourceVariables. For example, if a
`ResourceVariable` is constructed inside a tpu.replicate() block, the
`ResourceVariable` implementation can use
`with ops.control_dependencies(None)` to build the variable's definition
outside the replicated computation.
"""
def __init__(self, name, num_replicas, pivot):
"""Builds a new TPUReplicateContext.
Args:
name: a unique name for the context, used to populate the `_tpu_replicate`
attribute.
num_replicas: an integer that gives the number of replicas for the
computation.
pivot: a pivot node. Nodes in the TPUReplicateContext that do not have any
inputs will have a control dependency on the pivot node. This ensures
that nodes are correctly included in any enclosing control flow
contexts.
"""
super(TPUReplicateContext, self).__init__()
self._num_replicas = num_replicas
self._outer_device_function_stack = None
self._oc_dev_fn_stack = None
self._outside_compilation_cluster = None
self._outside_compilation_counter = 0
self._in_gradient_colocation = None
self._gradient_colocation_stack = []
self._host_compute_core = []
self._name = name
self._name_as_bytes = compat.as_bytes(name)
self._unsupported_ops = []
self._pivot = pivot
self._replicated_vars = {}
def get_replicated_var_handle(self, name, vars_):
"""Returns a variable handle for replicated TPU variable 'var'.
This is a method used by an experimental replicated variable implementation
and is not intended as a public API.
Args:
name: The common name of the variable.
vars_: The replicated TPU variables.
Returns:
The handle of the TPU replicated input node.
"""
handle = self._replicated_vars.get(name)
if handle is not None:
return handle
# Builds a TPUReplicatedInput node for the variable, if one does not already
# exist. The TPUReplicatedInput node must belong to the enclosing
# control-flow scope of the TPUReplicateContext.
# TODO(phawkins): consider changing the contract of the TPU encapsulation
# so the TPUReplicatedInput nodes go inside the TPUReplicateContext scope
# instead.
_, graph = _enclosing_tpu_context_and_graph()
with graph.as_default():
# pylint: disable=protected-access
saved_context = graph._get_control_flow_context()
graph._set_control_flow_context(self.outer_context)
handle = tpu_ops.tpu_replicated_input(
[v.handle for v in vars_], name=name + "/handle")
graph._set_control_flow_context(saved_context)
# pylint: enable=protected-access
self._replicated_vars[name] = handle
return handle
def report_unsupported_operations(self):
if self._unsupported_ops:
op_str = "\n".join([" %s (%s)" % (op.type, op.name)
for op in self._unsupported_ops[:_MAX_WARNING_LINES]])
logging.warning("%d unsupported operations found: \n%s",
len(self._unsupported_ops), op_str)
if len(self._unsupported_ops) > _MAX_WARNING_LINES:
logging.warning("... and %d more" %
(len(self._unsupported_ops) - _MAX_WARNING_LINES))
def EnterGradientColocation(self, op, gradient_uid):
if op is not None:
self._gradient_colocation_stack.append(op)
if not self._outside_compilation_cluster:
try:
outside_attr = op.get_attr(_OUTSIDE_COMPILATION_ATTR)
if self._in_gradient_colocation:
raise NotImplementedError(
"Cannot nest gradient colocation operations outside compilation"
)
if gradient_uid == "__unsupported__":
raise NotImplementedError(
"No gradient_uid calling gradient within outside_compilation")
# When we take the gradient of an op X in an outside_compilation
# cluster C in a forward computation we would like to put the ops
# corresponding to the gradient of X into a new outside_compilation
# cluster C'. However, if we take the gradient of X twice, the second
# one should get yet another new outside_compilation cluster C''.
#
# The mechanism we adopt is to use a 'root_cluster' which is the
# cluster that X was in before we took gradients, and a 'gradient_uid'
# which is different for every invocation of gradients, and put the
# gradient of X in cluster 'root_cluster.gradient_uid'.
#
# When taking a gradient of a gradient, some ops will be colocated
# with Op in the forward pass (e.g., cluster root_cluster) and some in
# the backward pass (e.g., cluster root_cluster.initial_gradient_uid).
# We need all of the grad-of-grad ops to be in the same cluster to
# avoid cyclic dependencies between clusters. We adopt a heuristic
# that puts any op clustered with root_cluster.<xxx> in
# root_cluster.gradient_uid, even if xxx was initial_gradient_uid.
self._in_gradient_colocation = op
parts = outside_attr.split(".")
cluster = parts[0] + "." + gradient_uid
self._EnterOutsideCompilationScope(cluster=cluster)
except ValueError:
# The attr was not present: do nothing.
pass
def ExitGradientColocation(self, op, gradient_uid):
if op is not None:
if not self._gradient_colocation_stack:
raise errors.InternalError(
op.node_def, op,
"Badly nested gradient colocation: empty stack when popping Op " +
op.name)
last_op = self._gradient_colocation_stack.pop()
if op is last_op:
if op is self._in_gradient_colocation:
self._in_gradient_colocation = None
self._ExitOutsideCompilationScope()
else:
raise errors.InternalError(
op.node_def, op, "Badly nested gradient colocation, expected " +
last_op + ", got " + op.name)
def _EnterOutsideCompilationScope(self, cluster=None):
class FakeOp(object):
"""A helper class to determine the current device.
Supports only the type and device set/get methods needed to run the
graph's _apply_device_function method.
"""
def __init__(self):
self._device = ""
@property
def type(self):
return "FakeOp"
@property
def device(self):
return self._device
def _set_device(self, device):
if isinstance(device, pydev.DeviceSpec):
self._device = device.to_string()
else:
self._device = device
def _set_device_from_string(self, device_str):
self._device = device_str
if self._outside_compilation_cluster:
raise NotImplementedError("Cannot nest outside_compilation clusters")
if cluster:
self._outside_compilation_cluster = cluster
else:
self._outside_compilation_cluster = str(self._outside_compilation_counter)
self._outside_compilation_counter += 1
graph = ops.get_default_graph()
fake_op = FakeOp()
graph._apply_device_functions(fake_op) # pylint: disable=protected-access
device = pydev.DeviceSpec.from_string(fake_op.device)
if (device.device_type == "TPU_REPLICATED_CORE" and
device.device_index is not None):
self._host_compute_core.append(self._outside_compilation_cluster + ":" +
str(device.device_index))
self._oc_dev_fn_stack = graph._device_function_stack # pylint: disable=protected-access
graph._device_function_stack = self._outer_device_function_stack # pylint: disable=protected-access
def _ExitOutsideCompilationScope(self):
if not self._outside_compilation_cluster:
raise NotImplementedError(
"Attempted to exit outside_compilation scope when not in scope")
self._outside_compilation_cluster = None
graph = ops.get_default_graph()
graph._device_function_stack = self._oc_dev_fn_stack # pylint: disable=protected-access
def Enter(self):
if not self._outer_device_function_stack:
# Capture the device function stack at the time of first entry
# since that is the stack that will be used outside_compilation.
graph = ops.get_default_graph()
# pylint: disable=protected-access
self._outer_device_function_stack = graph._device_function_stack.copy()
# pylint: enable=protected-access
super(TPUReplicateContext, self).Enter()
def HostComputeCore(self):
return self._host_compute_core
def _RemoveExternalControlEdges(self, op):
"""Remove any external control dependency on this op."""
internal_control_inputs = []
external_control_inputs = []
for x in op.control_inputs:
# pylint: disable=protected-access
is_internal_op = False
ctxt = x._get_control_flow_context()
while ctxt is not None:
if ctxt == self:
is_internal_op = True
break
ctxt = ctxt._outer_context
if is_internal_op:
internal_control_inputs.append(x)
else:
external_control_inputs.append(x)
# pylint: enable=protected-access
# pylint: disable=protected-access
op._remove_all_control_inputs()
op._add_control_inputs(internal_control_inputs)
# pylint: enable=protected-access
return internal_control_inputs, external_control_inputs
def AddOp(self, op):
# pylint: disable=protected-access
if op.type in _BLACKLISTED_OPS:
logging.error("Operation of type %s (%s) is not supported on the TPU. "
"Execution will fail if this op is used in the graph. " %
(op.type, op.name))
if op.type in _UNSUPPORTED_OPS:
self._unsupported_ops.append(op)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
"Non-resource Variables are not supported inside TPU computations "
"(operator name: %s)" % op.name)
if _TPU_REPLICATE_ATTR in op.node_def.attr:
raise ValueError("TPU computations cannot be nested")
op._set_attr(_TPU_REPLICATE_ATTR,
attr_value_pb2.AttrValue(s=self._name_as_bytes))
if self._outside_compilation_cluster:
op._set_attr(
_OUTSIDE_COMPILATION_ATTR,
attr_value_pb2.AttrValue(
s=compat.as_bytes(self._outside_compilation_cluster)))
if self._num_replicas > 1 or not self._outside_compilation_cluster:
# Prevent feeding or fetching anything that is being compiled,
# and any replicated outside_compilation Op.
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
# Remove any control edges from outer control flow contexts. These may cause
# mismatched frame errors.
(internal_control_inputs,
external_control_inputs) = self._RemoveExternalControlEdges(op)
if not op.inputs:
# Add a control edge from the control pivot to this op.
if not internal_control_inputs:
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot())
# pylint: enable=protected-access
else:
for index in xrange(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x is not x:
op._update_input(index, real_x) # pylint: disable=protected-access
if external_control_inputs:
# Use an identity to pull control inputs as data inputs. Note that we
# ignore ops which don't have outputs. TODO(phawkins): fix that.
with ops.control_dependencies(None):
self.Enter()
external_control_inputs = [
array_ops.identity(x.outputs[0]).op
for x in external_control_inputs
if x.outputs
]
self.Exit()
# pylint: disable=protected-access
op._add_control_inputs(external_control_inputs)
# pylint: enable=protected-access
# Mark op's outputs as seen by this context and any outer contexts.
output_names = [x.name for x in op.outputs]
context = self
while context is not None:
# pylint: disable=protected-access
context._values.update(output_names)
context = context._outer_context
# pylint: enable=protected-access
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
if val.name in self._values:
# Use the real value if it comes from outer context.
result = self._external_values.get(val.name)
return val if result is None else result
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddInnerOp(self, op):
self.AddOp(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
# Define the gradient loop state associated with the TPUReplicateContext to
# be None as the TPUReplicateContext does not get nested nor does the
# grad_state outside the TPUReplicateContext affect the graph inside so the
# grad_state should be as if this is the top-level gradient state.
return None
@property
def back_prop(self):
"""Forwards to the enclosing while context, if any."""
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def GetControlPivot(self):
return self._pivot
@tf_export(v1=["tpu.outside_compilation"])
def outside_compilation(computation, *args, **kwargs):
"""Builds part of a computation outside any current TPU replicate scope.
Args:
computation: A Python function that builds the computation to
place on the host.
*args: the positional arguments for the computation.
**kwargs: the keyword arguments for the computation.
Returns:
The Tensors returned by computation.
"""
args = [] if args is None else args
graph = ops.get_default_graph()
# If we are in a TPUReplicateContext, signal that we are now
# outside_compilation
initial_context = graph._get_control_flow_context() # pylint: disable=protected-access
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._EnterOutsideCompilationScope() # pylint: disable=protected-access
context = context.outer_context
retval = computation(*args, **kwargs)
# If we are in a TPUReplicateContext, signal that we are no longer
# outside_compilation
final_context = graph._get_control_flow_context() # pylint: disable=protected-access
if initial_context is not final_context:
raise NotImplementedError(
"Control-flow context cannot be different at start and end of an "
"outside_compilation scope")
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._ExitOutsideCompilationScope() # pylint: disable=protected-access
context = context.outer_context
return retval
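# The sketch below is an illustrative example added for documentation purposes
# only; it is not part of the TensorFlow API. The returned function is meant to
# be passed to `replicate`/`rewrite`; `outside_compilation` only has an effect
# when called from inside such a TPU computation.
def _example_outside_compilation_usage():
  """Illustrative sketch: run a reduction on the host from a TPU step."""
  def _host_fn(x):
    # Built outside the TPU cluster, e.g. for ops XLA does not support.
    return math_ops.reduce_mean(x)
  def _tpu_step(x):
    y = x * 2.0
    host_mean = outside_compilation(_host_fn, y)
    return y, host_mean
  return _tpu_step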
@tf_export(v1=["tpu.replicate"])
def replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None,
maximum_shapes=None):
"""Builds a graph operator that runs a replicated TPU computation.
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs. Each input can be a nested structure
containing values that are convertible to tensors. Note that passing an
      N-dimension list of compatible values will result in an N-dimension list
      of scalar tensors rather than a single rank-N tensor. If you need different
behavior, convert part of inputs to tensors with `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
maximum_shapes: A nested structure of tf.TensorShape representing the shape
to which the respective component of each input element in each replica
should be padded. Any unknown dimensions (e.g.
tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
object) will be padded to the maximum size of that dimension over all
replicas. The structure of `maximum_shapes` needs to be the same as
`inputs[0]`.
Returns:
    A list of outputs, indexed by `[replica_num]`. Each output can be a nested
    structure matching what computation() returns, with a few exceptions.
Exceptions include:
1) None output: a NoOp would be returned which control-depends on
computation.
2) Single value output: A tuple containing the value would be returned.
3) Operation-only outputs: a NoOp would be returned which
control-depends on computation.
TODO(b/121383831): Investigate into removing these special cases.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
ValueError: If the static `inputs` dimensions don't match with the values
given in `maximum_shapes`.
ValueError: If the structure of inputs per replica does not match
the structure of `maximum_shapes`.
"""
return split_compile_and_replicate(
computation,
inputs,
infeed_queue,
device_assignment,
name,
maximum_shapes=maximum_shapes)[1]
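# The sketch below is an illustrative example added for documentation purposes
# only; it is not part of the TensorFlow API. Each inner list supplies the
# inputs of one replica; evaluating the outputs requires a TPU worker.
def _example_replicate_usage():
  """Illustrative sketch: replicate a computation across two replicas."""
  def _computation(x):
    return x * x
  per_replica_inputs = [[array_ops.ones([4])], [array_ops.zeros([4])]]
  return replicate(_computation, per_replica_inputs)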
def _pad_all_input(inputs, padded_shapes):
"""Pad all input tensors given padded_shapes.
The real shape tensors will be concatenated with the padded original inputs.
Args:
inputs: The original inputs.
padded_shapes: A list of padded shapes for each input.
Returns:
The padded inputs and a PaddingMap list which maps the padded input
dimension to the real shape argument index.
"""
# maximum_static_shapes[idx][i] indicates the maximum static size of ith
# dimension of the idx input among all the replicas.
maximum_static_shapes = []
# need_padding[idx][i] indicates whether the ith dimension of the idx input
# needs padding.
need_padding = []
input_shape_tensors = []
for core_idx, inputs_per_core in enumerate(inputs):
for idx, input_tensor in enumerate(inputs_per_core):
input_shape = input_tensor.get_shape().as_list()
if core_idx == 0:
input_shape_tensors.append([])
maximum_static_shapes.append(input_shape)
need_padding.append(np.full_like(input_shape, False, dtype=bool))
else:
for i, s in enumerate(input_shape):
if not s or s != maximum_static_shapes[idx][i]:
need_padding[idx][i] = True
maximum_static_shapes[idx] = max(input_shape,
maximum_static_shapes[idx])
input_shape_tensors[idx].append(array_ops.shape(input_tensor))
maximum_shapes = []
for shapes_per_input in input_shape_tensors:
maximum_shapes.append(
math_ops.reduce_max(array_ops.stack(shapes_per_input), axis=0))
padded_inputs = []
real_shapes = []
padding_maps = []
for core_idx, inputs_per_core in enumerate(inputs):
padded_inputs.append([])
real_shapes.append([])
real_shape_idx = len(inputs_per_core) - 1
for idx, input_tensor in enumerate(inputs_per_core):
input_shape_tensor = input_shape_tensors[idx][core_idx]
input_shape = input_tensor.get_shape().as_list()
padded_shape = padded_shapes[idx]
if any(need_padding[idx]):
for i, s in enumerate(input_shape):
if need_padding[idx][i]:
if core_idx == 0:
real_shape_idx += 1
padding_map = dynamic_padding.PaddingMap()
padding_map.arg_index = idx
padding_map.shape_index = i
padding_map.padding_arg_index = real_shape_idx
padding_maps.append(padding_map)
real_shapes[core_idx].append(
math_ops.cast(input_shape_tensor[i], dtypes.int32))
paddings = []
for i, s in enumerate(padded_shape.dims):
if need_padding[idx][i]:
if s.value:
# Pad to the given maximum value.
padding = [0, s.value - input_shape_tensor[i]]
else:
# If maximum value is not given, then pad to the maximum dimension
# among all the cores.
padding = [0, maximum_shapes[idx][i] - input_shape_tensor[i]]
else:
padding = [0, 0]
paddings.append(padding)
if input_tensor.get_shape().is_fully_defined():
# TODO(rxsang): This is a hack to make sure padded_input has dynamic
# shapes, so any tf.size/tf.shape op performed on it won't be constant
# folded. Do we have better ways to do it?
padded_input = control_flow_ops.cond(
array_ops.constant(True),
lambda: array_ops.pad(input_tensor, paddings), # pylint: disable=cell-var-from-loop
lambda: input_tensor)
else:
padded_input = array_ops.pad(input_tensor, paddings)
padded_inputs[core_idx].append(padded_input)
else:
padded_inputs[core_idx].append(input_tensor)
num_replicas = len(padded_inputs)
for i in range(num_replicas):
padded_inputs[i].extend(real_shapes[i])
return padded_inputs, padding_maps
def split_compile_and_replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None,
use_tpu=True,
maximum_shapes=None):
"""Builds graph operators that runs compilation and replicated computation.
This is a lower level interface than replicate that returns a separate compile
and execute output tensor. In the generated graph the compile op feeds into
the execute op and no additional compilation is incurred when running the
compile op before the execute op. The compile op returns additional
information about the compilation but does not return the compiled program.
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs. Each input can be a nested structure
containing values that are convertible to tensors. Note that passing an
      N-dimension list of compatible values will result in an N-dimension list
      of scalar tensors rather than a single rank-N tensor. If you need different
behavior, convert part of inputs to tensors with `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU
backends. Currently, only supports a default placement (computation is
placed on GPU if one is available, and on CPU if not).
maximum_shapes: A nested structure of tf.TensorShape representing the shape
to which the respective component of each input element in each replica
should be padded. Any unknown dimensions (e.g.
tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
object) will be padded to the maximum size of that dimension over all
replicas. The structure of `maximum_shapes` needs to be the same as
`inputs[0]`.
Returns:
A list of lists with the first list corresponding to the compile op and the
second a list of output tensors, indexed by `[replica_num][output_num]`.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
ValueError: If the static `inputs` dimensions don't match with the values
given in `maximum_shapes`.
ValueError: If the structure of inputs per replica does not match
the structure of `maximum_shapes`.
"""
del name
inputs = [[]] if inputs is None else inputs
metadata_kwargs = {}
if device_assignment is not None:
# Turn the Numpy array into a flattened list so we can pass it as an
# operator attribute.
metadata_kwargs = {
"topology":
device_assignment.topology.serialized(),
"device_assignment":
device_assignment.core_assignment.flatten().tolist()
}
# TODO(phawkins): remove this case after the forward compatibility window
# expires on 2018-10-5.
if api_compat.forward_compatible(2018, 10, 5):
metadata_kwargs["num_cores_per_replica"] = (
device_assignment.num_cores_per_replica)
else:
metadata_kwargs["computation_shape"] = [
device_assignment.num_cores_per_replica
]
# This entry is used for enabling automatic outside compilation.
metadata_kwargs["allow_soft_placement"] = config.get_soft_device_placement()
if ((not isinstance(inputs, list)) or
any(not isinstance(inp, (list, tuple)) for inp in inputs)):
raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")
num_replicas = len(inputs)
# No replicas? Nothing to do.
if num_replicas == 0:
return []
# Checks all replicas have the same structure.
for i in xrange(1, num_replicas):
nest.assert_same_structure(inputs[0], inputs[i])
# Flatten inputs.
flat_inputs = [
nest.flatten(per_replica_input) for per_replica_input in inputs
]
# Converts inputs to Tensors.
flat_inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in flat_inputs]
# Verifies that all replicas have matching numbers and types of inputs
flat_input_types = [x.dtype for x in flat_inputs[0]]
input_arity = len(inputs[0])
flat_input_arity = len(flat_input_types)
for i in range(num_replicas):
if len(inputs[i]) != input_arity:
raise ValueError("Replicas must have the same number of inputs. "
"Replica 0 had {} inputs, replica {} had {} "
"inputs.".format(input_arity, i, len(inputs[i])))
types = [x.dtype for x in flat_inputs[i]]
if types != flat_input_types:
raise ValueError("Replicas must have matching input types. Replica 0 had "
"input types {}, replica {} had input types {}".format(
flat_input_types, i, types))
arg_error = xla.check_function_argument_count(
computation, input_arity, infeed_queue)
if arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s, but the computation needs %s" % (
input_arity, str([i.name for i in inputs[0]]), arg_error))
else:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s and %d additional inputs from infeed,"
" but the computation needs %s" % (input_arity, str(
[i.name
for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
arg_error))
if maximum_shapes:
if infeed_queue:
raise ValueError(
"Dynamic input shapes are not supported with infeed queues")
# Make sure maximum_shapes has the same structure as inputs.
nest.assert_same_structure(inputs[0], maximum_shapes, check_types=False)
# Flatten padded shapes.
flat_maximum_shapes = nest.flatten(maximum_shapes)
flat_maximum_shapes = [
tensor_shape.TensorShape(s) for s in flat_maximum_shapes
]
flat_inputs, padding_maps = _pad_all_input(flat_inputs, flat_maximum_shapes)
serialized_padding_maps = []
for padding_map in padding_maps:
serialized_padding_maps.append(padding_map.SerializeToString())
metadata_kwargs["padding_map"] = serialized_padding_maps
metadata_kwargs["step_marker_location"] = getattr(
computation, "step_marker_location", "STEP_MARK_AT_ENTRY")
graph = ops.get_default_graph()
# Fan-in: Builds a TPUReplicatedInput node for each input.
flat_replicated_inputs = []
for i in range(0, len(flat_inputs[0])):
replicas = [flat_inputs[replica][i] for replica in xrange(num_replicas)]
flat_replicated_inputs.append(
tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))
if isinstance(graph, func_graph.FuncGraph):
# When we are in Tensorflow 2.0 function, 'graph' will be a FuncGraph
# object. If both outside graph and this function have a TPU cluster,
# they will have the same cluster name and it will cause problems (because
# we lower functional ops in Tensorflow 2.0). Append function name to
# 'cluster_name' to avoid cluster name collision.
cluster_name = graph.unique_name("cluster_" + graph.name)
else:
cluster_name = graph.unique_name("cluster")
pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
context = TPUReplicateContext(
name=cluster_name, num_replicas=num_replicas, pivot=pivot)
try:
context.Enter()
metadata = tpu_ops.tpu_replicate_metadata(
num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)
with tpu_function.tpu_shard_context(
num_replicas), ops.control_dependencies([metadata]):
# Add identity ops so even unused inputs are "consumed" by the
# computation. This is to avoid orphaned TPUReplicatedInput nodes.
# TODO(phawkins): consider instead pruning unused TPUReplicatedInput
# and eliding trivial TPUReplicatedInput/TPUReplicatedOutput pairs.
flat_replicated_inputs = [
array_ops.identity(x, name="replicated_input_{}".format(i))
for i, x in enumerate(flat_replicated_inputs)
]
for i in flat_replicated_inputs:
# pylint: disable=protected-access
        # Add an attribute to the identity node so that it can be removed in the
        # encapsulate TPU computation pass if unused. However, we don't remove
# inputs when dynamic padding is enabled.
# TODO(rxsang): Use other ways except argument index in padding_map so
# outside compilation can work with dynamic padding correctly.
if maximum_shapes is None:
i.op._set_attr("_tpu_input_identity",
attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
# Unflatten the computation inputs to match original input structure.
computation_inputs = nest.pack_sequence_as(
structure=inputs[0],
flat_sequence=flat_replicated_inputs[:flat_input_arity])
# If there is an infeed queue, adds the dequeued values to the
# computation's inputs.
if infeed_queue is not None:
infeed_queue.set_number_of_shards(num_replicas)
for t in infeed_queue.generate_dequeue_op():
computation_inputs.append(t)
# Only resource variables work inside a TPU computation, so turn on
# resource variables for the computation.
# TODO(phawkins): consider removing this code. It will
# be less confusing to clients if they knowingly choose to use resource
# variables.
# Partitioned variables is not supported (b/112311320).
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
saved_custom_getter = vscope.custom_getter
def custom_getter(getter, name, *args, **kwargs):
"""Variables on TPU have a few restrictions."""
partitioner = kwargs["partitioner"]
if partitioner is not None:
kwargs["partitioner"] = None
logging.warning(
"Partitioned variables are not supported on TPU. Got "
"`partitioner` that is {} for variable {}. "
"Setting `partitioner` to `None`."
.format(partitioner, name))
if saved_custom_getter is None:
return getter(name, *args, **kwargs)
else:
return saved_custom_getter(getter, name, *args, **kwargs)
vscope.set_use_resource(True)
vscope.set_custom_getter(custom_getter)
outputs = computation(*computation_inputs)
vscope.set_use_resource(saved_use_resource)
vscope.set_custom_getter(saved_custom_getter)
outputs_is_flat = xla.is_flat(outputs)
if outputs_is_flat:
output_tensors, control_deps = _postprocess_flat_outputs(outputs)
else:
output_tensors, control_deps = _postprocess_non_flat_outputs(outputs)
# tensor_tracer imports tpu.py. Local import to tensor_tracer to avoid
# import-cycle
# pylint: disable=g-import-not-at-top
from tensorflow.python.tpu import tensor_tracer
# pylint: enable=g-import-not-at-top
if tensor_tracer.TensorTracer.is_enabled():
tt = tensor_tracer.TensorTracer()
output_tensors = tt.trace_tpu(ops.get_default_graph(),
output_tensors, control_deps,
num_replicas)
context.ExitResult(output_tensors)
finally:
context.report_unsupported_operations()
context.Exit()
host_compute_core = context.HostComputeCore()
if host_compute_core:
attr_value = attr_value_pb2.AttrValue()
attr_value.list.s.extend([compat.as_bytes(x) for x in host_compute_core])
metadata._set_attr("host_compute_core", attr_value) # pylint: disable=protected-access
with ops.control_dependencies([metadata]):
if use_tpu:
compile_status = tpu_ops.tpu_compilation_result()
op = compile_status.op
attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))
op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value) # pylint: disable=protected-access
else:
compile_status = control_flow_ops.no_op(name="compilation_status")
if not output_tensors:
# Returns a list of NoOps dependent on the replication Op, indexed by
# [replica_num].
return [
compile_status,
[
control_flow_ops.group(control_deps, name="shard_%d" % i)
for i in range(num_replicas)
]
]
# Fan-out: Builds a TPUReplicatedOutput node for each output.
replicated_outputs = [[] for i in xrange(num_replicas)]
for i, t in enumerate(output_tensors):
# Fan-out: Builds a TPUReplicatedOutput node for each output.
ys = tpu_ops.tpu_replicated_output(
t, num_replicas, name="output{}".format(i))
# Wraps the outputs in identity operators so the names of any possible
# `fetch` nodes are preserved by the replication rewrite.
with ops.control_dependencies(control_deps):
for replica in xrange(num_replicas):
replicated_outputs[replica].append(
array_ops.identity(
ys[replica], name="output_%d_shard_%d" % (i, replica)))
if not outputs_is_flat:
replicated_outputs = [
nest.pack_sequence_as(outputs, replica_outs)
for replica_outs in replicated_outputs
]
return [compile_status, replicated_outputs]
def _postprocess_flat_outputs(outputs):
"""Validates non-flat outputs, add backs device assignments and other attrs.
Args:
outputs: Output from `computation` inside `tpu.rewrite`.
Returns:
Tensors and Operations extracted from outputs.
"""
# Following code segment is to preserve legacy behavior. Previously we only
# supported flat outputs and thus for consistency it was nice to convert even
# single element into a tuple. But now that we support arbitrary output
# structure, this is no longer necessary.
# TODO(b/121383831): Migrate all legacy use cases and delete this special
# case.
# If the computation returns `None`, make it an empty tuple.
if outputs is None:
outputs = tuple()
# If the computation only returned one value, makes it a tuple.
if not isinstance(outputs, collections_abc.Sequence):
outputs = (outputs,)
# Append `no_op` here so that fetching any return value of this function
# will trigger TPUExecute node.
outputs += (control_flow_ops.no_op(),)
try:
with ops.device(core(0)):
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
"convertible to Tensors. Got '%s'" % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU functions must return zero-or more Tensor values followed by "
"zero or more Operations.")
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
new_output_tensors = []
for t in output_tensors:
with ops.device(t.device if t.device else core(0)):
o = array_ops.identity(t)
# pylint: disable=protected-access
o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
new_output_tensors.append(o)
return new_output_tensors, output_operations
def _postprocess_non_flat_outputs(outputs):
"""Validates non-flat outputs, add backs device assignments and other attrs.
Args:
outputs: Output from `computation` inside `tpu.rewrite`.
Returns:
    Tensors extracted from outputs, and an empty list because Operations are
    not allowed in non-flat output structures.
"""
# Flatten output items.
flat_outputs = nest.flatten(outputs)
# Convert all non-Operation outputs to Tensors.
for i, o in enumerate(flat_outputs):
if isinstance(o, ops.Operation):
raise ValueError(
"tpu.rewrite does not support Operation as return value in non-flat "
"output structure. You can set returned Operations as control "
"dependencies of returned Tensors so Operations are triggered when "
'Tensors are evaluated. Operation found: "%s"' % o.name)
try:
o = ops.convert_to_tensor(o)
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
'convertible to Tensors. Got error: "%s"' % str(e))
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
with ops.device(core(0)):
o = array_ops.identity(o)
# pylint: disable=protected-access
o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
flat_outputs[i] = array_ops.identity(o)
# All flat_outputs are Tensors, and no Operations.
return flat_outputs, []
def split_compile_and_shard(computation,
inputs=None,
num_shards=1,
input_shard_axes=None,
outputs_from_all_shards=True,
output_shard_axes=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Shards `computation` for parallel execution.
`inputs` must be a list of Tensors or None (equivalent to an empty list), each
of which has a corresponding split axis (from `input_shard_axes`). Each input
is split into `num_shards` pieces along the corresponding axis, and
computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
If `outputs_from_all_shards` is true, the outputs from all shards of
`computation` are concatenated back together along their `output_shards_axes`.
Otherwise, each output is taken from an arbitrary shard.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty list). Each
      input tensor has a corresponding shard axis, given by `input_shard_axes`,
which must have size divisible by `num_shards`.
num_shards: The number of shards.
input_shard_axes: A list of dimensions along which to shard `inputs`, or
`None`. `None` means "shard all inputs along dimension 0". If not `None`,
there must be one dimension per input.
outputs_from_all_shards: Boolean or list of boolean. For each output, if
`True`, outputs from all shards are concatenated along the corresponding
`output_shard_axes` entry. Otherwise, each output is taken
from an arbitrary shard. If the argument is a boolean, the argument's
value is used for each output.
output_shard_axes: A list of dimensions along which to concatenate the
outputs of `computation`, or `None`. `None` means "concatenate all outputs
along dimension 0". If not `None`, there must be one dimension per output.
Ignored if `outputs_from_all_shards` is False.
infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
of `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A tuple of (compile op, [output tensors]).
Raises:
ValueError: If num_shards <= 0
ValueError: If len(input_shard_axes) != len(inputs)
ValueError: If len(output_shard_axes) != len(outputs from `computation`)
"""
# TODO(phawkins): consider adding support for broadcasting Tensors passed as
# inputs.
if num_shards <= 0:
raise ValueError("num_shards must be a positive integer.")
inputs = [] if inputs is None else inputs
if not isinstance(inputs, list):
raise TypeError("tpu.shard()'s inputs must be a list of Tensors or None.")
# Converts inputs to Tensors.
inputs = [ops.convert_to_tensor(x) for x in inputs]
if input_shard_axes is None:
input_shard_axes = [0] * len(inputs)
if len(inputs) != len(input_shard_axes):
raise ValueError("Length of input_shard_axes must be equal to the number "
"of inputs.")
if inputs:
# Splits the `inputs` along the corresponding `input_shard_axes`, giving
# lists with layout [input][shard]
split_inputs = [
array_ops.split(x, num_shards, axis=axis)
for (axis, x) in zip(input_shard_axes, inputs)]
# Transposes the input lists to have layout [shard][input]
transposed_inputs = [list(i) for i in zip(*split_inputs)]
else:
transposed_inputs = [[]] * num_shards
compile_op, outputs = split_compile_and_replicate(
computation,
transposed_inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
# There must be at least one shard since num_shards > 0.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
if isinstance(outputs[0], ops.Operation):
# pylint: enable=indexing-exception
# There were no outputs from the computation and replicate returned a list
# of NoOps with control dependencies on the computation. Return the first
# one so it can be used as a control dependency or fetch node.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return compile_op, [outputs[0]]
# pylint: enable=indexing-exception
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
num_outputs = len(outputs[0])
# pylint: enable=indexing-exception
if output_shard_axes is None:
output_shard_axes = [0] * num_outputs
if num_outputs != len(output_shard_axes):
raise ValueError("Length of output_shard_axes must be equal to the number "
"of outputs.")
if isinstance(outputs_from_all_shards, bool):
outputs_from_all_shards = [outputs_from_all_shards] * num_outputs
if num_outputs != len(outputs_from_all_shards):
raise ValueError("Length of outputs_from_all_shards must be equal to the "
"number of outputs.")
results = []
for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
zip(*outputs)):
if all_shards:
# Concatenate all of the outputs together (use stack for scalars).
shape = x[0].shape
is_scalar = shape is not None and (shape.ndims == 0)
results.append((array_ops.stack(list(x)) if is_scalar
else array_ops.concat(list(x), axis=axis)))
else:
# TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
results.append(x[0])
return compile_op, results
@tf_export(v1=["tpu.shard"])
def shard(computation,
inputs=None,
num_shards=1,
input_shard_axes=None,
outputs_from_all_shards=True,
output_shard_axes=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Shards `computation` for parallel execution.
`inputs` must be a list of Tensors or None (equivalent to an empty list), each
of which has a corresponding split axis (from `input_shard_axes`). Each input
is split into `num_shards` pieces along the corresponding axis, and
computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
TODO(phawkins): consider adding support for broadcasting Tensors passed
as inputs.
If `outputs_from_all_shards` is true, the outputs from all shards of
`computation` are concatenated back together along their `output_shards_axes`.
Otherwise, each output is taken from an arbitrary shard.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty list). Each
      input tensor has a corresponding shard axis, given by `input_shard_axes`,
which must have size divisible by `num_shards`.
num_shards: The number of shards.
input_shard_axes: A list of dimensions along which to shard `inputs`, or
`None`. `None` means "shard all inputs along dimension 0". If not `None`,
there must be one dimension per input.
outputs_from_all_shards: Boolean or list of boolean. For each output, if
`True`, outputs from all shards are concatenated along the corresponding
`output_shard_axes` entry. Otherwise, each output is taken
from an arbitrary shard. If the argument is a boolean, the argument's
value is used for each output.
output_shard_axes: A list of dimensions along which to concatenate the
outputs of `computation`, or `None`. `None` means "concatenate all outputs
along dimension 0". If not `None`, there must be one dimension per output.
Ignored if `outputs_from_all_shards` is False.
infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
of `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A list of output tensors.
Raises:
ValueError: If num_shards <= 0
ValueError: If len(input_shard_axes) != len(inputs)
ValueError: If len(output_shard_axes) != len(outputs from `computation`)
"""
return split_compile_and_shard(
computation,
inputs=inputs,
num_shards=num_shards,
input_shard_axes=input_shard_axes,
outputs_from_all_shards=outputs_from_all_shards,
output_shard_axes=output_shard_axes,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)[1]
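# The sketch below is an illustrative example added for documentation purposes
# only; it is not part of the TensorFlow API. The single input is split into
# `num_shards` pieces along dimension 0 and the per-shard results are
# concatenated back together; evaluating the outputs requires a TPU worker.
def _example_shard_usage():
  """Illustrative sketch: shard a computation four ways along dimension 0."""
  def _computation(x):
    return math_ops.reduce_sum(x, axis=1, keepdims=True)
  inputs = [array_ops.ones([8, 16])]
  return shard(_computation, inputs, num_shards=4)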
@tf_export(v1=["tpu.batch_parallel"])
def batch_parallel(computation,
inputs=None,
num_shards=1,
infeed_queue=None,
device_assignment=None,
name=None):
"""Shards `computation` along the batch dimension for parallel execution.
Convenience wrapper around shard().
`inputs` must be a list of Tensors or None (equivalent to an empty list).
Each input is split into `num_shards` pieces along the 0-th dimension, and
computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
The outputs from all shards are concatenated back together along their 0-th
dimension.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty list). The
0-th dimension of each Tensor must have size divisible by `num_shards`.
num_shards: The number of shards.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation and physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A list of output tensors.
Raises:
ValueError: If `num_shards <= 0`
"""
return shard(
computation,
inputs,
num_shards=num_shards,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
@tf_export(v1=["tpu.rewrite"])
def rewrite(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Rewrites `computation` for execution on a TPU system.
Args:
computation: A Python function that builds a computation to apply to the
input. If the function takes n inputs, 'inputs' should be a list of n
tensors.
`computation` may return a list of operations and tensors. Tensors must
come before operations in the returned list. The return value of
`rewrite` is a list of tensors corresponding to the tensors from the
output of `computation`.
All `Operation`s constructed during `computation` will be executed when
evaluating any of the returned output tensors, not just the ones returned.
inputs: A list of input tensors or `None` (equivalent to an empty list).
Each input can be a nested structure containing values that are
convertible to tensors. Note that passing an N-dimensional list of
compatible values will result in an N-dimensional list of scalar tensors
rather than a single rank-N tensor. If you need different behavior,
convert part of inputs to tensors with `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: if not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation and physical cores in
the TPU topology. May be omitted for a single-core computation, in which
case the core attached to task 0, TPU device 0 is used.
name: (Deprecated) Does nothing.
Returns:
Same data structure as if computation(*inputs) is called directly with some
exceptions for correctness. Exceptions include:
1) None output: a NoOp would be returned which control-depends on
computation.
2) Single value output: A tuple containing the value would be returned.
3) Operation-only outputs: a NoOp would be returned which
control-depends on computation.
TODO(b/121383831): Investigate removing these special cases.
"""
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return replicate(
computation,
None if inputs is None else [inputs],
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)[0]
# pylint: enable=indexing-exception
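# --- Hedged usage sketch (illustrative only; not part of the original file). ---
# Illustrates the return-value convention documented above: a computation with
# a single tensor output comes back wrapped in a one-element sequence. The
# matmul shapes are hypothetical.
def _example_rewrite_usage():  # hypothetical helper; never called at import time
  import tensorflow.compat.v1 as tf
  def matmul_fn(x, w):
    return tf.matmul(x, w)
  x = tf.ones([2, 3])
  w = tf.ones([3, 4])
  # Single-value output -> wrapped in a sequence (see "Returns" above).
  (y,) = rewrite(matmul_fn, inputs=[x, w])
  return y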
# Operations that indicate some error in the user's inference graph.
_BLACKLISTED_INFERENCE_OPS = set([
"ReadVariableOp",
"AssignVariableOp",
"AssignAddVariableOp",
"AssignSubVariableOp",
"VarHandleOp",
"Variable",
"VariableV2",
])
def under_tpu_inference_context():
"""Check if it is currently under `_TPUInferenceContext`."""
graph = ops.get_default_graph()
while graph:
context = graph._get_control_flow_context() # pylint: disable=protected-access
while context:
if isinstance(context, _TPUInferenceContext):
return True
context = context.outer_context
if isinstance(graph, function._FuncGraph): # pylint: disable=protected-access
graph = graph._outer_graph # pylint: disable=protected-access
elif isinstance(graph, func_graph.FuncGraph):
graph = graph.outer_graph
else:
return False
class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside a TPU inference computation.
The primary role of `_TPUInferenceContext` is to sanity-check operators inside
a tpu.rewrite_for_inference() computation.
"""
def __init__(self, name):
super(_TPUInferenceContext, self).__init__()
self._name = name
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
# pylint: disable=protected-access
if op.type in _BLACKLISTED_INFERENCE_OPS:
raise NotImplementedError(
"Operation of type %s (%s) is not supported on the TPU for inference."
" Execution will fail if this op is used in the graph. Make sure your"
" variables are using variable_scope." % (op.type, op.name))
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
result = val
if self._outer_context:
result = self._outer_context.AddValue(val)
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
@property
def grad_state(self):
return None
def validate_inference_rewrite_for_variables(graph):
"""Validates whether rewrite_for_inference() 'worked' for variables.
The rewrite_for_inference() method is supposed to append GuaranteeConstOps
after ReadVariableOps, but this mechanism works only if you are using
tf.compat.v1.get_variable() to create and access variables in your tpu
computation. This validation method can be called immediately after calling
tpu.rewrite_for_inference() to check whether GuaranteeConst ops were added
to the graph.
Typical usages:
tpu.validate_inference_rewrite_for_variables(
tf.compat.v1.get_default_graph())
tpu.validate_inference_rewrite_for_variables(sess.graph)
Args:
graph: The graph which needs to be validated.
Raises:
RuntimeError: if validation failed.
"""
if not any(x.type == "GuaranteeConst" for x in graph.get_operations()):
raise RuntimeError(
"No GuaranteeConst ops found in the graph after running "
"tpu.rewrite_for_inference(...). Please check that you are using "
"tf.get_variable() to create and access variables in your tpu "
"computation.")
def rewrite_for_inference(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Rewrites `computation` for inference on a TPU system.
Other than 'rewriting' the computation to run on a TPU, if using variables
in your computation, it moves the ReadVariableOps outside the TPU
computation, and adds GuaranteeConst ops just after the ReadVariableOps.
This mechanism works only if you are using tf.compat.v1.get_variable() to
create and access variables in your tpu computation. You can validate
whether this worked by calling the validate_inference_rewrite_for_variables()
method immediately after this method to check whether GuaranteeConst ops
were added to the graph.
Args:
computation: A Python function that builds a computation to apply to the
input. If the function takes n inputs, 'inputs' should be a list of n
tensors. If the function returns m outputs, rewrite will return a list of
m tensors.
inputs: A list of input tensors or `None` (equivalent to an empty list).
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: if not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation and physical cores in
the TPU topology. May be omitted for a single-core computation, in which
case the core attached to task 0, TPU device 0 is used.
name: The name of the operator.
Returns:
A list of output tensors.
"""
def guarantee_const_getter(getter, name, *args, **kwargs):
with ops.control_dependencies(None):
return array_ops.guarantee_const(
getter(name, *args, **kwargs), name=name + "/GuaranteeConst")
def wrapped_computation(*args, **kwargs):
"""Execute computation under `_TPUInferenceContext`."""
context = _TPUInferenceContext(
name=ops.get_default_graph().unique_name("rewrite_for_inference"))
try:
context.Enter()
vscope = variable_scope.get_variable_scope()
prev_custom_getter = vscope.custom_getter
prev_caching_device = vscope.caching_device
vscope.set_custom_getter(guarantee_const_getter)
vscope.set_caching_device(lambda op: op.device)
result = computation(*args, **kwargs)
vscope.set_custom_getter(prev_custom_getter)
vscope.set_caching_device(prev_caching_device)
finally:
context.Exit()
return result
# pylint: disable=undefined-variable
return rewrite(
wrapped_computation,
inputs=inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
# pylint: enable=undefined-variable
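# --- Hedged usage sketch (illustrative only; not part of the original file). ---
# Shows the pattern described in the docstring above: rewrite a variable-reading
# computation for inference, then immediately validate that GuaranteeConst ops
# were inserted. The variable name and shapes are hypothetical.
def _example_rewrite_for_inference_usage():  # hypothetical; never called at import time
  import tensorflow.compat.v1 as tf
  def inference_fn(features):
    w = tf.get_variable('w', shape=[3, 4], dtype=tf.float32)
    return tf.matmul(features, w)
  features = tf.ones([2, 3])
  outputs = rewrite_for_inference(inference_fn, inputs=[features])
  # Raises RuntimeError if no GuaranteeConst ops were added to the graph.
  validate_inference_rewrite_for_variables(tf.get_default_graph())
  return outputs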
def prune_unconnected_ops_from_xla(prune_graph):
"""Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE.
Args:
prune_graph: A tensorflow graph from which we wish to prune unconnected ops
as listed in _UNCONNECTED_OPS_TO_PRUNE. In general, these ops should have
no inputs and no consumers. These can often be left behind due to graph
construction rewiring (for instance TF-Hub). While they never execute,
they will cause XLA compilation to fail, so we exclude them from compilation by
removing the tpu_replicate attribute.
"""
# Scan over the top level graph and all function graphs.
for graph in [prune_graph] + [
f for f in prune_graph._functions.values() # pylint: disable=protected-access
]:
if not isinstance(graph, ops.Graph):
continue
for op in graph.get_operations():
if op.type not in _UNCONNECTED_OPS_TO_PRUNE:
continue
outputs_consumed = False
for output in op.outputs:
if output.consumers():
outputs_consumed = True
break
if not outputs_consumed:
logging.info(
"Pruning OP %s of type %s from XLA Compile due to "
"it being disconnected.", op.name, op.type)
op._clear_attr(_TPU_REPLICATE_ATTR) # pylint: disable=protected-access
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Operations for handling session logging and shutdown notifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from google.protobuf import text_format
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
_WATCHDOG = None
class CoordinatorResetError(errors.AbortedError):
"""Raised when the monitored session should reset."""
def __init__(self):
errors.AbortedError.__init__(
self, None, None, 'Resetting session loop due to worker shutdown.')
def _clone_session(session, graph=None):
return session_lib.Session(
target=session.sess_str,
config=session._config, # pylint: disable=protected-access
graph=graph if graph else session.graph)
class WorkerHeartbeatManager(object):
"""Manages the status/heartbeat monitor for a set of workers."""
def __init__(self, session, devices, heartbeat_ops, request_placeholder):
"""Construct a new WorkerHeartbeatManager.
(Prefer using `WorkerHeartbeatManager.from_devices` when possible.)
Args:
session: `tf.compat.v1.Session`, session to use for heartbeat operations.
devices: `list[string]` Set of devices to connect to.
heartbeat_ops: `list[tf.Operation]` Heartbeat operations.
request_placeholder: `tf.Placeholder[String]` Placeholder used to specify
the WorkerHeartbeatRequest protocol buffer.
"""
self._session = session
self._devices = devices
self._ops = heartbeat_ops
self._request_placeholder = request_placeholder
@staticmethod
def from_devices(session, devices):
"""Construct a heartbeat manager for the given devices."""
if not devices:
logging.error('Trying to create heartbeat manager with no devices?')
logging.info('Creating heartbeat manager for %s', devices)
request_placeholder = array_ops.placeholder(
name='worker_heartbeat_request', dtype=dtypes.string)
heartbeat_ops = []
for device in devices:
with ops.device(device):
heartbeat_ops.append(tpu_ops.worker_heartbeat(request_placeholder))
return WorkerHeartbeatManager(session, devices, heartbeat_ops,
request_placeholder)
def num_workers(self):
return len(self._devices)
def configure(self, message):
"""Configure heartbeat manager for all devices.
Args:
message: `event_pb2.WorkerHeartbeatRequest`
Returns: `None`
"""
logging.info('Configuring worker heartbeat: %s',
text_format.MessageToString(message))
self._session.run(self._ops,
{self._request_placeholder: message.SerializeToString()})
def ping(self, request=None, timeout_in_ms=5000):
"""Ping all workers, returning the parsed status results."""
if request is None:
request = event_pb2.WorkerHeartbeatRequest()
options = config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
results = self._session.run(
self._ops,
feed_dict={self._request_placeholder: request.SerializeToString()},
options=options)
parsed_results = [
event_pb2.WorkerHeartbeatResponse.FromString(res_pb)
for res_pb in results
]
logging.debug('Ping results: %s', parsed_results)
return parsed_results
def lame_workers(self):
"""Ping all workers, returning manager containing lame workers (or None)."""
ping_results = self.ping()
lame_workers = []
for ping_response, device, op in zip(ping_results, self._devices,
self._ops):
if ping_response.health_status != event_pb2.OK:
lame_workers.append((device, op))
if not lame_workers:
return None
bad_devices, bad_ops = zip(*lame_workers)
return WorkerHeartbeatManager(self._session, bad_devices, bad_ops,
self._request_placeholder)
def __repr__(self):
return 'HeartbeatManager(%s)' % ','.join(self._devices)
# The default timeout is set to allow other shutdown-triggered operations (log
# flushing, etc.) to finish before terminating the worker.
def shutdown(self, wait_time_in_ms=60000):
"""Shutdown all workers after `shutdown_timeout_secs`."""
logging.info('Shutting down %s.', self)
req = event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(timeout_ms=wait_time_in_ms),
shutdown_mode=event_pb2.SHUTDOWN_AFTER_TIMEOUT)
self.configure(req)
# Wait for workers to shutdown.
sleep_sec = 10.0 + wait_time_in_ms / 1000
logging.info('Waiting %.2f seconds for worker shutdown.', sleep_sec)
time.sleep(sleep_sec)
def all_worker_devices(session):
"""Return a list of devices for each worker in the system."""
devices = session.list_devices()
devices_that_support_heartbeats = []
for device in devices:
name = device.name
# Pick devices that have a TPU but target the attached CPU
if ':TPU:0' in name and 'coordinator' not in name:
devices_that_support_heartbeats.append(name.replace('TPU', 'CPU'))
return devices_that_support_heartbeats
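# --- Hedged usage sketch (illustrative only; not part of the original file). ---
# Shows how a heartbeat manager might be built for every worker visible to a
# session and used to find and shut down unhealthy workers. The worker address
# below is a made-up assumption.
def _example_heartbeat_usage():  # hypothetical helper; never called at import time
  import tensorflow.compat.v1 as tf
  session = tf.Session('grpc://10.0.0.1:8470')  # hypothetical TPU worker address
  manager = WorkerHeartbeatManager.from_devices(
      session, all_worker_devices(session))
  lame = manager.lame_workers()  # None when every worker reports OK
  if lame is not None:
    lame.shutdown()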
class WatchdogManager(threading.Thread):
"""Configures worker watchdog timer and handles periodic pings.
Usage:
# Ping workers every minute, shutting down workers if they haven't received
# a ping after 1 hour.
watchdog_manager = WatchdogManager(
ping_interval=60, shutdown_timeout=3600
)
# Use as a context manager, resetting watchdog on context exit:
with watchdog_manager:
session.run(...)
# Or setup globally; watchdog will remain active until program exit.
watchdog_manager.configure_and_run()
"""
def __init__(self,
session,
devices=None,
ping_interval=60,
shutdown_timeout=3600):
"""Initialize a watchdog manager.
Args:
session: Session connected to worker devices. A cloned session and graph
will be created for managing worker pings.
devices: Set of devices to monitor. If None, all workers will be
monitored.
ping_interval: Time, in seconds, between watchdog pings.
shutdown_timeout: Time, in seconds, before watchdog timeout.
"""
threading.Thread.__init__(self)
self.ping_interval = ping_interval
self.shutdown_timeout = shutdown_timeout
self.daemon = True
self._config = session._config # pylint: disable=protected-access
self._target = session.sess_str
self._running = False
self._devices = devices
self._graph = None
self._session = None
self._worker_manager = None
def _reset_manager(self):
"""Reset the graph, session and worker manager."""
self._graph = ops.Graph()
self._session = session_lib.Session(
target=self._target,
graph=self._graph,
config=self._config,
)
if self._devices is None:
self._devices = all_worker_devices(self._session)
with self._graph.as_default():
self._worker_manager = WorkerHeartbeatManager.from_devices(
self._session, self._devices)
self._worker_manager.configure(
event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(
timeout_ms=self.shutdown_timeout * 1000,),
shutdown_mode=event_pb2.WAIT_FOR_COORDINATOR))
def configure_and_run(self):
logging.info(
'Enabling watchdog timer with %d second timeout '
'and %d second ping interval.', self.shutdown_timeout,
self.ping_interval)
self._reset_manager()
self._running = True
self.start()
def stop(self):
logging.info('Stopping worker watchdog.')
self._worker_manager.configure(
event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(timeout_ms=-1,),
shutdown_mode=event_pb2.NOT_CONFIGURED))
self._running = False
self.join()
def __enter__(self):
self.configure_and_run()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def run(self):
# Don't fetch logs or adjust timing: just ping the watchdog.
#
# If we hit an exception, reset our session as it is likely broken.
while self._running:
try:
self._worker_manager.ping(request=None)
time.sleep(self.ping_interval)
except errors.OpError as e:
# Catch any TF errors that occur so we don't stop sending heartbeats
logging.debug('Caught error while sending heartbeat: %s', e)
self._reset_manager()
def start_worker_watchdog(session,
devices=None,
ping_interval=60,
shutdown_timeout=3600):
"""Start global worker watchdog to shutdown workers on coordinator exit."""
global _WATCHDOG
if _WATCHDOG is None:
# Ensure we can send a few pings before we timeout!
ping_interval = min(shutdown_timeout / 10., ping_interval)
_WATCHDOG = WatchdogManager(session, devices, ping_interval,
shutdown_timeout)
_WATCHDOG.configure_and_run()
class GracefulShutdownHook(session_run_hook.SessionRunHook):
"""Session hook that watches for shutdown events.
If a shutdown is indicated, `saver.save(checkpoint_prefix)` is executed, and a
SystemShutdown exception is raised to terminate the main session. If `saver`
is None the `SAVERS` collection will be read to find a saver.
`on_shutdown_hooks` is an optional list of functions that should be called
after checkpointing. The function is called with (`run_context`,
`all_workers`, `lame_workers`).
If `heartbeat_group` is not specified, it will default to all CPU workers
in the system.
"""
def __init__(self, checkpoint_prefix, saver=None, on_shutdown_hooks=None):
self._saver = saver
self._checkpoint_prefix = checkpoint_prefix
self._on_shutdown_hooks = on_shutdown_hooks if on_shutdown_hooks else []
# Worker heartbeats are managed independently of the main training graph.
self._graph = ops.Graph()
self._workers = None
self._session = None
self._heartbeat_supported = False
def after_create_session(self, training_session, coord): # pylint: disable=unused-argument
# N.B. We have to pull the global step here to avoid it being unavailable
# at checkpoint time; the graph has been frozen at that point.
if training_util.get_global_step() is None and self.saver() is not None:
raise ValueError(
'Saver defined but no global step. Run `get_or_create_global_step()`'
' in your model definition to allow checkpointing.')
with self._graph.as_default():
logging.info('Installing graceful shutdown hook.')
self._session = _clone_session(training_session, self._graph)
self._workers = WorkerHeartbeatManager.from_devices(
self._session, all_worker_devices(self._session))
self._heartbeat_supported = self._workers.num_workers() > 0
if self._heartbeat_supported:
try:
self._workers.configure(
event_pb2.WorkerHeartbeatRequest(
shutdown_mode=event_pb2.WAIT_FOR_COORDINATOR))
except errors.InvalidArgumentError:
logging.warn(
'TPU device does not support heartbeats. Failure '
'handling will be disabled.')
self._heartbeat_supported = False
else:
logging.warn(
'No workers support heartbeats. Failure handling will be disabled.')
def saver(self):
if self._saver:
return self._saver
savers = ops.get_collection(ops.GraphKeys.SAVERS)
if not savers:
return None
if not isinstance(savers, list):
return savers
if len(savers) > 1:
logging.error(
'Multiple savers in the SAVERS collection. On-demand checkpointing '
'will be disabled. Pass an explicit `saver` to the constructor to '
'override this behavior.')
return None
return savers[0]
def after_run(self, run_context, run_values):
del run_values
if not self._heartbeat_supported:
return
lame_workers = self._workers.lame_workers()
if lame_workers:
logging.info('ShutdownHook: lame workers found: %s', lame_workers)
if self.saver():
logging.info('ShutdownHook: saving checkpoint to %s',
self._checkpoint_prefix)
self.saver().save(
run_context.session,
self._checkpoint_prefix,
global_step=training_util.get_global_step(),
write_state=True,
)
else:
logging.info('ShutdownHook: no Saver defined.')
for fn in self._on_shutdown_hooks:
fn(run_context, self._workers, lame_workers)
class ResetComputation(object):
"""Hook to reset a TPUEstimator computation loop.
This hook shuts down all workers and resets the monitored session loop by
throwing a CoordinatorResetError.
"""
def __init__(self):
pass
def __call__(self, run_context, all_workers, lame_workers):
del run_context, lame_workers
all_workers.shutdown()
logging.info('Resetting coordinator.')
raise CoordinatorResetError()
class ShutdownLameWorkers(object):
"""Shutdown lamed workers.
Processing will continue normally (typically by waiting for the down
workers to be restarted).
"""
def __init__(self):
pass
def __call__(self, run_context, all_workers, lame_workers):
lame_workers.shutdown()
class ShutdownAllWorkers(object):
"""Shutdown all workers.
Processing will continue normally (typically by waiting for the down
workers to be restarted).
"""
def __init__(self):
pass
def __call__(self, run_context, all_workers, lame_workers):
all_workers.shutdown()
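# --- Hedged usage sketch (illustrative only; not part of the original file). ---
# Shows one way the hooks above might be combined: a GracefulShutdownHook that
# resets the coordinator when a worker goes lame. The checkpoint path and the
# trivial train op are hypothetical assumptions.
def _example_graceful_shutdown_usage():  # hypothetical; never called at import time
  import tensorflow.compat.v1 as tf
  step = tf.train.get_or_create_global_step()
  train_op = tf.assign_add(step, 1)
  hook = GracefulShutdownHook(
      checkpoint_prefix='/tmp/model/model.ckpt',  # hypothetical path
      on_shutdown_hooks=[ResetComputation()])
  with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
    for _ in range(10):
      sess.run(train_op)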
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/session_support.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Defines the `Topology` class, that describes a TPU fabric topology."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf.tpu import topology_pb2
def _tpu_device_name(job, task, device):
"""Returns the device name for the TPU `device` on `task` of `job`."""
if job is None:
return "/task:%d/device:TPU:%d" % (task, device)
else:
return "/job:%s/task:%d/device:TPU:%d" % (job, task, device)
def _tpu_host_device_name(job, task):
"""Returns the device name for the CPU device on `task` of `job`."""
if job is None:
return "/task:%d/device:CPU:0" % task
else:
return "/job:%s/task:%d/device:CPU:0" % (job, task)
class Topology(object):
"""Describes a set of TPU devices.
Represents both the shape of the physical mesh and the mapping from
TensorFlow TPU devices to physical mesh coordinates.
"""
def __init__(self, serialized=None, mesh_shape=None, device_coordinates=None):
"""Builds a Topology object.
If `serialized` is not `None`, the topology is parsed from `serialized` and
the other arguments are ignored. Otherwise, the topology is computed from
`mesh_shape` and `device_coordinates`.
Args:
serialized: A serialized `TopologyProto`, or `None`. If not `None`, the
serialized proto is parsed to discover the topology.
mesh_shape: A sequence of 3 positive integers, or `None`. If not `None`,
the shape of the TPU topology, in number of cores. Ignored if
`serialized` is not `None`.
device_coordinates: A rank 3 numpy array that describes the mapping from
TensorFlow TPU devices to TPU fabric coordinates, or `None`. Ignored
if `serialized` is not `None`.
Raises:
ValueError: If `serialized` does not describe a well-formed topology.
ValueError: If `serialized` is `None` and `mesh_shape` is not a sequence
of 3 positive integers.
ValueError: If `serialized` is `None` and `device_coordinates` is not a
rank 3 numpy int32 array that describes a valid coordinate mapping.
"""
self._serialized = serialized
if serialized:
self._parse_topology(serialized)
else:
self._mesh_shape = np.asarray(mesh_shape, dtype=np.int32)
self._device_coordinates = np.asarray(device_coordinates, np.int32)
if len(self._mesh_shape) != 3 or any(self._mesh_shape < 1):
raise ValueError("`mesh_shape` must be a sequence of 3 positive "
"entries; got {}".format(self._mesh_shape))
if (len(self._device_coordinates.shape) != 3 or
self._device_coordinates.shape[2] != len(self._mesh_shape)):
raise ValueError("`device_coordinates` must be a rank 3 int32 array "
"with minor dimension equal to the mesh shape rank")
self._topology_tasks, self._topology_devices = self._invert_topology()
def _parse_topology(self, serialized):
"""Parses a serialized `TopologyProto` into `self`."""
proto = topology_pb2.TopologyProto()
proto.ParseFromString(serialized)
self._mesh_shape = np.array(proto.mesh_shape, dtype=np.int32)
if len(self._mesh_shape) != 3 or any(self._mesh_shape < 1):
raise ValueError("`mesh_shape` must be a vector of size 3 with positive "
"entries; got {}".format(self._mesh_shape))
if proto.num_tasks < 0:
raise ValueError("`num_tasks` must be >= 0; got {}".format(
proto.num_tasks))
if proto.num_tpu_devices_per_task < 0:
raise ValueError("`num_tpu_devices_per_task` must be >= 0; got {}".format(
proto.num_tpu_devices_per_task))
expected_coordinates_size = (
proto.num_tasks * proto.num_tpu_devices_per_task * len(
proto.mesh_shape))
if len(proto.device_coordinates) != expected_coordinates_size:
raise ValueError("`device_coordinates` must have shape num_tasks ({}) * "
"num_tpu_devices_per_task ({}) * len(mesh_shape) ({}); "
"got shape {}".format(proto.num_tasks,
proto.num_tpu_devices_per_task,
proto.mesh_shape,
len(proto.device_coordinates)))
coords = np.array(proto.device_coordinates, dtype=np.int32)
if any(coords < 0):
raise ValueError("`device_coordinates` must be >= 0")
coords = coords.reshape((proto.num_tasks, proto.num_tpu_devices_per_task,
len(proto.mesh_shape)))
self._device_coordinates = coords
def _invert_topology(self):
"""Inverts a [task,device,axis] topology to [x,y,z] -> task/device maps."""
tasks = np.full(list(self.mesh_shape), -1, dtype=np.int32)
devices = np.full(list(self.mesh_shape), -1, dtype=np.int32)
for task in xrange(self.device_coordinates.shape[0]):
for device in xrange(self.device_coordinates.shape[1]):
x, y, z = self.device_coordinates[task, device, :]
tasks[x, y, z] = task
devices[x, y, z] = device
return tasks, devices
@property
def mesh_shape(self):
"""A rank 1 int32 array describing the shape of the TPU topology."""
return self._mesh_shape
@property
def mesh_rank(self):
"""Returns the number of dimensions in the mesh."""
return len(self._mesh_shape)
@property
def device_coordinates(self):
"""Describes the mapping from TPU devices to topology coordinates.
Returns:
A rank 3 int32 array with shape `[tasks, devices, axis]`.
`tasks` is the number of tasks in the TPU cluster, `devices` is the number
of TPU devices per task, and `axis` is the number of axes in the TPU
cluster topology. Each entry gives the `axis`-th coordinate in the
topology of a task/device pair. TPU topologies are 3-dimensional, with
dimensions `(x, y, core number)`.
"""
return self._device_coordinates
def task_ordinal_at_coordinates(self, device_coordinates):
"""Returns the TensorFlow task number attached to `device_coordinates`.
Args:
device_coordinates: An integer sequence describing a device's physical
coordinates in the TPU fabric.
Returns:
Returns the TensorFlow task number that contains the TPU device with those
physical coordinates.
"""
return self._topology_tasks[tuple(device_coordinates)]
def tpu_device_ordinal_at_coordinates(self, device_coordinates):
"""Returns the TensorFlow device number at `device_coordinates`.
Args:
device_coordinates: An integer sequence describing a device's physical
coordinates in the TPU fabric.
Returns:
Returns the TensorFlow device number within the task that is attached
to the device with those physical coordinates.
"""
return self._topology_devices[tuple(device_coordinates)]
def cpu_device_name_at_coordinates(self, device_coordinates, job=None):
"""Returns the CPU device attached to a logical core."""
return _tpu_host_device_name(
job, self._topology_tasks[tuple(device_coordinates)])
def tpu_device_name_at_coordinates(self, device_coordinates, job=None):
"""Returns the name of the TPU device assigned to a logical core."""
return _tpu_device_name(job,
self._topology_tasks[tuple(device_coordinates)],
self._topology_devices[tuple(device_coordinates)])
@property
def num_tasks(self):
"""Returns the number of TensorFlow tasks in the TPU slice."""
return self._device_coordinates.shape[0]
@property
def num_tpus_per_task(self):
"""Returns the number of TPU devices per task in the TPU slice."""
return self._device_coordinates.shape[1]
def serialized(self):
"""Returns the serialized form of the topology."""
if self._serialized is None:
proto = topology_pb2.TopologyProto()
proto.mesh_shape[:] = list(self._mesh_shape)
proto.num_tasks = self._device_coordinates.shape[0]
proto.num_tpu_devices_per_task = self._device_coordinates.shape[1]
proto.device_coordinates.extend(list(self._device_coordinates.flatten()))
self._serialized = proto.SerializeToString()
return self._serialized
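# --- Hedged usage sketch (illustrative only; not part of the original file). ---
# Builds a tiny, made-up topology (one task, two cores on a 2x1x1 mesh) and maps
# a fabric coordinate back to a TensorFlow device name. The coordinates were
# chosen only to satisfy the constructor's shape checks.
def _example_topology_usage():  # hypothetical helper; never called at import time
  device_coordinates = np.array([[[0, 0, 0], [1, 0, 0]]], dtype=np.int32)
  topology = Topology(mesh_shape=[2, 1, 1],
                      device_coordinates=device_coordinates)
  # The core at fabric coordinate (1, 0, 0) is '/task:0/device:TPU:1'.
  return topology.tpu_device_name_at_coordinates([1, 0, 0])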
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/topology.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow_estimator.python.estimator.tpu.tpu_context import *
# pylint: enable=wildcard-import,unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_context.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Hook for asynchronous checkpointing.
This hook dispatches checkpoint writing operations in a separate thread to
allow execution to continue on the main thread.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class AsyncCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create AsyncCheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._save_thread = None
self._write_graph_thread = None
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
self._summary_writer = None
self._global_step_tensor = None
self._last_checkpoint_step = None
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
# We write the graph and saver_def here, as soon as the session is created.
# We cannot do this in begin, since we let other hooks change the graph and
# add variables in begin. The graph is finalized after all begin calls.
def _write_graph_fn(self):
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
self._write_graph_thread = threading.Thread(target=_write_graph_fn,
args=[self])
self._write_graph_thread.start()
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
logging.info("Triggering checkpoint. %s", global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
if self._save_thread:
logging.info("Waiting for any pending checkpoints to finish.")
self._save_thread.join()
if self._write_graph_thread:
logging.info("Waiting for any pending write_graph to finish.")
self._write_graph_thread.join()
last_step = session.run(self._global_step_tensor)
if self._last_checkpoint_step != last_step:
self._save(session, last_step, asynchronous=False)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step, asynchronous=True):
"""Saves the latest checkpoint, returns should_stop."""
def _save_fn():
"""Run the saver process."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
start_time = time.time()
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
for l in self._listeners:
l.after_save(session, step)
end_time = time.time()
logging.info("Checkpoint actual writing time: (%.3f sec)",
end_time - start_time)
logging.info("Checkpoint finished for %d into %s.", step, self._save_path)
if not asynchronous:
self._last_checkpoint_step = step
_save_fn()
return
if self._save_thread is not None:
self._save_thread.join(timeout=0.1)
if self._save_thread.is_alive():
logging.info("Saver thread still in progress, skipping checkpoint.")
return
self._last_checkpoint_step = step
self._save_thread = threading.Thread(target=_save_fn)
self._save_thread.start()
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
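# --- Hedged usage sketch (illustrative only; not part of the original file). ---
# Shows how the asynchronous hook might stand in for the standard
# CheckpointSaverHook in a monitored session. The directory, step counts, and
# trivial train op are hypothetical assumptions.
def _example_async_checkpoint_usage():  # hypothetical; never called at import time
  import tensorflow.compat.v1 as tf
  step = tf.train.get_or_create_global_step()
  train_op = tf.assign_add(step, 1)
  hook = AsyncCheckpointSaverHook(
      checkpoint_dir='/tmp/model',  # hypothetical directory
      save_steps=100,
      saver=tf.train.Saver())
  with tf.train.SingularMonitoredSession(hooks=[hook]) as sess:
    for _ in range(300):
      sess.run(train_op)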
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/async_checkpoint.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Helper context for running models with bfloat16."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
def _get_custom_getter():
"""Returns a custom getter that this class's methods must be called under.
All methods of this class must be called under a variable scope that was
passed this custom getter. Example:
```python
network = ConvNetBuilder(...)
with tf.compat.v1.variable_scope('cg',
custom_getter=network.get_custom_getter()):
network.conv(...)
# Call more methods of network here
```
Currently, this custom getter only does anything if self.use_tf_layers is
True. In that case, it causes variables to be stored as dtype
self.variable_type, then casted to the requested dtype, instead of directly
storing the variable as the requested dtype.
"""
def inner_custom_getter(getter, *args, **kwargs):
"""Custom getter that forces variables to have type self.variable_type."""
cast_to_bfloat16 = False
requested_dtype = kwargs['dtype']
if requested_dtype == dtypes.bfloat16:
# Only change the variable dtype if doing so does not decrease variable
# precision.
kwargs['dtype'] = dtypes.float32
cast_to_bfloat16 = True
var = getter(*args, **kwargs)
# This if statement is needed to guard the cast, because batch norm
# assigns directly to the return value of this custom getter. The cast
# makes the return value not a variable so it cannot be assigned. Batch
# norm variables are always in fp32 so this if statement is never
# triggered for them.
if cast_to_bfloat16:
var = math_ops.cast(var, dtypes.bfloat16)
return var
return inner_custom_getter
@tf_export(v1=['tpu.bfloat16_scope'])
@tf_contextlib.contextmanager
def bfloat16_scope():
"""Scope class for bfloat16 variables so that the model uses custom getter.
This enables variables to be read as bfloat16 type when using get_variable.
"""
with variable_scope.variable_scope(
'', custom_getter=_get_custom_getter()) as varscope:
yield varscope
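# --- Hedged usage sketch (illustrative only; not part of the original file). ---
# Inside `bfloat16_scope`, a variable requested as bfloat16 is stored as float32
# and read back through a bfloat16 cast, per the custom getter above. The
# variable name and shape are hypothetical.
def _example_bfloat16_scope_usage():  # hypothetical; never called at import time
  with bfloat16_scope():
    w = variable_scope.get_variable(
        'w', shape=[128, 128], dtype=dtypes.bfloat16)
  # `w` behaves as a bfloat16 tensor backed by a float32 variable.
  return w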
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/bfloat16.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU system metadata and associated tooling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu
_PINGING_MASTER_TIMEOUT_IN_MS = 5 * 60 * 1000  # 5 min
_RETRY_TIMES = 12 * 24 # 1 day
_INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS = 300 * 1000 # 5 mins
_TPU_DEVICE_REG = re.compile(r'.*task:(\d+)/.*device:TPU:(\d+)$')
_DEVICE_TYPE_REGEX = re.compile('.*device:([^:]+).*')
_DEFAULT_JOB_NAME = 'tpu_worker'
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'
_LOCAL_MASTERS = ('', 'local')
# _TPUSystemMetadata is used by TPUEstimator to hold TPU configuration,
# including num_cores and num_hosts.
_TPUSystemMetadata = collections.namedtuple('_TPUSystemMetadata', [
'num_cores',
'num_hosts',
'num_of_cores_per_host',
'topology',
'devices',
])
def _query_tpu_system_metadata(master_address, cluster_def=None,
query_topology=False):
"""Automatically detects the TPU system metadata in the system."""
tpu_core_count = 0
devices = []
device_dict = collections.defaultdict(list)
if context.executing_eagerly():
device_names = context.list_devices()
devices = []
# We want the output type to match in both eager and session mode
for name in device_names:
device_match = _DEVICE_TYPE_REGEX.match(name)
device_type = 'CPU'
if device_match:
device_type = device_match.group(1)
devices.append(session_lib._DeviceAttributes(name, device_type, 0, 0)) # pylint: disable=protected-access
else:
# TODO(b/120564445): Replace with standard library for retries.
retry_count = 1
while True:
logging.info('Querying Tensorflow master (%s) for TPU system metadata.',
master_address)
try:
with ops.Graph().as_default():
with session_lib.Session(
master_address,
config=get_session_config_with_timeout(
_PINGING_MASTER_TIMEOUT_IN_MS,
cluster_def)) as sess:
devices = sess.list_devices()
break
except errors.DeadlineExceededError:
msg = ('Failed to connect to the Tensorflow master. The TPU worker may '
'not be ready (still scheduling) or the Tensorflow master '
'address is incorrect: got (%s).' %
(master_address))
# TODO(xiejw): For local or grpc master we might not need retry logic
# here.
if retry_count <= _RETRY_TIMES:
logging.warning('%s', msg)
logging.warning('Retrying (%d/%d).', retry_count, _RETRY_TIMES)
retry_count += 1
else:
raise ValueError(msg)
for device in devices:
match = _TPU_DEVICE_REG.match(device.name)
if match:
host_id = match.group(1)
core_id = match.group(2)
device_dict[host_id].append(core_id)
tpu_core_count += 1
num_of_cores_per_host = 0
if tpu_core_count:
num_cores_per_host_set = set(
[len(core_ids) for core_ids in device_dict.values()])
if len(num_cores_per_host_set) != 1:
raise RuntimeError(
'The number of TPU cores on each host is not the same. This should not happen. '
'devices: {}'.format(devices))
num_of_cores_per_host = num_cores_per_host_set.pop()
topology = None
if query_topology:
if not tpu_core_count:
raise RuntimeError(
'Cannot find any TPU cores in the system (master address {}). '
'This usually means the master address is incorrect or the '
'TPU worker has some problems. Available devices: {}'.format(
master_address, devices))
topology = _obtain_topology(master_address, cluster_def)
# We sort the metadata devices so that downstream users get a sorted list
# for creating mirrored variables correctly.
def _sort_key(device):
spec = tf_device.DeviceSpec.from_string(device.name)
return (spec.job, spec.replica, spec.task, spec.device_type,
spec.device_index)
devices = tuple(sorted(devices, key=_sort_key))
metadata = _TPUSystemMetadata(
num_cores=tpu_core_count,
num_hosts=len(device_dict),
num_of_cores_per_host=num_of_cores_per_host,
topology=topology,
devices=devices)
if tpu_core_count:
logging.info('Found TPU system:')
logging.info('*** Num TPU Cores: %d', metadata.num_cores)
logging.info('*** Num TPU Workers: %d', metadata.num_hosts)
logging.info('*** Num TPU Cores Per Worker: %d',
metadata.num_of_cores_per_host)
for device in metadata.devices:
logging.info('*** Available Device: %s', device)
else:
logging.info('Failed to find TPU: %s', metadata)
return metadata
def _obtain_topology(master_address, cluster_def):
"""Obtains TPU fabric topology."""
try:
logging.info('Initializing TPU system (master: %s) to fetch topology '
'for model parallelism. This might take a while.',
master_address)
with ops.Graph().as_default():
session_config = get_session_config_with_timeout(
_INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS, cluster_def)
with session_lib.Session(
master_address, config=session_config) as sess:
topology = sess.run(tpu.initialize_system())
return topology
except errors.DeadlineExceededError:
raise ValueError(
'Failed to initialize the TPU system with master (%s). '
'Please double check the TPU system is functional.' % (
master_address))
def get_session_config_with_timeout(timeout_in_secs, cluster_def):
"""Returns a session given a timeout and a cluster configuration."""
config = config_pb2.ConfigProto(
operation_timeout_in_ms=timeout_in_secs, cluster_def=cluster_def)
return config
def master_job(master, cluster_def):
"""Returns the canonnical job name to use to place TPU computations on.
Args:
master: A `string` representing the TensorFlow master to use.
cluster_def: A ClusterDef object describing the TPU cluster.
Returns:
A string containing the job name, or None if no job should be specified.
Raises:
ValueError: If the user needs to specify a tpu_job_name, because we are
unable to infer the job name automatically, or if the user-specified job
names are inappropriate.
"""
# If the user specifies the tpu_job_name, use that.
if master in _LOCAL_MASTERS:
return None
if (not cluster_def or not cluster_def.job):
return _DEFAULT_JOB_NAME
job_names = set([job.name for job in cluster_def.job])
if _DEFAULT_JOB_NAME in job_names:
# b/37868888 tracks allowing ClusterSpec propagation to reuse job names.
raise ValueError('Currently, tpu_worker is not an allowed job name.')
if len(job_names) == 1:
return cluster_def.job[0].name
if len(job_names) == 2:
if _DEFAULT_COORDINATOR_JOB_NAME in job_names:
job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME)
return job_names.pop()
# TODO(b/67716447): Include more sophisticated heuristics.
raise ValueError(
'Could not infer TPU job name. Please specify a tpu_job_name as part '
'of your TPUConfig.')
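# --- Hedged usage sketch (illustrative only; not part of the original file). ---
# Shows how `master_job` resolves the job name from a single-job ClusterDef.
# The master address, job name, and task address below are made up.
def _example_master_job_usage():  # hypothetical helper; never called at import time
  from tensorflow.core.protobuf import cluster_pb2
  cluster_def = cluster_pb2.ClusterDef()
  job = cluster_def.job.add()
  job.name = 'worker'
  job.tasks[0] = '10.0.0.1:8470'
  # With exactly one (non-default) job defined, that job's name is returned.
  return master_job('grpc://10.0.0.1:8470', cluster_def)  # -> 'worker'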
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_system_metadata.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Helper library for handling infeed between hosts and TPUs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_sharding
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util import nest
def partition_or_replicate_on_host(tensor, dims):
"""Partitions or replicates the input tensor.
The ops inside this function are placed on the host side.
Args:
tensor: The input tensor which will be partitioned or replicated.
dims: A list of integers describing how to partition the input tensor.
Returns:
An iterator of `Tensor`s or a list of partitioned tensors.
"""
if dims is None:
return itertools.repeat(tensor)
dims = np.array(dims)
output = [tensor]
shape_list = np.array(tensor.shape.as_list())
quotients, remainders = np.divmod(shape_list, dims)
for axis, (quotient, remainder, dim, original_size) in enumerate(
zip(quotients, remainders, dims, shape_list)):
if dim <= 1:
continue
if remainder > 0:
# For each dimension, when it cannot be evenly partitioned, XLA assumes
# tensors are partitioned in a greedy manner by using
# ceil_ratio(size/dim) first. E.g. 2D tensor with shape (5, 14) and dims
# are (2, 4). Since 5 % 2 = 1 and 14 % 4 = 2, [5, 14] =>
# [[(3, 4), (3, 4), (3, 4), (3, 2)],
#  [(2, 4), (2, 4), (2, 4), (2, 2)]]
ceil_ratio = quotient + 1
num_full_slots, left_over = np.divmod(original_size, ceil_ratio)
num_or_size_splits = [ceil_ratio] * num_full_slots + [left_over]
if len(num_or_size_splits) < dim:
num_or_size_splits += [0] * (dim - len(num_or_size_splits))
new_output = []
for x in output:
new_output.append(
array_ops.split(
x, num_or_size_splits=num_or_size_splits, axis=axis))
output = new_output
else:
output = [array_ops.split(x, int(dim), axis=axis) for x in output]
output = nest.flatten(output)
return output
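# --- Hedged usage sketch (illustrative only; not part of the original file). ---
# Partitions a made-up [4, 6] host tensor with dims=[2, 3]: dimension 0 is split
# in two and dimension 1 in three, giving a flat list of six [2, 2] tensors.
def _example_partition_on_host():  # hypothetical helper; never called at import time
  tensor = array_ops.zeros([4, 6])
  return partition_or_replicate_on_host(tensor, dims=[2, 3])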
def _tag_sharding_attribute_for_dequeued_tensor(tensor, dims):
"""Tags appropriate XLA sharding attribute to the dequeued tensor.
The sharding attribute of the dequeued tensor will be a tuple.
Args:
tensor: The dequeued tensor on TPU.
dims: A list of integers describing how the tensor is partitioned.
Returns:
The same tensor with the xla_sharding attribute.
"""
if dims is None:
return xla_sharding.replicate(tensor, assign_tuple_sharding=True)
elif np.prod(dims) == 1:
return xla_sharding.assign_device(tensor, 0, assign_tuple_sharding=True)
else:
tile_assignment = np.arange(np.prod(dims)).reshape(dims)
return xla_sharding.tile(
tensor=tensor,
tile_assignment=tile_assignment,
assign_tuple_sharding=True)
def tag_sharding_attribute_for_dequeued_tensors(dequeues, dims):
"""Tags appropriate XLA sharding attribute to the dequeued tensors.
Args:
dequeues: A list of dequeued tensors on TPU.
dims: A list of integers describing how the tensor is partitioned.
Returns:
The same dequeues with appropriate xla_sharding attribute.
"""
nest.assert_shallow_structure(dequeues, dims)
return nest.map_structure_up_to(
dequeues, _tag_sharding_attribute_for_dequeued_tensor, dequeues, dims)
class InfeedQueue(object):
"""A helper object to build a device infeed queue.
The InfeedQueue builds the host-side and device-side Ops to enqueue and
dequeue elements, respectively, and ensures that their types and
shapes match.
"""
def __init__(self,
number_of_tuple_elements=None,
tuple_types=None,
tuple_shapes=None,
shard_dimensions=None,
name=None):
"""Creates a new InfeedQueue with the given configuration.
The configuration need not be fully specified at creation since it
can be modified subsequently by methods that set the values
explicitly or infer them from the shapes of inputs.
Args:
number_of_tuple_elements: the number of Tensors fed atomically through the
queue, must be present unless it can be inferred from other arguments.
tuple_types: if not None, a list of types of the elements of the queue.
tuple_shapes: if not None, a list of shapes of the elements of the queue.
shard_dimensions: if not None, a list of dimensions on which the
elements of the queue should be sharded during automatic
parallelization.
name: the name of the queue.
Raises:
ValueError: if number_of_tuple_elements <= 0; or
number_of_tuple_elements, tuple_types, tuple_shapes, and
shard_dimensions are all None; or the length of tuple_types,
tuple_shapes, or shard_dimensions is not equal to
number_of_tuple_elements; or any element of shard_dimensions
can't be converted to a Dimension.
TypeError: if any element of tuple_types or tuple_shapes can't
be converted to a dtype or TensorShape, respectively.
"""
self._frozen = False
self._generated_enqueue_ops = False
self._generated_dequeue_op = False
self._name = "InfeedQueue" if name is None else name
if number_of_tuple_elements is None:
if tuple_types is not None:
number_of_tuple_elements = len(tuple_types)
elif tuple_shapes is not None:
number_of_tuple_elements = len(tuple_shapes)
elif shard_dimensions is not None:
number_of_tuple_elements = len(shard_dimensions)
else:
raise ValueError(
"number of tuple elements cannot be inferred from InfeedQueue "
"constructor")
if number_of_tuple_elements <= 0:
raise ValueError("number_of_tuple_elements %d must be > 0" %
number_of_tuple_elements)
# Make an empty sharding policy for each tuple element.
self._sharding_policies = [
tpu_sharding.ShardingPolicy()
for _ in xrange(number_of_tuple_elements)
]
if tuple_types is not None:
self.set_tuple_types(tuple_types)
else:
self._tuple_types = None
if tuple_shapes is not None:
self.set_tuple_shapes(tuple_shapes)
else:
self._tuple_shapes = None
if shard_dimensions is not None:
self.set_shard_dimensions(shard_dimensions)
self._validate()
def _validate(self):
"""Checks that the configuration is self-consistent.
Raises:
ValueError: if the shapes and sharding policies don't match.
"""
if self.tuple_shapes is not None:
for (policy, shape) in zip(self._sharding_policies, self._tuple_shapes):
# Raise an error if the policy is incompatible with the shape.
_ = policy.get_sharded_shape(shape)
@property
def number_of_tuple_elements(self):
"""Returns the number of InfeedQueue tuple elements."""
return len(self._sharding_policies)
@property
def tuple_types(self):
"""Returns the types of the InfeedQueue tuple elements."""
return self._tuple_types
def set_tuple_types(self, tuple_types):
"""Sets the type of each element of the queue.
tuple_types must be a list of length
self.number_of_tuple_elements, and each element must be
convertible to a dtype.
Args:
tuple_types: the types of each queue element.
Raises:
ValueError: if tuple_types is not of length
self.number_of_tuple_elements.
TypeError: if an element of tuple_types cannot be converted to a
dtype.
"""
if len(tuple_types) != self.number_of_tuple_elements:
raise ValueError("tuple_types is %s, but must be a list of length %d" %
(str(tuple_types), self.number_of_tuple_elements))
if self._frozen:
for (frozen, updated) in zip(self._tuple_types, tuple_types):
if frozen != updated:
raise ValueError(
"Trying to update InfeedQueue with frozen configuration with an "
"incompatible type. Frozen types are %s, updated types are %s" % (
str(self._tuple_types), str(tuple_types)))
else:
try:
self._tuple_types = [dtypes.as_dtype(t) for t in tuple_types]
except (TypeError) as e:
raise TypeError(
"tuple_types is %s, but must be a list of elements each "
"convertible to dtype: got error %s" % (str(tuple_types), str(e)))
@property
def tuple_shapes(self):
"""Returns the shapes of the InfeedQueue tuple elements."""
return self._tuple_shapes
def set_tuple_shapes(self, tuple_shapes):
"""Sets the shape of each element of the queue.
tuple_shapes must be a list of length
self.number_of_tuple_elements, and each element must be
convertible to a TensorShape.
Args:
tuple_shapes: the shapes of each queue element.
Raises:
ValueError: if tuple_shapes is not of length
self.number_of_tuple_elements.
TypeError: if an element of tuple_shapes cannot be converted to
a TensorShape.
"""
if len(tuple_shapes) != self.number_of_tuple_elements:
raise ValueError("tuple_shapes is %s, but must be a list of length %d" %
(str(tuple_shapes), self.number_of_tuple_elements))
try:
tuple_shapes = [tensor_shape.as_shape(shape) for shape in tuple_shapes]
except (ValueError, TypeError) as e:
raise TypeError(
"tuple_shapes is %s, but must be a list of elements each "
"convertible to TensorShape: got error %s" % (str(tuple_shapes),
str(e)))
if self._frozen:
for (frozen, updated) in zip(self._tuple_shapes, tuple_shapes):
if frozen != updated:
raise ValueError(
"Trying to update InfeedQueue with frozen configuration with an "
"incompatible shape. Frozen shapes are %s, updated shapes are %s"
% (str(self._tuple_shapes), str(tuple_shapes)))
else:
self._tuple_shapes = tuple_shapes
self._validate()
@property
def sharding_policies(self):
"""Returns the sharding policies of the InfeedQueue tuple elements."""
return self._sharding_policies
@property
def shard_dimensions(self):
"""Gets the shard dimension of each tuple element.
Returns:
A list of length number_of_tuple_elements, where each list entry
is the shard dimension of that tuple element or None if the
shard dimension has not been set.
"""
# The number of shards is always the same for all the policies.
return [policy.shard_dimension for policy in self._sharding_policies]
def set_shard_dimensions(self, shard_dimensions):
"""Sets the shard_dimension of each element of the queue.
shard_dimensions must be a list of length
self.number_of_tuple_elements, and each element must be
convertible to a Dimension compatible with self.tuple_shapes.
Args:
shard_dimensions: the dimensions of each queue element.
Raises:
ValueError: if shard_dimensions is not of length
self.number_of_tuple_elements; or an element of
shard_dimensions cannot be converted to a Dimension; or an
element of shard_dimensions is a Dimension that is out of
range for the corresponding tuple element shape.
"""
if len(shard_dimensions) != self.number_of_tuple_elements:
raise ValueError("shard_dimensions is %s, but must be a list of length %d"
% (str(shard_dimensions),
self.number_of_tuple_elements))
for (policy, dimension) in zip(self._sharding_policies, shard_dimensions):
policy.set_shard_dimension(dimension)
self._validate()
@property
def number_of_shards(self):
"""Gets the number of shards to use for the InfeedQueue.
Returns:
Number of shards or None if the number of shards has not been set.
"""
# The number of shards is always the same for all the policies.
return self._sharding_policies[0].number_of_shards
def set_number_of_shards(self, number_of_shards):
"""Sets the number of shards to use for the InfeedQueue.
Args:
number_of_shards: number of ways to shard the InfeedQueue.
Raises:
ValueError: if number_of_shards is not > 0; or the policies have
been frozen and number_of_shards was already set to something
else.
"""
for policy in self._sharding_policies:
policy.set_number_of_shards(number_of_shards)
self._validate()
def set_configuration_from_input_tensors(self, input_tensors):
"""Sets the shapes and types of the queue tuple elements.
input_tensors is a list of Tensors whose types and shapes are used
to set the queue configuration.
Args:
input_tensors: list of Tensors of the same types and shapes as
the desired queue Tuple.
Raises:
ValueError: if input_tensors is not a list of length
self.number_of_tuple_elements
"""
if len(input_tensors) != self.number_of_tuple_elements:
raise ValueError("input_tensors is %s, but should be a list of %d Tensors"
% (str(input_tensors), self.number_of_tuple_elements))
self.set_tuple_shapes([t.shape for t in input_tensors])
self.set_tuple_types([t.dtype for t in input_tensors])
def set_configuration_from_sharded_input_tensors(self, input_tensors):
"""Sets the shapes and types of the queue tuple elements.
input_tensors is a list of lists of Tensors whose types and shapes are used
to set the queue configuration. The length of the outer list is the number
of shards required, and each inner list is the tuple of Tensors to use to
determine the types and shapes of the corresponding shard. This method
depends on the shard dimension, and calling it freezes the shard policy.
Args:
input_tensors: list of lists of Tensors. The outer list length corresponds
to the desired number of shards, and each inner list contains the
Tensors whose types and shapes define the configuration of the corresponding shard.
Raises:
ValueError: if any inner list is not a list of length
self.number_of_tuple_elements; or the inner lists do not combine to
form a consistent unsharded shape.
TypeError: if the types of the Tensors in the inner lists do not match.
"""
if not self._frozen:
# Unset the tuple shapes in case the configuration becomes
# transiently inconsistent.
self._tuple_shapes = None
number_of_shards = len(input_tensors)
self.set_number_of_shards(number_of_shards)
for t in input_tensors:
if len(t) != self.number_of_tuple_elements:
raise ValueError(
"input_tensors is %s but must be a list of lists, where each inner"
" list has length number_of_tuple_elements=%d" % (
str(input_tensors), self.number_of_tuple_elements))
# Transpose the inputs to make a list of shard shapes for each tuple
# element.
sharded_shapes = [[t[i].shape for t in input_tensors]
for i in xrange(self.number_of_tuple_elements)]
# For each tuple, get the unsharded shape using that tuple's policy.
unsharded_shapes = [
policy.get_unsharded_shape(s)
for (policy, s) in zip(self._sharding_policies, sharded_shapes)
]
self.set_tuple_shapes(unsharded_shapes)
for i in xrange(1, self.number_of_shards):
for (t1, t2) in zip(input_tensors[0], input_tensors[i]):
if t1.dtype != t2.dtype:
raise TypeError(
"types of the tuple elements of input_tensors %s are not "
"consistent" % str(input_tensors))
self.set_tuple_types([t.dtype for t in input_tensors[0]])
def freeze(self):
"""Freezes the InfeedQueue so it can no longer be modified.
The configuration is implicitly frozen before any host-side or
device-side Ops are generated. The configuration cannot be frozen
until the types and shapes of the tuple elements have been set.
Raises:
ValueError: if the types or shapes of the tuple elements have not been
set.
"""
self._frozen = True
if self._tuple_types is None:
raise ValueError(
"Can't freeze an InfeedQueue without setting all tuple types.")
if self._tuple_shapes is None:
raise ValueError(
"Can't freeze an InfeedQueue without setting all tuple shapes.")
for shape in self._tuple_shapes:
if shape.dims is None:
raise ValueError(
"Can't freeze an InfeedQueue without setting all tuple shapes.")
for policy in self._sharding_policies:
policy.freeze()
self._validate()
def generate_dequeue_op(self, tpu_device=0):
"""Generates the device-side Op to dequeue a tuple from the queue.
Implicitly freezes the queue configuration if it is not already
frozen, which will raise errors if the shapes and types have not
been fully specified.
Args:
tpu_device: The TPU device ordinal where the infeed instruction should be
placed. If None, no explicit placement will be performed, and it is up
to the user to call this API from within a proper TPU device scope.
The XLA code will fail if the TPU dequeue instruction is not bound to
any device.
Returns:
A list of Outputs corresponding to a shard of infeed dequeued
into XLA, suitable for use within a replicated block.
Raises:
ValueError: if the types or shapes of the tuple elements have not been
set; or if a dequeue op has already been generated.
"""
self.freeze()
if self._generated_dequeue_op:
raise ValueError("Can't generate two dequeue Ops from the same queue")
self._generated_dequeue_op = True
full_name = "%s/dequeue" % self._name
sharded_shapes = [
policy.get_sharded_shape(shape)
for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies)
]
if tpu_device is not None:
with ops.device(tpu.core(tpu_device)):
return tpu_ops.infeed_dequeue_tuple(
dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
else:
return tpu_ops.infeed_dequeue_tuple(
dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
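# Note: the shapes passed to infeed_dequeue_tuple above are the per-shard
# shapes obtained from each element's sharding policy, so (for illustration)
# a frozen queue with tuple shape [8, 128] and 8 shards along dimension 0
# dequeues [1, 128] tensors on each core.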
def _generate_enqueue_op(self,
inputs,
name_prefix,
index,
device=None,
tpu_ordinal=-1):
"""Generate a host-side Op to enqueue a tuple to the queue.
If device is None the inputs are all required to have the same
device specification, and the enqueue Op is colocated with
inputs[0]. Otherwise the enqueue Op is placed on 'device'.
Args:
inputs: a list of Tensors with the types and shapes of the tuple elements.
name_prefix: the base name for the Op.
index: the shard index, used to uniquify the Op name.
device: device to place the Op on, or None if it should be
colocated with the inputs.
tpu_ordinal: ordinal of the TPU device on the host to use for
infeed if device is a CPU device. Should be set to -1 if device
is a TPU device.
Returns:
An Op corresponding to a shard of infeed enqueued at the host,
suitable for use within a replicated block.
Raises:
ValueError: if device is None and inputs do not all have the
same device specification.
"""
full_name = "%s/%d" % (name_prefix, index)
shapes = [t.shape for t in inputs]
if device is None:
devices = [t.device for t in inputs]
for i in xrange(1, self.number_of_tuple_elements):
if devices[0] != devices[i]:
raise ValueError(
"input devices for shard %d are %s, but should all be the same" %
(index, str(devices)))
with ops.colocate_with(inputs[0]):
return tpu_ops.infeed_enqueue_tuple(
inputs=inputs,
shapes=shapes,
name=full_name,
device_ordinal=tpu_ordinal)
else:
with ops.device(device):
return tpu_ops.infeed_enqueue_tuple(
inputs=inputs,
shapes=shapes,
name=full_name,
device_ordinal=tpu_ordinal)
def generate_enqueue_ops(self,
sharded_inputs,
tpu_ordinal_function=None,
placement_function=None):
"""Generates the host-side Ops to enqueue the shards of a tuple.
sharded_inputs is a list, one for each shard, of lists of
Tensors. sharded_inputs[0] is the tuple of Tensors to use to feed
shard 0 of the queue. Returns the host-side Ops that must be run to
enqueue the sharded tuple. The Op for shard i is colocated with the inputs
for shard i.
Implicitly freezes the queue configuration if it is not already
frozen. If the configuration has already been frozen, and is not
compatible with the types and shapes of sharded_inputs, an error
will be raised.
Args:
sharded_inputs: a list of lists of Tensors. The length of the outer list
determines the number of shards. Each inner list indicates the types
and shapes of the tuples in the corresponding shard.
tpu_ordinal_function: if not None, a function that takes the
shard index as input and returns the ordinal of the TPU device
the shard's infeed should be placed on. tpu_ordinal_function must be
set if the inputs are placed on CPU devices.
placement_function: if not None, a function that takes the shard index as
input and returns the host device where the enqueue op should be placed
on.
Returns:
A list of host-side Ops, one for each shard, that when executed together
will enqueue a full-size element of infeed.
Raises:
ValueError: if the queue configuration has previously been frozen and the
shapes of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the shapes of the elements of sharded_inputs
don't form a consistent unsharded tuple; or if the elements of a tuple
have different device constraints.
TypeError: if the queue configuration has previously been frozen and the
types of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the types of the elements of sharded_inputs
don't form a consistent unsharded tuple.
"""
self.set_configuration_from_sharded_input_tensors(sharded_inputs)
self.freeze()
if self._generated_enqueue_ops:
raise ValueError("Can't generate two enqueue Ops from the same queue")
self._generated_enqueue_ops = True
if tpu_ordinal_function is None:
tpu_ordinal_function = lambda index: -1
name_prefix = "%s/enqueue" % self._name
return [
self._generate_enqueue_op(
shard,
name_prefix,
index,
tpu_ordinal=tpu_ordinal_function(index),
device=placement_function(index) if placement_function else None)
for (shard, index) in zip(sharded_inputs, xrange(self.number_of_shards))
]
# TODO(misard) Generalize this to the case of systems that don't
# have 8 devices per host, and figure out what to do with
# model-parallelism.
def _default_placement_function(self, index):
return "/task:%d/device:CPU:0" % (index / 8)
def _default_ordinal_function(self, index):
return index % 8
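# For example, with the defaults above and 8 TPU devices per host, shard
# index 11 is placed on host "/task:1/device:CPU:0" and uses TPU ordinal 3
# on that host (11 % 8 == 3).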
# TODO(b/36470756) remove this from tutorials once we have a better story
# for automatic placement of input pipelines.
def split_inputs_and_generate_enqueue_ops(self,
inputs,
device_assignment=None,
placement_function=None,
tpu_ordinal_function=None):
"""POORLY-PERFORMING ON MULTI-HOST SYSTEMS.
Generates the host-side Ops to enqueue a tuple.
This method performs poorly because it takes an entire input on a single
host, splits it, and distributes it to all of the cores. It is present only
to simplify tutorial examples.
inputs is a list of Tensors to use to feed the queue. Each input is split
into self.number_of_shards shards. Returns an Op for each shard to enqueue
the shard. The Op for shard i is placed on device placement_function(i).
Implicitly freezes the queue configuration if it is not already
frozen. If the configuration has already been frozen, and is not
compatible with the types and shapes of inputs, an error
will be raised.
Args:
inputs: a list of Tensors which indicates the types and shapes of the
queue tuple.
device_assignment: if not `None`, a TPU `DeviceAssignment`. If
device_assignment is not `None`, but `placement_function` and
`tpu_ordinal_function` are None, then `device_assignment` will be used to
place infeeds on the first k TPU shards, where k is the number of shards
in the queue. If all three are `None`, then default placement and
ordinal functions are used.
placement_function: if not None, a function that takes the shard
index as input and returns a device string indicating which
device the shard's infeed should be placed on. If placement_function
and tpu_ordinal_function are None, inputs are sharded round-robin
across the devices in the system.
tpu_ordinal_function: if not None, a function that takes the
shard index as input and returns the ordinal of the TPU device
the shard's infeed should be placed on. If placement_function
and tpu_ordinal_function are None, inputs are sharded round-robin
across the devices in the system.
Returns:
A list of host-side Ops, one for each shard, that when executed together
will enqueue a full-size element of infeed.
Raises:
ValueError: if the queue configuration has previously been frozen and the
shapes of the elements of inputs are not compatible with the frozen
configuration.
TypeError: if the queue configuration has previously been frozen and the
types of the elements of inputs are not compatible with the frozen
configuration.
"""
if device_assignment is None:
if placement_function is None:
placement_function = self._default_placement_function
if tpu_ordinal_function is None:
tpu_ordinal_function = self._default_ordinal_function
else:
def _placement_function_from_map(index):
return device_assignment.host_device(replica=index)
def _ordinal_function_from_map(index):
return device_assignment.tpu_ordinal(replica=index)
if placement_function is None:
placement_function = _placement_function_from_map
if tpu_ordinal_function is None:
tpu_ordinal_function = _ordinal_function_from_map
self.set_configuration_from_input_tensors(inputs)
self.freeze()
if self._generated_enqueue_ops:
raise ValueError("Can't generate two enqueue Ops from the same queue")
self._generated_enqueue_ops = True
split_name_prefix = "%s/split" % self._name
if self.number_of_shards == 1:
transposed_sharded_inputs = [[inp] for inp in inputs]
else:
def split_fn(inp, num_shards, axis, name):
with ops.colocate_with(inp):
return array_ops.split(inp, num_shards, axis=axis, name=name)
transposed_sharded_inputs = [
split_fn(
inp,
self.number_of_shards,
axis=policy.shard_dimension,
name="%s/%d" % (split_name_prefix, index))
for (inp, policy, index) in zip(inputs, self._sharding_policies,
xrange(self.number_of_tuple_elements))
]
sharded_inputs = [[shard[i] for shard in transposed_sharded_inputs]
for i in xrange(self.number_of_shards)]
name_prefix = "%s/enqueue" % self._name
return [
self._generate_enqueue_op(
shard,
name_prefix,
index,
device=placement_function(index),
tpu_ordinal=tpu_ordinal_function(index))
for (shard, index) in zip(sharded_inputs, xrange(self.number_of_shards))
]
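# A minimal usage sketch of InfeedQueue (the feature/label tensors and the
# surrounding replicated TPU computation are assumed, purely for
# illustration):
#
#   infeed = InfeedQueue(number_of_tuple_elements=2)
#   infeed.set_number_of_shards(8)
#   enqueue_ops = infeed.split_inputs_and_generate_enqueue_ops(
#       [features, labels])
#   # Inside the replicated computation, each core dequeues its shard:
#   #   sharded_features, sharded_labels = infeed.generate_dequeue_op()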
class _PartitionedInfeedQueue(InfeedQueue):
"""A helper object to build a device infeed queue with input partition.
Args:
number_of_tuple_elements: the number of Tensors fed atomically through the
queue, must be present unless it can be inferred from other arguments.
device_assignment: A TPU `DeviceAssignment` which is used to place all the
partitions to different TPU infeed queues.
host_id: The id of the host machine.
input_partition_dims: A nested list/tuple of integers. Each inner
list/tuple describes how to partition the corresponding input tensor.
tuple_types: If not None, a list of types of the elements of the queue.
tuple_shapes: If not None, a list of shapes of the elements of the queue.
name: The name of the queue.
"""
def __init__(self,
number_of_tuple_elements,
device_assignment,
host_id,
input_partition_dims=None,
tuple_types=None,
tuple_shapes=None,
name=None):
super(_PartitionedInfeedQueue, self).__init__(
number_of_tuple_elements=number_of_tuple_elements,
tuple_types=tuple_types,
tuple_shapes=None,
shard_dimensions=None,
name="PartitionedInfeedQueue" if name is None else name)
self._input_partition_dims = input_partition_dims
self._host_id = host_id
self._device_assignment = device_assignment
def generate_dequeue_op(self, tpu_device=0):
"""Generate TPU dequeue ops.
Args:
tpu_device: The TPU device ordinal where the infeed instruction should be
placed.
Returns:
A list of Outputs corresponding to a partition of infeed dequeued
into XLA, suitable for use within a replicated block.
Raises:
ValueError: if the types or shapes of the tuple elements have not been
set; or if a dequeue op has already been generated.
"""
self.freeze()
if self._generated_dequeue_op:
raise ValueError("Can't generate two dequeue Ops from the same queue")
self._generated_dequeue_op = True
full_name = "%s/dequeue" % self._name
sharded_shapes = [
policy.get_sharded_shape(shape)
for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies)
]
with ops.device(tpu.core(tpu_device)):
values = tpu_ops.infeed_dequeue_tuple(
dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
return tag_sharding_attribute_for_dequeued_tensors(
values, self._input_partition_dims)
def generate_enqueue_ops(self, sharded_inputs):
"""Generates the host-side Ops to enqueue the partitioned inputs.
sharded_inputs is a list, one for each replica, of lists of
Tensors. sharded_inputs[i] is the tuple of Tensors to use to feed
replica i.
sharded_inputs[i][j] is partitioned by self._input_partition_dims[j].
For example, if sharded_inputs[i][j] is a 2-D Tensor:
[[A, B, C, D],
[E, F, G, H]]
self._input_partition_dims[j] is [2, 4].
sharded_inputs[i][j] will be partitioned and flattened into:
[A, B, C, D, E, F, G, H] and fed into the logical core ids:
[0, 1, 2, 3, 4, 5, 6, 7] respectively.
Args:
sharded_inputs: a list of lists of Tensors. The length of the
outer list determines the number of shards. Each inner list indicates
the types and shapes of the tuples in the corresponding shard.
Returns:
A list of host-side Ops, one for each shard, that when executed together
will enqueue a full-size element of infeed.
Raises:
ValueError: if the queue configuration has previously been frozen and the
shapes of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the shapes of the elements of sharded_inputs
don't form a consistent unsharded tuple; or if the elements of a tuple
have different device constraints; or if the partition dims are invalid.
TypeError: if the queue configuration has previously been frozen and the
types of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the types of the elements of sharded_inputs
don't form a consistent unsharded tuple.
"""
self.set_configuration_from_sharded_input_tensors(sharded_inputs)
number_of_replicas = len(sharded_inputs)
number_of_tuple_elements = len(sharded_inputs[0])
assert len(self._input_partition_dims) == number_of_tuple_elements
enqueue_ops = []
for replica_index in range(number_of_replicas):
flattened_inputs = sharded_inputs[replica_index]
inputs_part_dims_flat = nest.flatten_up_to(flattened_inputs,
self._input_partition_dims)
inputs_parted_iters = [
iter(self._check_dims_and_partition_or_replicate_on_host(x, dims))
for x, dims in zip(sharded_inputs[replica_index],
inputs_part_dims_flat)
]
# Find the replica_id of the host's logical core 0.
# The self._host_id is guaranteed to contain the logical core 0,
# even when num_cores_per_replica > num_cores_per_host -- the function
# caller makes sure that this host_id will be receiving data (calls
# input_fn).
replica_id = self._device_assignment.lookup_replicas(
task_id=self._host_id, logical_core=0)[replica_index]
for logical_core in xrange(self._device_assignment.num_cores_per_replica):
# Place different partitions on different logical cores.
# Since there can be multiple hosts per replica, we need to find
# the actual host (device) of this logical core.
device = self._device_assignment.host_device(
replica=replica_id, logical_core=logical_core)
with ops.device(device):
ordinal = self._device_assignment.tpu_ordinal(
replica=replica_id, logical_core=logical_core)
infeed_inputs = []
for it in inputs_parted_iters:
input_for_device = next(it, None)
if input_for_device is not None:
infeed_inputs.append(input_for_device)
if infeed_inputs:
enqueue_ops.append(
tpu_ops.infeed_enqueue_tuple(
inputs=infeed_inputs,
shapes=[x.shape for x in infeed_inputs],
name="enqueue/replica_{0}/input_{1}".format(
replica_index, logical_core),
device_ordinal=ordinal))
return enqueue_ops
def _check_input_partition_dims(self, tensor, dims):
"""Checks that input partition dims are valid for the `Tensor`.
Args:
tensor: Input tensor for partitioning.
dims: A list of integers describing how to partition the input tensor.
Raises:
ValueError: If the tensor can't be partitioned by dims or the
num_cores_per_replica doesn't match the number of
partitions (dims.prod()).
"""
# No partitioning specified, so don't perform further checks.
if dims is None:
return
dims = np.array(dims)
if (dims < 1).any():
raise ValueError("All input partition dims must be >= 1.")
# No partitioning, so don't perform further checks.
if dims.prod() == 1:
return
if dims.prod() != self._device_assignment.num_cores_per_replica:
raise ValueError(
"The product of each input partition dim should equal to "
"num_cores_per_replica. (dim = {}, num_cores_per_replica "
"= {})".format(dims, self._device_assignment.num_cores_per_replica))
if dims.shape[0] != tensor.shape.ndims:
raise ValueError(
"Input partition dims must have the same number of dimensions "
"as the `Tensor` to be partitioned. (tensor shape = {}, input "
"partition dims = {}).".format(tensor.shape.as_list(), dims))
tensor.shape.assert_is_fully_defined()
def _check_dims_and_partition_or_replicate_on_host(self, tensor, dims):
"""Checks dims and partitions or replicates the input tensor.
The ops inside this function are placed on the host side.
Args:
tensor: The input tensor which will be partitioned or replicated.
dims: A list of integers describing how to partition the input tensor.
Returns:
An iterator of `Tensor`s or a list of partitioned tensors.
"""
self._check_input_partition_dims(tensor, dims)
return partition_or_replicate_on_host(tensor, dims)
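# A rough NumPy stand-in (not the actual host-side ops) showing how partition
# dims [2, 4] split a 2x4 tensor into the 8 slices fed to logical cores 0..7,
# in the row-major order described in generate_enqueue_ops above:
#
#   import numpy as np
#   x = np.arange(8).reshape(2, 4)
#   slices = [col for row in np.split(x, 2, axis=0)
#             for col in np.split(row, 4, axis=1)]
#   # slices[i] is the value enqueued for logical core i.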
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_feed.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tpu_function helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import training_loop
class TPUContextTest(test.TestCase):
@test_util.deprecated_graph_mode_only
def testIsInContext(self):
"""Test that control_flow_util can check that we're in a TPU context."""
z1 = array_ops.identity(1)
pivot = control_flow_ops.no_op()
context = tpu.TPUReplicateContext(b"context", 1, pivot=pivot)
context.Enter()
z2 = array_ops.identity(1)
context.Exit()
self.assertFalse(control_flow_util.IsInXLAContext(z1.op))
self.assertTrue(control_flow_util.IsInXLAContext(z2.op))
class TPULayerRewriteTest(test.TestCase):
@test_util.deprecated_graph_mode_only
def testUsingInfeedQueueWithRegularizer(self):
"""Test that Layer regularizers can reference data created in loops."""
def make_regularizer(scale):
return lambda inputs: scale * math_ops.reduce_sum(math_ops.square(inputs))
def training_step(inputs, scale):
outputs = convolutional.conv2d(
inputs,
filters=16,
kernel_size=(3, 3),
data_format="channels_first",
kernel_regularizer=make_regularizer(scale))
loss = math_ops.reduce_mean(math_ops.square(outputs))
return loss.op
inputs = array_ops.zeros(shape=(128, 32, 32, 16))
scale = array_ops.ones(shape=())
infeed = tpu_feed.InfeedQueue(
tuple_types=[dtypes.float32, dtypes.float32],
tuple_shapes=[inputs.shape, scale.shape])
def loop():
return training_loop.repeat(5, training_step, infeed_queue=infeed)
# This should not throw an error.
tpu.rewrite(loop)
class TPUGraphPruneTest(test.TestCase):
def test_prune_unconnected_ops(self):
with ops.Graph().as_default():
a = array_ops.placeholder(dtype=dtypes.float32, name="a")
b = array_ops.placeholder(dtype=dtypes.float32, name="b")
constant_op.constant(1.0, name="constant")
x = variable_scope.get_variable(
name="x",
dtype=dtypes.float32,
shape=[],
use_resource=True,
initializer=init_ops.constant_initializer(2.0))
y = variable_scope.get_variable(
name="y",
dtype=dtypes.float32,
shape=[],
use_resource=True,
initializer=init_ops.constant_initializer(3.0))
math_ops.add(a, b)
math_ops.add(x, y)
graph_def = ops.get_default_graph().as_graph_def()
for node in graph_def.node:
# Attach a TPU_REPLICATE_ATTR to each node.
node.attr[tpu._TPU_REPLICATE_ATTR].s = b"0"
# Rewire placeholder "a" and variable "y" leaving them unconnected.
for (input_index, node_input) in enumerate(node.input):
if node_input == "b":
node.input[input_index] = "constant"
if node_input == "y":
node.input[input_index] = "x"
with ops.Graph().as_default() as graph:
# Reimport the graph and prune unconnected ops.
importer.import_graph_def(graph_def)
tpu.prune_unconnected_ops_from_xla(ops.get_default_graph())
# Verify that ops "a" and "x" still have TPU_REPLICATE_ATTR.
a = graph.get_operation_by_name("import/a").get_attr(
tpu._TPU_REPLICATE_ATTR)
self.assertEqual(b"0", a)
x = graph.get_operation_by_name("import/x").get_attr(
tpu._TPU_REPLICATE_ATTR)
self.assertEqual(b"0", x)
# Verify that ops "b" and "y" have TPU_REPLICATE_ATTR removed.
with self.assertRaisesRegexp(
ValueError,
"Operation \'import/b\' has no attr named \'_tpu_replicate\'"):
graph.get_operation_by_name("import/b").get_attr(
tpu._TPU_REPLICATE_ATTR)
with self.assertRaisesRegexp(
ValueError,
"Operation \'import/y\' has no attr named \'_tpu_replicate\'"):
graph.get_operation_by_name("import/y").get_attr(
tpu._TPU_REPLICATE_ATTR)
def do_einsum():
a = array_ops.placeholder(dtype=dtypes.float32, name="a", shape=[2, 3, 4])
b = array_ops.placeholder(dtype=dtypes.float32, name="b", shape=[2, 4, 5])
return special_math_ops.einsum("abc,acd->abd", a, b)
def find_einsum(g):
graph_def = g.as_graph_def()
for node in graph_def.node:
if node.op == "XlaEinsum":
return True
return False
class TPUXlaEinsumTest(test.TestCase):
def test_tpu_rewrite_uses_xla_einsum(self):
with ops.Graph().as_default() as g:
tpu.rewrite(do_einsum)
self.assertTrue(find_einsum(g))
def test_default_does_not_use_xla_einsum(self):
with ops.Graph().as_default() as g:
do_einsum()
self.assertFalse(find_einsum(g))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Tensor Tracer report generation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import collections
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tensor_tracer_pb2
_TRACER_LOG_PREFIX = ' [>>>TT>>>]'
_MARKER_SECTION_BEGIN = '!!!!!!! section-begin:'
_MARKER_SECTION_END = '!!!!!!! section-end:'
_SECTION_NAME_CONFIG = 'configuration'
_SECTION_NAME_REASON = 'reason'
_SECTION_NAME_OP_LIST = 'op-list'
_SECTION_NAME_TENSOR_LIST = 'tensor-list'
_SECTION_NAME_CACHE_INDEX_MAP = 'cache-index-map'
_SECTION_NAME_GRAPH = 'graph'
_SECTION_NAME_TENSOR_TRACER_CHECKPOINT = 'tensor_tracer_checkpoint'
_FIELD_NAME_VERSION = 'version:'
_FIELD_NAME_DEVICE = 'device:'
_FIELD_NAME_TRACE_MODE = 'trace-mode:'
_FIELD_NAME_SUBMODE = 'submode:'
_FIELD_NAME_NUM_REPLICAS = 'num-replicas:'
_FIELD_NAME_NUM_REPLICAS_PER_HOST = 'num-replicas-per-host:'
_FIELD_NAME_NUM_HOSTS = 'num-hosts:'
_FIELD_NAME_NUM_OPS = 'number-of-ops:'
_FIELD_NAME_NUM_TENSORS = 'number-of-tensors:'
_FIELD_NAME_NUM_CACHE_INDICES = 'number-of-indices:'
_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED = 'topological-sort-succeed:'
_CURRENT_VERSION = 'use-outside-compilation'
_TT_REPORT_PROTO = 'tensor_tracer_report.proto'
def topological_sort(g):
"""Performs topological sort on the given graph.
Args:
g: the graph.
Returns:
A pair where the first element indicates whether a cycle was found:
True if the graph contains a cycle, in which case the second element
is the set of ops left on the cycle(s); False if the sort succeeded,
in which case the second element is the topologically sorted list of nodes.
"""
def _is_loop_edge(op):
"""Returns true if the op is the end of a while-loop creating a cycle."""
return op.type in ['NextIteration']
def _in_op_degree(op):
"""Returns the number of incoming edges to the given op.
The edge calculation skips the edges that come from 'NextIteration' ops.
NextIteration creates a cycle in the graph. We break cycles by treating
this op as 'sink' and ignoring all outgoing edges from it.
Args:
op: tf.Operation
Returns:
the number of incoming edges.
"""
count = 0
for op in op.control_inputs + [in_tensor.op for in_tensor in op.inputs]:
if not _is_loop_edge(op):
count += 1
return count
sorted_ops = []
op_in_degree = {op: _in_op_degree(op) for op in g.get_operations()}
frontier = [op for (op, degree) in op_in_degree.items() if degree == 0]
frontier.sort(key=lambda op: op.name)
while frontier:
op = frontier.pop()
# Remove the op from graph, and remove its outgoing edges.
sorted_ops.append(op)
if _is_loop_edge(op):
continue
# pylint: disable=protected-access
consumers = list(op._control_outputs)
# pylint: enable=protected-access
for out_tensor in op.outputs:
consumers += [consumer_op for consumer_op in out_tensor.consumers()]
consumers.sort(key=lambda op: op.name)
for consumer in consumers:
# For each deleted edge shift the bucket of the vertex.
op_in_degree[consumer] -= 1
if op_in_degree[consumer] == 0:
frontier.append(consumer)
if op_in_degree[consumer] < 0:
raise ValueError('consumer:%s degree mismatch'%consumer.name)
left_ops = set([op for (op, degree) in op_in_degree.items() if degree > 0])
if left_ops:
return (True, left_ops)
else:
assert len(g.get_operations()) == len(sorted_ops)
return (False, sorted_ops)
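# A minimal usage sketch (the graph `g` is assumed to already exist):
#
#   contains_cycle, order_or_cycle = topological_sort(g)
#   if contains_cycle:
#     # order_or_cycle is the set of ops still stuck on a cycle.
#     logging.warn('cycle detected: %s', order_or_cycle)
#   else:
#     # order_or_cycle is a topologically sorted list of all ops in g.
#     pass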
class TensorTracerConfig(object):
"""Tensor Tracer config object."""
def __init__(self):
self.version = _CURRENT_VERSION
self.device_type = None
self.num_replicas = None
self.num_replicas_per_host = None
self.num_hosts = None
class TensorTraceOrder(object):
"""Class that is responsible from storing the trace-id of the tensors."""
def __init__(self, graph_order, traced_tensors):
self.graph_order = graph_order
self.traced_tensors = traced_tensors
self._create_tensor_maps()
def _create_tensor_maps(self):
"""Creates tensor to cache id maps."""
self.tensorname_to_cache_idx = {}
self.cache_idx_to_tensor_idx = []
for out_tensor in self.traced_tensors:
tensor_name = out_tensor.name
if tensor_name in self.tensorname_to_cache_idx:
raise ValueError(
'Tensor name %s should not be already in '
'tensorname_to_cache_idx'%tensor_name)
if tensor_name not in self.graph_order.tensor_to_idx:
raise ValueError(
'Tensor name %s is not in the tensor_to_idx'%tensor_name)
tensor_idx = self.graph_order.tensor_to_idx[tensor_name]
cache_idx = len(self.tensorname_to_cache_idx)
self.tensorname_to_cache_idx[tensor_name] = cache_idx
self.cache_idx_to_tensor_idx.append(tensor_idx)
if len(self.tensorname_to_cache_idx) != len(
self.cache_idx_to_tensor_idx):
raise RuntimeError('len(self.tensorname_to_cache_idx) != '
'len(self.cache_idx_to_tensor_idx)')
def sort_tensors_and_ops(graph):
"""Returns a wrapper that has consistent tensor and op orders."""
graph_wrapper = collections.namedtuple('GraphWrapper',
['graph', 'operations', 'op_to_idx',
'tensors', 'tensor_to_idx',
'contains_cycle',
'topological_order_or_cycle'])
contains_cycle, topological_order_or_cycle = topological_sort(graph)
if not contains_cycle:
operations = topological_order_or_cycle
else:
operations = graph.get_operations()
op_to_idx = {op.name: index for index, op
in enumerate(operations)}
tensors = []
for op in operations:
tensors.extend(op.outputs)
tensor_to_idx = {tensor.name: index for index, tensor in
enumerate(tensors)}
return graph_wrapper(graph=graph, operations=operations, op_to_idx=op_to_idx,
tensors=tensors, tensor_to_idx=tensor_to_idx,
contains_cycle=contains_cycle,
topological_order_or_cycle=topological_order_or_cycle)
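# The wrapper returned above gives a stable tensor and op ordering; a brief
# sketch of how the index maps are typically consulted (names illustrative):
#
#   graph_order = sort_tensors_and_ops(graph)
#   op_idx = graph_order.op_to_idx[some_op.name]
#   tensor_idx = graph_order.tensor_to_idx[some_tensor.name]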
class OpenReportFile(object):
"""Context manager for writing report file."""
def __init__(self, tt_parameters):
if not tt_parameters.report_file_path:
self._report_file = None
return
try:
self._report_file = gfile.Open(tt_parameters.report_file_path, 'w')
except IOError as e:
raise e
def __enter__(self):
return self._report_file
def __exit__(self, unused_type, unused_value, unused_traceback):
if self._report_file:
self._report_file.close()
class TTReportHandle(object):
"""Utility class responsible from creating a tensor tracer report."""
def __init__(self):
self.instrument_records = {}
self._report_file = None
def instrument(self, name, explanation):
self.instrument_records[name] = explanation
def instrument_op(self, op, explanation):
self.instrument(op.name, explanation)
def instrument_tensor(self, tensor, explanation):
self.instrument(tensor.name, explanation)
def create_report_proto(self, tt_config, tt_parameters, tensor_trace_order,
tensor_trace_points, collected_signature_types):
"""Creates and returns a proto that stores tensor tracer configuration.
Args:
tt_config: TensorTracerConfig object holding information about the run
environment (device, # cores, # hosts), and tensor tracer version
information.
tt_parameters: TTParameters objects storing the user provided parameters
for tensor tracer.
tensor_trace_order: TensorTraceOrder object storing a topological order of
the graph.
tensor_trace_points: Programmatically added trace_points/checkpoints.
collected_signature_types: The signature types collected, e.g. norm,
max, min, mean...
Returns:
TensorTracerReport proto.
"""
report = tensor_tracer_pb2.TensorTracerReport()
report.config.version = tt_config.version
report.config.device = tt_config.device_type
report.config.num_cores = tt_config.num_replicas
report.config.num_hosts = tt_config.num_hosts
report.config.num_cores_per_host = tt_config.num_replicas_per_host
for core in tt_parameters.included_cores:
report.config.included_cores.append(core)
report.config.submode = tt_parameters.submode
report.config.trace_mode = tt_parameters.trace_mode
for signature_name, _ in sorted(collected_signature_types.items(),
key=lambda x: x[1]):
report.config.signatures.append(signature_name)
tf_graph = tensor_trace_order.graph_order.graph
report.graphdef.CopyFrom(tf_graph.as_graph_def())
for tensor in tensor_trace_order.graph_order.tensors:
tensor_def = tensor_tracer_pb2.TensorTracerReport.TracedTensorDef()
tensor_def.name = tensor.name
if tensor.name in tensor_trace_order.tensorname_to_cache_idx:
tensor_def.is_traced = True
tensor_def.cache_index = (
tensor_trace_order.tensorname_to_cache_idx[tensor.name])
else:
tensor_def.is_traced = False
if tensor.name in tensor_trace_points:
tensor_def.trace_point_name = tensor_trace_points[tensor.name]
if tensor.name in self.instrument_records:
tensor_def.explanation = self.instrument_records[tensor.name]
elif tensor.op.name in self.instrument_records:
tensor_def.explanation = self.instrument_records[tensor.op.name]
report.tensordef[tensor.name].CopyFrom(tensor_def)
return report
def write_report_proto(self, report_proto, tt_parameters):
"""Writes the given report proto under trace_dir."""
gfile.MakeDirs(tt_parameters.trace_dir)
report_path = os.path.join(tt_parameters.trace_dir, _TT_REPORT_PROTO)
with gfile.GFile(report_path, 'wb') as f:
f.write(report_proto.SerializeToString())
def create_report(self, tt_config, tt_parameters,
tensor_trace_order, tensor_trace_points):
"""Creates a report file and writes the trace information."""
with OpenReportFile(tt_parameters) as self._report_file:
self._write_config_section(tt_config, tt_parameters)
self._write_op_list_section(tensor_trace_order.graph_order)
self._write_tensor_list_section(tensor_trace_order.graph_order)
self._write_trace_points(tensor_trace_points)
self._write_cache_index_map_section(tensor_trace_order)
self._write_reason_section()
self._write_graph_section(tensor_trace_order.graph_order)
def _write_trace_points(self, tensor_trace_points):
"""Writes the list of checkpoints."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
_SECTION_NAME_TENSOR_TRACER_CHECKPOINT))
for (tensor, checkpoint_name) in tensor_trace_points:
self._write_report('%s %s\n'%(tensor.name, checkpoint_name))
self._write_report('%s %s\n'%(_MARKER_SECTION_END,
_SECTION_NAME_TENSOR_TRACER_CHECKPOINT))
def _write_report(self, content):
"""Writes the given content to the report."""
line = '%s %s'%(_TRACER_LOG_PREFIX, content)
if self._report_file:
self._report_file.write(line)
else:
logging.info(line)
def _write_config_section(self, tt_config, tt_parameters):
"""Writes the config section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_CONFIG))
self._write_report('%s %s\n'%(_FIELD_NAME_VERSION, tt_config.version))
self._write_report('%s %s\n'%(_FIELD_NAME_DEVICE, tt_config.device_type))
self._write_report('%s %s\n'%(_FIELD_NAME_TRACE_MODE,
tt_parameters.trace_mode))
self._write_report('%s %s\n'%(_FIELD_NAME_SUBMODE,
tt_parameters.submode))
if tt_parameters.included_cores:
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS,
len(tt_parameters.included_cores)))
else:
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS,
tt_config.num_replicas))
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS_PER_HOST,
tt_config.num_replicas_per_host))
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_HOSTS, tt_config.num_hosts))
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_CONFIG))
def _write_reason_section(self):
"""Writes the reason section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_REASON))
for key in sorted(self.instrument_records):
self._write_report('"%s" %s\n'%(key, self.instrument_records[key]))
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_REASON))
def _write_op_list_section(self, graph_order):
"""Writes the Op-list section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_OP_LIST))
self._write_report('%s %d\n'%(_FIELD_NAME_NUM_OPS,
len(graph_order.operations)))
for i in range(0, len(graph_order.operations)):
op = graph_order.operations[i]
line = '%d "%s" %s'%(i, op.name, op.type)
for out_tensor in op.outputs:
if out_tensor.name not in graph_order.tensor_to_idx:
raise ValueError(
'out_tensor %s is not in tensor_to_idx'%out_tensor.name)
line += ' %d'%graph_order.tensor_to_idx[out_tensor.name]
line += '\n'
self._write_report(line)
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_OP_LIST))
def _write_tensor_list_section(self, graph_order):
"""Writes the tensor-list section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
_SECTION_NAME_TENSOR_LIST))
self._write_report('%s %d\n'%(_FIELD_NAME_NUM_TENSORS,
len(graph_order.tensors)))
for i in range(0, len(graph_order.tensors)):
tensor = graph_order.tensors[i]
line = '%d "%s"'%(i, tensor.name)
consumers = tensor.consumers()
consumers.sort(key=lambda op: op.name)
for consumer_op in consumers:
if consumer_op.name not in graph_order.op_to_idx:
raise ValueError(
'consumer_op %s is not in op_to_idx'%consumer_op.name)
line += ' %d'%graph_order.op_to_idx[consumer_op.name]
line += '\n'
self._write_report(line)
self._write_report('%s %s\n'%(_MARKER_SECTION_END,
_SECTION_NAME_TENSOR_LIST))
def _write_cache_index_map_section(self, tensor_trace_order):
"""Writes the mapping from cache index to tensor index to the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
_SECTION_NAME_CACHE_INDEX_MAP))
self._write_report('%s %d\n'%(
_FIELD_NAME_NUM_CACHE_INDICES,
len(tensor_trace_order.cache_idx_to_tensor_idx)))
for cache_idx in range(0, len(tensor_trace_order.cache_idx_to_tensor_idx)):
tensor_idx = tensor_trace_order.cache_idx_to_tensor_idx[cache_idx]
line = '%d %d\n'%(cache_idx, tensor_idx)
self._write_report(line)
self._write_report('%s %s\n'%(_MARKER_SECTION_END,
_SECTION_NAME_CACHE_INDEX_MAP))
def _write_graph_section(self, graph_order):
"""Writes the graph section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_GRAPH))
self._write_report('%s %s\n'%(_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED,
not graph_order.contains_cycle))
l = list(graph_order.topological_order_or_cycle)
for i in range(0, len(l)):
self._write_report('%d "%s"\n'%(i, l[i].name))
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_GRAPH))
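# For reference, a configuration section in the plain-text report produced by
# create_report() looks roughly like the following (values illustrative):
#
#    [>>>TT>>>] !!!!!!! section-begin: configuration
#    [>>>TT>>>] version: use-outside-compilation
#    [>>>TT>>>] device: ...
#    [>>>TT>>>] trace-mode: ...
#    [>>>TT>>>] submode: ...
#    [>>>TT>>>] num-replicas: ...
#    [>>>TT>>>] !!!!!!! section-end: configuration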
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tensor_tracer_report.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Optional helper for gradient handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.ops import tpu_ops
def get_gradients_through_compute_gradients(optimizer, loss, activations):
"""Compute gradients to send to TPU embedding.
Args:
optimizer: a subclass of optimizer.Optimizer, usually CrossShardOptimizer.
Used to call compute_gradients().
loss: a Tensor to call optimizer.compute_gradients() on.
activations: an OrderedDict mapping feature_name to Tensors of activations.
Returns:
An OrderedDict mapping from feature name Strings to Tensors of gradients of
the loss wrt the activations of the features.
"""
activation_list = activations.values()
grads_and_vars = optimizer.compute_gradients(loss, activation_list)
grads = [grad for grad, _ in grads_and_vars]
feature_to_gradient_dict = collections.OrderedDict(
zip(activations.keys(), grads))
return feature_to_gradient_dict
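# A minimal usage sketch (the optimizer, loss and activations are assumed to
# come from the surrounding TPUEmbedding setup):
#
#   grads = get_gradients_through_compute_gradients(
#       cross_shard_optimizer, loss, activations)
#   # grads is an OrderedDict mapping feature name -> d(loss)/d(activation).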
def create_dummy_table_variables(tpu_embedding):
"""Create dummy embedding table variables.
The sole purpose of these dummy variables is to trigger gradient
calculation wrt them so that the gradients wrt the activations can be captured
and later sent to TPU embedding.
Args:
tpu_embedding: TPUEmbedding, dummy table variables will be created for use
with tpu_embedding.
Returns:
A tuple of dummy variables and their initializer.
Raises:
RuntimeError: if collection to store gradients already exists and is not
empty.
"""
dummy_table_variables = collections.OrderedDict()
for table_id, table in enumerate(tpu_embedding.table_to_features_dict):
dummy_table_variables[table] = (
# Explicitly specifying collections prevents this variable from
# being added to the GLOBAL_VARIABLES collection, so that Saver()
# ignores it.
# But the TensorFlow optimizer creates slot variables for these dummy
# variables, e.g. tpu_embedding_dummy_table_variable_mlp_user/Adam{_1},
# which will be in the GLOBAL_VARIABLES collection.
variable_scope.get_variable(
'tpu_embedding_dummy_table_variable_{}'.format(table),
dtype=dtypes.float32,
shape=[1],
use_resource=True,
trainable=True,
collections=['tpu_embedding_dummy_table_variables']))
g = ops.get_default_graph()
table_gradients = g.get_collection_ref(
'tpu_embedding_gradients_table_{}'.format(table_id))
if table_gradients:
raise RuntimeError(
'tpu_embedding_gradients_table_{} is not empty.'.format(table_id))
table_gradients.extend(
[None] * len(tpu_embedding.table_to_features_dict[table]))
return (dummy_table_variables,
variables.variables_initializer(
dummy_table_variables.values(),
name='tpu_embedding_dummy_table_variables_init'))
def hook_dummy_table_variables_to_activations(tpu_embedding, activations,
dummy_table_variables):
"""Have activations depend on dummy table variables for gradient intercept.
Args:
tpu_embedding: TPUEmbedding, activations and dummy_table_variables are from
tpu_embedding.
activations: An OrderedDict of feature name String to activation tensors.
dummy_table_variables: An OrderedDict of table name String to dummy table
variables.
Returns:
An OrderedDict of feature name String to activation tensors, which can be
used just as the activations input.
"""
new_activations = collections.OrderedDict()
for feature in activations:
table = tpu_embedding.feature_to_config_dict[feature].table_id
new_activations[feature] = tpu_ops.tpu_embedding_activations(
dummy_table_variables[table],
activations[feature],
table_id=list(tpu_embedding.table_to_config_dict).index(table),
lookup_id=tpu_embedding.table_to_features_dict[table].index(feature))
return new_activations
def get_gradients_through_dummy_table_variables(tpu_embedding):
"""Get gradients wrt the activations of each feature.
Args:
tpu_embedding: TPUEmbedding, create dummy table variable to be used with
tpu_embedding.
Returns:
An OrderedDict mapping feature name to gradient.
Raises:
ValueError: if some gradients are not defined.
"""
g = ops.get_default_graph()
feature_to_gradient_dict = collections.OrderedDict()
for table_id, table in enumerate(tpu_embedding.table_to_config_dict):
table_gradients = g.get_collection(
'tpu_embedding_gradients_table_{}'.format(table_id))
if all(gradient is None for gradient in table_gradients):
raise ValueError(
'Table {} with id {} has undefined gradients: this is probably '
'because the model asked TPUEmbedding to compute activations that '
'were not used.'.format(table, table_id))
if any(gradient is None for gradient in table_gradients):
# TODO(bfontain): create a white-list for optimizers which are compatible
# with `tf.stop_gradient`.
logging.warn(
'Table {} with id {} has undefined gradients: this is probably '
'because the model asked TPUEmbedding to compute activations that '
'were not used, or tf.stop_gradient() is applied. Gradients of zeros '
'are sent back to TPUEmbedding instead. Gradients of zeros and no '
'gradients are equivalent for SGD, AdaGrad, FTRL, momentum, etc, but '
'might differ for other optimizers due to the implementation of TPU '
'embedding optimizers.'
.format(table, table_id))
for feature, gradient in zip(tpu_embedding.table_to_features_dict[table],
table_gradients):
if gradient is not None:
feature_to_gradient_dict[feature] = gradient
else:
dimension = tpu_embedding.table_to_config_dict[table].dimension
batch_size = tpu_embedding.batch_size_per_core
max_sequence_length = (
tpu_embedding.feature_to_config_dict[feature].max_sequence_length)
if max_sequence_length:
feature_to_gradient_dict[feature] = array_ops.zeros(
[batch_size, max_sequence_length, dimension])
else:
feature_to_gradient_dict[feature] = array_ops.zeros(
[batch_size, dimension])
return feature_to_gradient_dict
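# The helpers in this module are typically used together; a rough sketch
# (model-building details omitted, names illustrative):
#
#   dummy_vars, dummy_init = create_dummy_table_variables(tpu_embedding)
#   activations = hook_dummy_table_variables_to_activations(
#       tpu_embedding, activations, dummy_vars)
#   # ...build the model loss from `activations`, run the optimizer...
#   grads = get_gradients_through_dummy_table_variables(tpu_embedding)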
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_embedding_gradient.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import *
# pylint: enable=wildcard-import,unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/_tpu_estimator_embedding.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import,redefined-builtin
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import *
# used by tests
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _clone_export_output_with_tensors
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _create_global_step
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _export_output_to_tensors
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _get_scaffold
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _Inputs
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _ITERATIONS_PER_LOOP_VAR
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _TPU_ENQUEUE_OPS
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _TPU_ESTIMATOR
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import _TPU_TRAIN_OP
# pylint: enable=wildcard-import,unused-import,redefined-builtin
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tpu/tpu_estimator.py
|