# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(123)
class LinearOperatorShape(linalg.LinearOperator):
"""LinearOperator that implements the methods ._shape and _shape_tensor."""
def __init__(self,
shape,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None):
self._stored_shape = shape
super(LinearOperatorShape, self).__init__(
dtype=dtypes.float32,
graph_parents=None,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square)
def _shape(self):
return tensor_shape.TensorShape(self._stored_shape)
def _shape_tensor(self):
return constant_op.constant(self._stored_shape, dtype=dtypes.int32)
def _matmul(self):
raise NotImplementedError("Not needed for this test.")
class LinearOperatorMatmulSolve(linalg.LinearOperator):
"""LinearOperator that wraps a [batch] matrix and implements matmul/solve."""
def __init__(self,
matrix,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None):
self._matrix = ops.convert_to_tensor(matrix, name="matrix")
super(LinearOperatorMatmulSolve, self).__init__(
dtype=self._matrix.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square)
def _shape(self):
return self._matrix.get_shape()
def _shape_tensor(self):
return array_ops.shape(self._matrix)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
x = ops.convert_to_tensor(x, name="x")
return math_ops.matmul(
self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
rhs = ops.convert_to_tensor(rhs, name="rhs")
assert not adjoint_arg, "Not implemented for this test class."
return linalg_ops.matrix_solve(self._matrix, rhs, adjoint=adjoint)
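# Minimal usage sketch (values illustrative; relies only on the class above):
# for a square non-singular wrapped matrix, solve() undoes matmul(), so
# operator.solve(operator.matmul(x)) recovers x up to numerical error.
def _demo_matmul_solve_roundtrip():
  operator = LinearOperatorMatmulSolve([[2., 0.], [0., 4.]])
  x = constant_op.constant([[1.], [3.]])
  y = operator.matmul(x)    # y = A x
  return operator.solve(y)  # A^{-1} y, equal to x up to rounding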
class LinearOperatorTest(test.TestCase):
def test_all_shape_properties_defined_by_the_one_property_shape(self):
shape = (1, 2, 3, 4)
operator = LinearOperatorShape(shape)
self.assertAllEqual(shape, operator.shape)
self.assertAllEqual(4, operator.tensor_rank)
self.assertAllEqual((1, 2), operator.batch_shape)
self.assertAllEqual(4, operator.domain_dimension)
self.assertAllEqual(3, operator.range_dimension)
@test_util.run_deprecated_v1
def test_all_shape_methods_defined_by_the_one_method_shape(self):
with self.cached_session():
shape = (1, 2, 3, 4)
operator = LinearOperatorShape(shape)
self.assertAllEqual(shape, operator.shape_tensor().eval())
self.assertAllEqual(4, operator.tensor_rank_tensor().eval())
self.assertAllEqual((1, 2), operator.batch_shape_tensor().eval())
self.assertAllEqual(4, operator.domain_dimension_tensor().eval())
self.assertAllEqual(3, operator.range_dimension_tensor().eval())
def test_is_x_properties(self):
operator = LinearOperatorShape(
shape=(2, 2),
is_non_singular=False,
is_self_adjoint=True,
is_positive_definite=False)
self.assertFalse(operator.is_non_singular)
self.assertTrue(operator.is_self_adjoint)
self.assertFalse(operator.is_positive_definite)
def test_generic_to_dense_method_non_square_matrix_static(self):
matrix = rng.randn(2, 3, 4)
operator = LinearOperatorMatmulSolve(matrix)
with self.cached_session():
operator_dense = operator.to_dense()
self.assertAllEqual((2, 3, 4), operator_dense.get_shape())
self.assertAllClose(matrix, self.evaluate(operator_dense))
@test_util.run_deprecated_v1
def test_generic_to_dense_method_non_square_matrix_tensor(self):
matrix = rng.randn(2, 3, 4)
matrix_ph = array_ops.placeholder(dtypes.float64)
operator = LinearOperatorMatmulSolve(matrix_ph)
with self.cached_session():
operator_dense = operator.to_dense()
self.assertAllClose(
matrix, operator_dense.eval(feed_dict={matrix_ph: matrix}))
def test_matvec(self):
matrix = [[1., 0], [0., 2.]]
operator = LinearOperatorMatmulSolve(matrix)
x = [1., 1.]
with self.cached_session():
y = operator.matvec(x)
self.assertAllEqual((2,), y.get_shape())
self.assertAllClose([1., 2.], self.evaluate(y))
def test_solvevec(self):
matrix = [[1., 0], [0., 2.]]
operator = LinearOperatorMatmulSolve(matrix)
y = [1., 1.]
with self.cached_session():
x = operator.solvevec(y)
self.assertAllEqual((2,), x.get_shape())
self.assertAllClose([1., 1 / 2.], self.evaluate(x))
def test_is_square_set_to_true_for_square_static_shapes(self):
operator = LinearOperatorShape(shape=(2, 4, 4))
self.assertTrue(operator.is_square)
  def test_is_square_set_to_false_for_non_square_static_shapes(self):
operator = LinearOperatorShape(shape=(2, 3, 4))
self.assertFalse(operator.is_square)
def test_is_square_set_incorrectly_to_false_raises(self):
with self.assertRaisesRegexp(ValueError, "but.*was square"):
_ = LinearOperatorShape(shape=(2, 4, 4), is_square=False).is_square
@test_util.run_deprecated_v1
def test_is_square_set_inconsistent_with_other_hints_raises(self):
with self.assertRaisesRegexp(ValueError, "is always square"):
matrix = array_ops.placeholder(dtypes.float32)
LinearOperatorMatmulSolve(matrix, is_non_singular=True, is_square=False)
with self.assertRaisesRegexp(ValueError, "is always square"):
matrix = array_ops.placeholder(dtypes.float32)
LinearOperatorMatmulSolve(
matrix, is_positive_definite=True, is_square=False)
@test_util.run_deprecated_v1
def test_non_square_operators_raise_on_determinant_and_solve(self):
operator = LinearOperatorShape((2, 3))
with self.assertRaisesRegexp(NotImplementedError, "not be square"):
operator.determinant()
with self.assertRaisesRegexp(NotImplementedError, "not be square"):
operator.log_abs_determinant()
with self.assertRaisesRegexp(NotImplementedError, "not be square"):
operator.solve(rng.rand(2, 2))
with self.assertRaisesRegexp(ValueError, "is always square"):
matrix = array_ops.placeholder(dtypes.float32)
LinearOperatorMatmulSolve(
matrix, is_positive_definite=True, is_square=False)
@test_util.run_deprecated_v1
def test_is_square_manual_set_works(self):
matrix = array_ops.placeholder(dtypes.float32)
# Default is None.
operator = LinearOperatorMatmulSolve(matrix)
self.assertEqual(None, operator.is_square)
# Set to True
operator = LinearOperatorMatmulSolve(matrix, is_square=True)
self.assertTrue(operator.is_square)
@test_util.run_deprecated_v1
def test_linear_operator_matmul_hints_closed(self):
matrix = array_ops.placeholder(dtypes.float32)
operator1 = LinearOperatorMatmulSolve(matrix)
operator_matmul = operator1.matmul(operator1)
self.assertEqual(None, operator_matmul.is_square)
self.assertEqual(None, operator_matmul.is_non_singular)
self.assertEqual(None, operator_matmul.is_self_adjoint)
self.assertEqual(None, operator_matmul.is_positive_definite)
operator2 = LinearOperatorMatmulSolve(
matrix,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True,
)
operator_matmul = operator2.matmul(operator2)
self.assertTrue(operator_matmul.is_square)
self.assertTrue(operator_matmul.is_non_singular)
self.assertEqual(None, operator_matmul.is_self_adjoint)
self.assertEqual(None, operator_matmul.is_positive_definite)
@test_util.run_deprecated_v1
def test_linear_operator_matmul_hints_false(self):
matrix = array_ops.placeholder(dtypes.float32)
operator1 = LinearOperatorMatmulSolve(
matrix,
is_non_singular=False,
is_self_adjoint=False,
is_positive_definite=False,
is_square=True,
)
operator_matmul = operator1.matmul(operator1)
self.assertTrue(operator_matmul.is_square)
self.assertFalse(operator_matmul.is_non_singular)
self.assertEqual(None, operator_matmul.is_self_adjoint)
self.assertEqual(None, operator_matmul.is_positive_definite)
operator2 = LinearOperatorMatmulSolve(
matrix,
is_non_singular=False,
is_self_adjoint=False,
is_positive_definite=False,
is_square=False,
)
operator_matmul = operator2.matmul(operator2)
self.assertEqual(None, operator_matmul.is_square)
self.assertEqual(None, operator_matmul.is_non_singular)
self.assertEqual(None, operator_matmul.is_self_adjoint)
self.assertEqual(None, operator_matmul.is_positive_definite)
@test_util.run_deprecated_v1
def test_linear_operator_matmul_hint_infer_square(self):
matrix1 = array_ops.placeholder(shape=[2, 3], dtype=dtypes.float32)
matrix2 = array_ops.placeholder(shape=[3, 2], dtype=dtypes.float32)
matrix3 = array_ops.placeholder(shape=[3, 4], dtype=dtypes.float32)
operator1 = LinearOperatorMatmulSolve(matrix1, is_square=False)
operator2 = LinearOperatorMatmulSolve(matrix2, is_square=False)
operator3 = LinearOperatorMatmulSolve(matrix3, is_square=False)
self.assertTrue(operator1.matmul(operator2).is_square)
self.assertTrue(operator2.matmul(operator1).is_square)
self.assertFalse(operator1.matmul(operator3).is_square)
def testDispatchedMethods(self):
operator = linalg.LinearOperatorFullMatrix(
[[1., 0.5], [0.5, 1.]],
is_square=True,
is_self_adjoint=True,
is_non_singular=True,
is_positive_definite=True)
methods = {
"trace": linalg.trace,
"diag_part": linalg.diag_part,
"log_abs_determinant": linalg.logdet,
"determinant": linalg.det
}
for method in methods:
op_val = getattr(operator, method)()
linalg_val = methods[method](operator)
self.assertAllClose(
self.evaluate(op_val),
self.evaluate(linalg_val))
# Solve and Matmul go here.
adjoint = linalg.adjoint(operator)
self.assertIsInstance(adjoint, linalg.LinearOperator)
cholesky = linalg.cholesky(operator)
self.assertIsInstance(cholesky, linalg.LinearOperator)
inverse = linalg.inv(operator)
self.assertIsInstance(inverse, linalg.LinearOperator)
def testDispatchMatmulSolve(self):
operator = linalg.LinearOperatorFullMatrix(
np.float64([[1., 0.5], [0.5, 1.]]),
is_square=True,
is_self_adjoint=True,
is_non_singular=True,
is_positive_definite=True)
rhs = np.random.uniform(-1., 1., size=[3, 2, 2])
for adjoint in [False, True]:
for adjoint_arg in [False, True]:
op_val = operator.matmul(
rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
matmul_val = math_ops.matmul(
operator, rhs, adjoint_a=adjoint, adjoint_b=adjoint_arg)
self.assertAllClose(
self.evaluate(op_val), self.evaluate(matmul_val))
op_val = operator.solve(rhs, adjoint=adjoint)
solve_val = linalg.solve(operator, rhs, adjoint=adjoint)
self.assertAllClose(
self.evaluate(op_val), self.evaluate(solve_val))
def testDispatchMatmulLeftOperatorIsTensor(self):
mat = np.float64([[1., 0.5], [0.5, 1.]])
right_operator = linalg.LinearOperatorFullMatrix(
mat,
is_square=True,
is_self_adjoint=True,
is_non_singular=True,
is_positive_definite=True)
lhs = np.random.uniform(-1., 1., size=[3, 2, 2])
for adjoint in [False, True]:
for adjoint_arg in [False, True]:
op_val = math_ops.matmul(
lhs, mat, adjoint_a=adjoint, adjoint_b=adjoint_arg)
matmul_val = math_ops.matmul(
lhs, right_operator, adjoint_a=adjoint, adjoint_b=adjoint_arg)
self.assertAllClose(
self.evaluate(op_val), self.evaluate(matmul_val))
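# Hedged recap of the dispatch contract exercised by the tests above: the
# module-level linalg functions accept a LinearOperator and defer to the
# operator's own methods, so both graphs below compute the same values.
def _demo_dispatch():
  operator = linalg.LinearOperatorFullMatrix(
      [[2., 0.], [0., 3.]],
      is_square=True,
      is_self_adjoint=True,
      is_non_singular=True,
      is_positive_definite=True)
  det = linalg.det(operator)      # same value as operator.determinant()
  inverse = linalg.inv(operator)  # a LinearOperator, computed lazily
  return det, inverse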
if __name__ == "__main__":
test.main()
# ==============================================================================
# Source file: tensorflow/python/kernel_tests/linalg/linear_operator_test.py
# (repo: tensorflow-master)
# ==============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_addition
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
add_operators = linear_operator_addition.add_operators
# pylint: disable=unused-argument
class _BadAdder(linear_operator_addition._Adder):
"""Adder that will fail if used."""
def can_add(self, op1, op2):
raise AssertionError("BadAdder.can_add called!")
def _add(self, op1, op2, operator_name, hints):
raise AssertionError("This line should not be reached")
# pylint: enable=unused-argument
class LinearOperatorAdditionCorrectnessTest(test.TestCase):
"""Tests correctness of addition with combinations of a few Adders.
  Tests here are done with the _DEFAULT_ADDITION_TIERS, which means
  add_operators should reduce every input list to a single operator.
This shows that we are able to correctly combine adders using the tiered
system. All Adders should be tested separately, and there is no need to test
every Adder within this class.
"""
def test_one_operator_is_returned_unchanged(self):
op_a = linalg.LinearOperatorDiag([1., 1.])
op_sum = add_operators([op_a])
self.assertEqual(1, len(op_sum))
self.assertIs(op_sum[0], op_a)
def test_at_least_one_operators_required(self):
with self.assertRaisesRegexp(ValueError, "must contain at least one"):
add_operators([])
def test_attempting_to_add_numbers_raises(self):
with self.assertRaisesRegexp(TypeError, "contain only LinearOperator"):
add_operators([1, 2])
@test_util.run_deprecated_v1
def test_two_diag_operators(self):
op_a = linalg.LinearOperatorDiag(
[1., 1.], is_positive_definite=True, name="A")
op_b = linalg.LinearOperatorDiag(
[2., 2.], is_positive_definite=True, name="B")
with self.cached_session():
op_sum = add_operators([op_a, op_b])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorDiag)
self.assertAllClose([[3., 0.], [0., 3.]], op.to_dense().eval())
# Adding positive definite operators produces positive def.
self.assertTrue(op.is_positive_definite)
# Real diagonal ==> self-adjoint.
self.assertTrue(op.is_self_adjoint)
# Positive definite ==> non-singular
self.assertTrue(op.is_non_singular)
# Enforce particular name for this simple case
self.assertEqual("Add/B__A/", op.name)
@test_util.run_deprecated_v1
def test_three_diag_operators(self):
op1 = linalg.LinearOperatorDiag(
[1., 1.], is_positive_definite=True, name="op1")
op2 = linalg.LinearOperatorDiag(
[2., 2.], is_positive_definite=True, name="op2")
op3 = linalg.LinearOperatorDiag(
[3., 3.], is_positive_definite=True, name="op3")
with self.cached_session():
op_sum = add_operators([op1, op2, op3])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
      self.assertIsInstance(op, linalg_lib.LinearOperatorDiag)
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense().eval())
# Adding positive definite operators produces positive def.
self.assertTrue(op.is_positive_definite)
# Real diagonal ==> self-adjoint.
self.assertTrue(op.is_self_adjoint)
# Positive definite ==> non-singular
self.assertTrue(op.is_non_singular)
@test_util.run_deprecated_v1
def test_diag_tril_diag(self):
op1 = linalg.LinearOperatorDiag(
[1., 1.], is_non_singular=True, name="diag_a")
op2 = linalg.LinearOperatorLowerTriangular(
[[2., 0.], [0., 2.]],
is_self_adjoint=True,
is_non_singular=True,
name="tril")
op3 = linalg.LinearOperatorDiag(
[3., 3.], is_non_singular=True, name="diag_b")
with self.cached_session():
op_sum = add_operators([op1, op2, op3])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorLowerTriangular)
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense().eval())
# The diag operators will be self-adjoint (because real and diagonal).
# The TriL operator has the self-adjoint hint set.
self.assertTrue(op.is_self_adjoint)
# Even though op1/2/3 are non-singular, this does not imply op is.
# Since no custom hint was provided, we default to None (unknown).
self.assertEqual(None, op.is_non_singular)
@test_util.run_deprecated_v1
def test_matrix_diag_tril_diag_uses_custom_name(self):
op0 = linalg.LinearOperatorFullMatrix(
[[-1., -1.], [-1., -1.]], name="matrix")
op1 = linalg.LinearOperatorDiag([1., 1.], name="diag_a")
op2 = linalg.LinearOperatorLowerTriangular(
[[2., 0.], [1.5, 2.]], name="tril")
op3 = linalg.LinearOperatorDiag([3., 3.], name="diag_b")
with self.cached_session():
op_sum = add_operators([op0, op1, op2, op3], operator_name="my_operator")
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorFullMatrix)
self.assertAllClose([[5., -1.], [0.5, 5.]], op.to_dense().eval())
self.assertEqual("my_operator", op.name)
def test_incompatible_domain_dimensions_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(2, 4))
with self.assertRaisesRegexp(ValueError, "must.*same domain dimension"):
add_operators([op1, op2])
def test_incompatible_range_dimensions_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(3, 3))
with self.assertRaisesRegexp(ValueError, "must.*same range dimension"):
add_operators([op1, op2])
def test_non_broadcastable_batch_shape_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(4, 3, 3))
with self.assertRaisesRegexp(ValueError, "Incompatible shapes"):
add_operators([op1, op2])
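# A minimal sketch (same module; graph built but not executed) of the tier
# contract tested in LinearOperatorOrderOfAdditionTest below: tier 0 adders
# get the first chance, and later tiers only see what tier 0 left behind.
def _demo_custom_tiers():
  diag = linalg.LinearOperatorDiag([1., 2.])
  tril = linalg.LinearOperatorLowerTriangular([[1., 0.], [3., 4.]])
  tiers = [
      [linear_operator_addition._AddAndReturnDiag()],  # cannot absorb tril
      [linear_operator_addition._AddAndReturnTriL()],  # combines diag + tril
  ]
  # Tier 0 finds nothing to combine; tier 1 returns a single TriL operator.
  return add_operators([diag, tril], addition_tiers=tiers)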
class LinearOperatorOrderOfAdditionTest(test.TestCase):
"""Test that the order of addition is done as specified by tiers."""
def test_tier_0_additions_done_in_tier_0(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
diag3 = linalg.LinearOperatorDiag([1.])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
[_BadAdder()],
]
# Should not raise since all were added in tier 0, and tier 1 (with the
# _BadAdder) was never reached.
op_sum = add_operators([diag1, diag2, diag3], addition_tiers=addition_tiers)
self.assertEqual(1, len(op_sum))
self.assertIsInstance(op_sum[0], linalg.LinearOperatorDiag)
def test_tier_1_additions_done_by_tier_1(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
[linear_operator_addition._AddAndReturnTriL()],
[_BadAdder()],
]
    # Should not raise since all were added by tier 1, and the _BadAdder in
    # tier 2 was never reached.
op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
self.assertEqual(1, len(op_sum))
self.assertIsInstance(op_sum[0], linalg.LinearOperatorLowerTriangular)
def test_tier_1_additions_done_by_tier_1_with_order_flipped(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnTriL()],
[linear_operator_addition._AddAndReturnDiag()],
[_BadAdder()],
]
# Tier 0 could convert to TriL, and this converted everything to TriL,
# including the Diags.
# Tier 1 was never used.
# Tier 2 was never used (therefore, _BadAdder didn't raise).
op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
self.assertEqual(1, len(op_sum))
self.assertIsInstance(op_sum[0], linalg.LinearOperatorLowerTriangular)
@test_util.run_deprecated_v1
def test_cannot_add_everything_so_return_more_than_one_operator(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([2.])
tril5 = linalg.LinearOperatorLowerTriangular([[5.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
]
# Tier 0 (the only tier) can only convert to Diag, so it combines the two
# diags, but the TriL is unchanged.
# Result should contain two operators, one Diag, one TriL.
op_sum = add_operators([diag1, diag2, tril5], addition_tiers=addition_tiers)
self.assertEqual(2, len(op_sum))
found_diag = False
found_tril = False
with self.cached_session():
for op in op_sum:
if isinstance(op, linalg.LinearOperatorDiag):
found_diag = True
self.assertAllClose([[3.]], op.to_dense().eval())
if isinstance(op, linalg.LinearOperatorLowerTriangular):
found_tril = True
self.assertAllClose([[5.]], op.to_dense().eval())
self.assertTrue(found_diag and found_tril)
def test_intermediate_tier_is_not_skipped(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
[_BadAdder()],
[linear_operator_addition._AddAndReturnTriL()],
]
# tril cannot be added in tier 0, and the intermediate tier 1 with the
# BadAdder will catch it and raise.
with self.assertRaisesRegexp(AssertionError, "BadAdder.can_add called"):
add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
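# Hedged pseudocode, inferred from the ordering behavior asserted above (this
# is not the library's actual implementation): each tier greedily combines
# whatever pairs its adders accept before any later tier is consulted, which
# is why an unreachable tier's can_add is never called.
def _sketch_tiered_reduction(operators, addition_tiers, hints=None):
  ops = list(operators)
  for tier in addition_tiers:
    progress = True
    while progress:
      progress = False
      for adder in tier:
        pair = next(
            ((i, j) for i in range(len(ops)) for j in range(i + 1, len(ops))
             if adder.can_add(ops[i], ops[j])), None)
        if pair is not None:
          i, j = pair
          combined = adder.add(ops[i], ops[j], "Add", hints)
          ops = [op for k, op in enumerate(ops) if k not in (i, j)]
          ops.append(combined)
          progress = True
  return ops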
class AddAndReturnScaledIdentityTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnScaledIdentity()
@test_util.run_deprecated_v1
def test_identity_plus_identity(self):
id1 = linalg.LinearOperatorIdentity(num_rows=2)
id2 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3])
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)
with self.cached_session():
self.assertAllClose(2 *
linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(),
operator.to_dense().eval())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
@test_util.run_deprecated_v1
def test_identity_plus_scaled_identity(self):
id1 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3])
id2 = linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=2.2)
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)
with self.cached_session():
self.assertAllClose(3.2 *
linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(),
operator.to_dense().eval())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
@test_util.run_deprecated_v1
def test_scaled_identity_plus_scaled_identity(self):
id1 = linalg.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[2.2, 2.2, 2.2])
id2 = linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=-1.0)
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)
with self.cached_session():
self.assertAllClose(1.2 *
linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(),
operator.to_dense().eval())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
class AddAndReturnDiagTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnDiag()
@test_util.run_deprecated_v1
def test_identity_plus_identity_returns_diag(self):
id1 = linalg.LinearOperatorIdentity(num_rows=2)
id2 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3])
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorDiag)
with self.cached_session():
self.assertAllClose(2 *
linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(),
operator.to_dense().eval())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
@test_util.run_deprecated_v1
def test_diag_plus_diag(self):
diag1 = rng.rand(2, 3, 4)
diag2 = rng.rand(4)
op1 = linalg.LinearOperatorDiag(diag1)
op2 = linalg.LinearOperatorDiag(diag2)
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(op1, op2))
operator = self._adder.add(op1, op2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorDiag)
with self.cached_session():
self.assertAllClose(
linalg.LinearOperatorDiag(diag1 + diag2).to_dense().eval(),
operator.to_dense().eval())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
class AddAndReturnTriLTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnTriL()
@test_util.run_deprecated_v1
def test_diag_plus_tril(self):
diag = linalg.LinearOperatorDiag([1., 2.])
tril = linalg.LinearOperatorLowerTriangular([[10., 0.], [30., 0.]])
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(diag, diag))
self.assertTrue(self._adder.can_add(diag, tril))
operator = self._adder.add(diag, tril, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorLowerTriangular)
with self.cached_session():
self.assertAllClose([[11., 0.], [30., 2.]], operator.to_dense().eval())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
class AddAndReturnMatrixTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnMatrix()
@test_util.run_deprecated_v1
def test_diag_plus_diag(self):
diag1 = linalg.LinearOperatorDiag([1., 2.])
diag2 = linalg.LinearOperatorDiag([-1., 3.])
hints = linear_operator_addition._Hints(
is_positive_definite=False, is_non_singular=False)
self.assertTrue(self._adder.can_add(diag1, diag2))
operator = self._adder.add(diag1, diag2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorFullMatrix)
with self.cached_session():
self.assertAllClose([[0., 0.], [0., 5.]], operator.to_dense().eval())
self.assertFalse(operator.is_positive_definite)
self.assertFalse(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
if __name__ == "__main__":
test.main()
# ==============================================================================
# Source file:
# tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py
# (repo: tensorflow-master)
# ==============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
rng = np.random.RandomState(2016)
class LinearOperatorZerosTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@staticmethod
def tests_to_skip():
return [
"cholesky", "log_abs_det", "inverse", "solve", "solve_with_broadcast"]
@staticmethod
def operator_shapes_infos():
shapes_info = linear_operator_test_util.OperatorShapesInfo
return [
shapes_info((1, 1)),
shapes_info((1, 3, 3)),
shapes_info((3, 4, 4)),
shapes_info((2, 1, 4, 4))]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
del ensure_self_adjoint_and_pd
del use_placeholder
shape = list(build_info.shape)
assert shape[-1] == shape[-2]
batch_shape = shape[:-2]
num_rows = shape[-1]
operator = linalg_lib.LinearOperatorZeros(
num_rows, batch_shape=batch_shape, dtype=dtype)
matrix = array_ops.zeros(shape=shape, dtype=dtype)
return operator, matrix
def test_assert_positive_definite(self):
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
with self.assertRaisesOpError("non-positive definite"):
operator.assert_positive_definite()
def test_assert_non_singular(self):
with self.assertRaisesOpError("non-invertible"):
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
operator.assert_non_singular()
@test_util.run_deprecated_v1
def test_assert_self_adjoint(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
operator.assert_self_adjoint().run() # Should not fail
def test_non_scalar_num_rows_raises_static(self):
with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
linalg_lib.LinearOperatorZeros(num_rows=[2])
with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=[2])
def test_non_integer_num_rows_raises_static(self):
with self.assertRaisesRegexp(TypeError, "must be integer"):
linalg_lib.LinearOperatorZeros(num_rows=2.)
with self.assertRaisesRegexp(TypeError, "must be integer"):
linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=2.)
def test_negative_num_rows_raises_static(self):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
linalg_lib.LinearOperatorZeros(num_rows=-2)
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=-2)
def test_non_1d_batch_shape_raises_static(self):
with self.assertRaisesRegexp(ValueError, "must be a 1-D"):
linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=2)
def test_non_integer_batch_shape_raises_static(self):
with self.assertRaisesRegexp(TypeError, "must be integer"):
linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[2.])
def test_negative_batch_shape_raises_static(self):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[-2])
@test_util.run_deprecated_v1
def test_non_scalar_num_rows_raises_dynamic(self):
with self.cached_session():
num_rows = array_ops.placeholder(dtypes.int32)
operator = linalg_lib.LinearOperatorZeros(
num_rows, assert_proper_shapes=True)
with self.assertRaisesOpError("must be a 0-D Tensor"):
operator.to_dense().eval(feed_dict={num_rows: [2]})
@test_util.run_deprecated_v1
def test_negative_num_rows_raises_dynamic(self):
with self.cached_session():
n = array_ops.placeholder(dtypes.int32)
operator = linalg_lib.LinearOperatorZeros(
num_rows=n, assert_proper_shapes=True)
with self.assertRaisesOpError("must be non-negative"):
operator.to_dense().eval(feed_dict={n: -2})
operator = linalg_lib.LinearOperatorZeros(
num_rows=2, num_columns=n, assert_proper_shapes=True)
with self.assertRaisesOpError("must be non-negative"):
operator.to_dense().eval(feed_dict={n: -2})
@test_util.run_deprecated_v1
def test_non_1d_batch_shape_raises_dynamic(self):
with self.cached_session():
batch_shape = array_ops.placeholder(dtypes.int32)
operator = linalg_lib.LinearOperatorZeros(
num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
with self.assertRaisesOpError("must be a 1-D"):
operator.to_dense().eval(feed_dict={batch_shape: 2})
@test_util.run_deprecated_v1
def test_negative_batch_shape_raises_dynamic(self):
with self.cached_session():
batch_shape = array_ops.placeholder(dtypes.int32)
operator = linalg_lib.LinearOperatorZeros(
num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
with self.assertRaisesOpError("must be non-negative"):
operator.to_dense().eval(feed_dict={batch_shape: [-2]})
def test_wrong_matrix_dimensions_raises_static(self):
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
x = rng.randn(3, 3).astype(np.float32)
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
operator.matmul(x)
@test_util.run_deprecated_v1
def test_wrong_matrix_dimensions_raises_dynamic(self):
num_rows = array_ops.placeholder(dtypes.int32)
x = array_ops.placeholder(dtypes.float32)
with self.cached_session():
operator = linalg_lib.LinearOperatorZeros(
num_rows, assert_proper_shapes=True)
y = operator.matmul(x)
with self.assertRaisesOpError("Incompatible.*dimensions"):
y.eval(feed_dict={num_rows: 2, x: rng.rand(3, 3)})
def test_is_x_flags(self):
    # LinearOperatorZeros defaults to is_self_adjoint=True, but a zeros matrix
    # is singular, so is_non_singular and is_positive_definite default to False.
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
self.assertFalse(operator.is_positive_definite)
self.assertFalse(operator.is_non_singular)
self.assertTrue(operator.is_self_adjoint)
def test_zeros_matmul(self):
operator1 = linalg_lib.LinearOperatorIdentity(num_rows=2)
operator2 = linalg_lib.LinearOperatorZeros(num_rows=2)
    self.assertIsInstance(
        operator1.matmul(operator2), linalg_lib.LinearOperatorZeros)
    self.assertIsInstance(
        operator2.matmul(operator1), linalg_lib.LinearOperatorZeros)
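# Sketch (same graph-mode conventions as the tests above) of the shortcut
# verified in test_zeros_matmul: composing with a zeros operator does not
# build a dense product, it simply returns another LinearOperatorZeros.
def _demo_zeros_absorbs_matmul():
  zeros = linalg_lib.LinearOperatorZeros(num_rows=2)
  identity = linalg_lib.LinearOperatorIdentity(num_rows=2)
  return zeros.matmul(identity)  # a LinearOperatorZeros, no dense math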
class LinearOperatorZerosNotSquareTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
def operator_and_matrix(self, build_info, dtype, use_placeholder):
del use_placeholder
shape = list(build_info.shape)
batch_shape = shape[:-2]
num_rows = shape[-2]
num_columns = shape[-1]
operator = linalg_lib.LinearOperatorZeros(
num_rows, num_columns, is_square=False, is_self_adjoint=False,
batch_shape=batch_shape, dtype=dtype)
matrix = array_ops.zeros(shape=shape, dtype=dtype)
return operator, matrix
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorZerosTest)
linear_operator_test_util.add_tests(LinearOperatorZerosNotSquareTest)
test.main()
# ==============================================================================
# Source file: tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py
# (repo: tensorflow-master)
# ==============================================================================
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
import scipy.linalg
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.linalg import linear_operator_toeplitz
from tensorflow.python.platform import test
linalg = linalg_lib
_to_complex = linear_operator_toeplitz._to_complex
class LinearOperatorToeplitzTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""We overwrite the FFT operation mapping for testing."""
with test.TestCase._constrain_devices_and_set_default(
self, sess, use_gpu, force_gpu) as sess:
with spectral_ops_test_util.fft_kernel_label_map():
yield sess
def setUp(self):
# TODO(srvasude): Lower these tolerances once specialized solve and
# determinants are implemented.
self._atol[dtypes.float32] = 1e-3
self._rtol[dtypes.float32] = 1e-3
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
self._atol[dtypes.complex64] = 1e-3
self._rtol[dtypes.complex64] = 1e-3
self._atol[dtypes.complex128] = 1e-10
self._rtol[dtypes.complex128] = 1e-10
@staticmethod
def tests_to_skip():
# Skip solve tests, as these could have better stability
# (currently exercises the base class).
# TODO(srvasude): Enable these when solve is implemented.
return ["cholesky", "inverse", "solve", "solve_with_broadcast"]
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
# non-batch operators (n, n) and batch operators.
return [
shape_info((1, 1)),
shape_info((1, 6, 6)),
shape_info((3, 4, 4)),
shape_info((2, 1, 3, 3))
]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
row = np.random.uniform(low=1., high=5., size=shape[:-1])
col = np.random.uniform(low=1., high=5., size=shape[:-1])
# Make sure first entry is the same
row[..., 0] = col[..., 0]
if ensure_self_adjoint_and_pd:
# Note that a Toeplitz matrix generated from a linearly decreasing
# non-negative sequence is positive definite. See
# https://www.math.cinvestav.mx/~grudsky/Papers/118_29062012_Albrecht.pdf
# for details.
row = np.linspace(start=10., stop=1., num=shape[-1])
      # The first row and column must coincide (not just their first entry) to
      # guarantee the matrix is symmetric.
      col = row
lin_op_row = math_ops.cast(row, dtype=dtype)
lin_op_col = math_ops.cast(col, dtype=dtype)
if use_placeholder:
lin_op_row = array_ops.placeholder_with_default(
lin_op_row, shape=None)
lin_op_col = array_ops.placeholder_with_default(
lin_op_col, shape=None)
operator = linear_operator_toeplitz.LinearOperatorToeplitz(
row=lin_op_row,
col=lin_op_col,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
flattened_row = np.reshape(row, (-1, shape[-1]))
flattened_col = np.reshape(col, (-1, shape[-1]))
flattened_toeplitz = np.zeros(
[flattened_row.shape[0], shape[-1], shape[-1]])
for i in range(flattened_row.shape[0]):
flattened_toeplitz[i] = scipy.linalg.toeplitz(
flattened_col[i],
flattened_row[i])
matrix = np.reshape(flattened_toeplitz, shape)
matrix = math_ops.cast(matrix, dtype=dtype)
return operator, matrix
def test_scalar_row_col_raises(self):
with self.assertRaisesRegexp(ValueError, "must have at least 1 dimension"):
linear_operator_toeplitz.LinearOperatorToeplitz(1., 1.)
with self.assertRaisesRegexp(ValueError, "must have at least 1 dimension"):
linear_operator_toeplitz.LinearOperatorToeplitz([1.], 1.)
with self.assertRaisesRegexp(ValueError, "must have at least 1 dimension"):
linear_operator_toeplitz.LinearOperatorToeplitz(1., [1.])
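# Worked example in plain scipy, mirroring how operator_and_matrix above
# builds its reference matrix: a Toeplitz matrix is fully determined by its
# first column and first row, whose leading entries must agree.
def _demo_toeplitz_structure():
  col = [1., 2., 3.]
  row = [1., 5., 6.]
  # scipy.linalg.toeplitz(col, row) ==
  # [[1., 5., 6.],
  #  [2., 1., 5.],
  #  [3., 2., 1.]]
  return scipy.linalg.toeplitz(col, row)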
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorToeplitzTest)
test.main()
# ==============================================================================
# Source file:
# tensorflow/python/kernel_tests/linalg/linear_operator_toeplitz_test.py
# (repo: tensorflow-master)
# ==============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
class BaseLinearOperatorLowRankUpdatetest(object):
"""Base test for this type of operator."""
# Subclasses should set these attributes to either True or False.
  # If True, D is used: A = L + UDU^H or A = L + UDV^H, depending on _use_v.
  # If False, A = L + UU^H or A = L + UV^H.
_use_diag_update = None
# If True, diag is > 0, which means D is symmetric positive definite.
_is_diag_update_positive = None
  # If True, V is distinct from U: A = L + UDV^H or A = L + UV^H.
  # If False, V = U: A = L + UDU^H or A = L + UU^H, depending on
  # _use_diag_update.
_use_v = None
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
# Previously we had a (2, 10, 10) shape at the end. We did this to test the
# inversion and determinant lemmas on not-tiny matrices, since these are
# known to have stability issues. This resulted in test timeouts, so this
# shape has been removed, but rest assured, the tests did pass.
return [
shape_info((0, 0)),
shape_info((1, 1)),
shape_info((1, 3, 3)),
shape_info((3, 4, 4)),
shape_info((2, 1, 4, 4))]
def _gen_positive_diag(self, dtype, diag_shape):
if dtype.is_complex:
diag = linear_operator_test_util.random_uniform(
diag_shape, minval=1e-4, maxval=1., dtype=dtypes.float32)
return math_ops.cast(diag, dtype=dtype)
return linear_operator_test_util.random_uniform(
diag_shape, minval=1e-4, maxval=1., dtype=dtype)
def operator_and_matrix(self, shape_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
# Recall A = L + UDV^H
shape = list(shape_info.shape)
diag_shape = shape[:-1]
k = shape[-2] // 2 + 1
u_perturbation_shape = shape[:-1] + [k]
diag_update_shape = shape[:-2] + [k]
# base_operator L will be a symmetric positive definite diagonal linear
# operator, with condition number as high as 1e4.
base_diag = self._gen_positive_diag(dtype, diag_shape)
lin_op_base_diag = base_diag
# U
u = linear_operator_test_util.random_normal_correlated_columns(
u_perturbation_shape, dtype=dtype)
lin_op_u = u
# V
v = linear_operator_test_util.random_normal_correlated_columns(
u_perturbation_shape, dtype=dtype)
lin_op_v = v
# D
if self._is_diag_update_positive or ensure_self_adjoint_and_pd:
diag_update = self._gen_positive_diag(dtype, diag_update_shape)
else:
diag_update = linear_operator_test_util.random_normal(
diag_update_shape, stddev=1e-4, dtype=dtype)
lin_op_diag_update = diag_update
if use_placeholder:
lin_op_base_diag = array_ops.placeholder_with_default(
base_diag, shape=None)
lin_op_u = array_ops.placeholder_with_default(u, shape=None)
lin_op_v = array_ops.placeholder_with_default(v, shape=None)
lin_op_diag_update = array_ops.placeholder_with_default(
diag_update, shape=None)
base_operator = linalg.LinearOperatorDiag(
lin_op_base_diag,
is_positive_definite=True,
is_self_adjoint=True)
operator = linalg.LinearOperatorLowRankUpdate(
base_operator,
lin_op_u,
v=lin_op_v if self._use_v else None,
diag_update=lin_op_diag_update if self._use_diag_update else None,
is_diag_update_positive=self._is_diag_update_positive)
# The matrix representing L
base_diag_mat = array_ops.matrix_diag(base_diag)
# The matrix representing D
diag_update_mat = array_ops.matrix_diag(diag_update)
# Set up mat as some variant of A = L + UDV^H
if self._use_v and self._use_diag_update:
# In this case, we have L + UDV^H and it isn't symmetric.
expect_use_cholesky = False
matrix = base_diag_mat + math_ops.matmul(
u, math_ops.matmul(diag_update_mat, v, adjoint_b=True))
elif self._use_v:
# In this case, we have L + UDV^H and it isn't symmetric.
expect_use_cholesky = False
matrix = base_diag_mat + math_ops.matmul(u, v, adjoint_b=True)
elif self._use_diag_update:
# In this case, we have L + UDU^H, which is PD if D > 0, since L > 0.
expect_use_cholesky = self._is_diag_update_positive
matrix = base_diag_mat + math_ops.matmul(
u, math_ops.matmul(diag_update_mat, u, adjoint_b=True))
else:
# In this case, we have L + UU^H, which is PD since L > 0.
expect_use_cholesky = True
matrix = base_diag_mat + math_ops.matmul(u, u, adjoint_b=True)
if expect_use_cholesky:
self.assertTrue(operator._use_cholesky)
else:
self.assertFalse(operator._use_cholesky)
return operator, matrix
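# Hedged numeric check in plain numpy (small sizes, seeded rng from above) of
# the Woodbury identity underlying the low-rank-update solves tested below:
#   (L + U D V^H)^{-1}
#       = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}
def _demo_woodbury_identity():
  n, k = 4, 2
  l = np.diag(rng.rand(n) + 1.)  # L: positive definite diagonal
  d = np.diag(rng.rand(k) + 1.)  # D: positive definite diagonal
  u = rng.randn(n, k)
  v = rng.randn(n, k)
  a = l + u.dot(d).dot(v.T)
  l_inv = np.linalg.inv(l)
  capacitance = np.linalg.inv(d) + v.T.dot(l_inv).dot(u)
  a_inv = l_inv - l_inv.dot(u).dot(np.linalg.inv(capacitance)).dot(
      v.T).dot(l_inv)
  np.testing.assert_allclose(np.linalg.inv(a), a_inv, rtol=1e-6, atol=1e-8)
  return a_inv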
class LinearOperatorLowRankUpdatetestWithDiagUseCholesky(
BaseLinearOperatorLowRankUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky."""
_use_diag_update = True
_is_diag_update_positive = True
_use_v = False
def setUp(self):
# Decrease tolerance since we are testing with condition numbers as high as
# 1e4.
self._atol[dtypes.float32] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
self._rtol[dtypes.complex64] = 1e-4
class LinearOperatorLowRankUpdatetestWithDiagCannotUseCholesky(
BaseLinearOperatorLowRankUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UDU^H, D !> 0, L > 0 ==> A !> 0 and we cannot use a Cholesky."""
@staticmethod
def tests_to_skip():
return ["cholesky"]
_use_diag_update = True
_is_diag_update_positive = False
_use_v = False
def setUp(self):
# Decrease tolerance since we are testing with condition numbers as high as
# 1e4. This class does not use Cholesky, and thus needs even looser
# tolerance.
self._atol[dtypes.float32] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._atol[dtypes.float64] = 1e-9
self._rtol[dtypes.float64] = 1e-9
self._rtol[dtypes.complex64] = 2e-4
class LinearOperatorLowRankUpdatetestNoDiagUseCholesky(
BaseLinearOperatorLowRankUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UU^H, L > 0 ==> A > 0 and we can use a Cholesky."""
_use_diag_update = False
_is_diag_update_positive = None
_use_v = False
def setUp(self):
# Decrease tolerance since we are testing with condition numbers as high as
# 1e4.
self._atol[dtypes.float32] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
self._rtol[dtypes.complex64] = 1e-4
class LinearOperatorLowRankUpdatetestNoDiagCannotUseCholesky(
BaseLinearOperatorLowRankUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UV^H, L > 0 ==> A is not symmetric and we cannot use a Cholesky."""
@staticmethod
def tests_to_skip():
return ["cholesky"]
_use_diag_update = False
_is_diag_update_positive = None
_use_v = True
def setUp(self):
# Decrease tolerance since we are testing with condition numbers as high as
# 1e4. This class does not use Cholesky, and thus needs even looser
# tolerance.
self._atol[dtypes.float32] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._atol[dtypes.float64] = 1e-9
self._rtol[dtypes.float64] = 1e-9
self._rtol[dtypes.complex64] = 2e-4
class LinearOperatorLowRankUpdatetestWithDiagNotSquare(
BaseLinearOperatorLowRankUpdatetest,
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky."""
_use_diag_update = True
_is_diag_update_positive = True
_use_v = True
class LinearOperatorLowRankUpdateBroadcastsShape(test.TestCase):
"""Test that the operator's shape is the broadcast of arguments."""
def test_static_shape_broadcasts_up_from_operator_to_other_args(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3)
u = array_ops.ones(shape=[2, 3, 2])
diag = array_ops.ones(shape=[2, 2])
operator = linalg.LinearOperatorLowRankUpdate(base_operator, u, diag)
# domain_dimension is 3
self.assertAllEqual([2, 3, 3], operator.shape)
self.assertAllEqual([2, 3, 3], self.evaluate(operator.to_dense()).shape)
@test_util.run_deprecated_v1
def test_dynamic_shape_broadcasts_up_from_operator_to_other_args(self):
num_rows_ph = array_ops.placeholder(dtypes.int32)
base_operator = linalg.LinearOperatorIdentity(num_rows=num_rows_ph)
u_shape_ph = array_ops.placeholder(dtypes.int32)
u = array_ops.ones(shape=u_shape_ph)
operator = linalg.LinearOperatorLowRankUpdate(base_operator, u)
feed_dict = {
num_rows_ph: 3,
u_shape_ph: [2, 3, 2], # batch_shape = [2]
}
with self.cached_session():
shape_tensor = operator.shape_tensor().eval(feed_dict=feed_dict)
self.assertAllEqual([2, 3, 3], shape_tensor)
dense = operator.to_dense().eval(feed_dict=feed_dict)
self.assertAllEqual([2, 3, 3], dense.shape)
def test_u_and_v_incompatible_batch_shape_raises(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
u = rng.rand(5, 3, 2)
v = rng.rand(4, 3, 2)
with self.assertRaisesRegexp(ValueError, "Incompatible shapes"):
linalg.LinearOperatorLowRankUpdate(base_operator, u=u, v=v)
def test_u_and_base_operator_incompatible_batch_shape_raises(self):
base_operator = linalg.LinearOperatorIdentity(
num_rows=3, batch_shape=[4], dtype=np.float64)
u = rng.rand(5, 3, 2)
with self.assertRaisesRegexp(ValueError, "Incompatible shapes"):
linalg.LinearOperatorLowRankUpdate(base_operator, u=u)
def test_u_and_base_operator_incompatible_domain_dimension(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
u = rng.rand(5, 4, 2)
with self.assertRaisesRegexp(ValueError, "not compatible"):
linalg.LinearOperatorLowRankUpdate(base_operator, u=u)
def test_u_and_diag_incompatible_low_rank_raises(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
u = rng.rand(5, 3, 2)
diag = rng.rand(5, 4) # Last dimension should be 2
with self.assertRaisesRegexp(ValueError, "not compatible"):
linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag)
def test_diag_incompatible_batch_shape_raises(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
u = rng.rand(5, 3, 2)
diag = rng.rand(4, 2) # First dimension should be 5
with self.assertRaisesRegexp(ValueError, "Incompatible shapes"):
linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag)
if __name__ == "__main__":
linear_operator_test_util.add_tests(
LinearOperatorLowRankUpdatetestWithDiagUseCholesky)
linear_operator_test_util.add_tests(
LinearOperatorLowRankUpdatetestWithDiagCannotUseCholesky)
linear_operator_test_util.add_tests(
LinearOperatorLowRankUpdatetestNoDiagUseCholesky)
linear_operator_test_util.add_tests(
LinearOperatorLowRankUpdatetestNoDiagCannotUseCholesky)
linear_operator_test_util.add_tests(
LinearOperatorLowRankUpdatetestWithDiagNotSquare)
test.main()
# ==============================================================================
# Source file:
# tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py
# (repo: tensorflow-master)
# ==============================================================================
# =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for proto ops reading descriptors from other sources."""
# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.kernel_tests.proto import descriptor_source_test_base as test_base
from tensorflow.python.ops import proto_ops
from tensorflow.python.platform import test
class DescriptorSourceTest(test_base.DescriptorSourceTestBase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
super(DescriptorSourceTest, self).__init__(decode_module=proto_ops,
encode_module=proto_ops,
methodName=methodName)
if __name__ == '__main__':
test.main()
# ==============================================================================
# Source file: tensorflow/python/kernel_tests/proto/descriptor_source_test.py
# (repo: tensorflow-master)
# ==============================================================================
# =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for decode_proto op."""
# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.kernel_tests.proto import proto_op_test_base as test_base
from tensorflow.python.kernel_tests.proto import test_example_pb2
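# Hedged usage sketch (a hypothetical helper; field names and message type
# follow the calls made in the tests below, and the module providing
# decode_proto is injected by subclasses): decoding serialized protos yields
# a sizes tensor of per-field repeat counts plus one value tensor per
# requested field, each shaped batch_shape + [max_repeat_count].
def _demo_decode_proto_call(decode_module, serialized_bytes):
  batch = np.array([serialized_bytes], dtype=object)  # batch_shape == [1]
  sizes, value_tensors = decode_module.decode_proto(
      batch,
      message_type='tensorflow.contrib.proto.TestValue',
      field_names=['double_value'],
      output_types=[dtypes.float64],
      message_format='binary',
      sanitize=False)
  return sizes, value_tensors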
class DecodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
"""Base class for testing proto decoding ops."""
def __init__(self, decode_module, methodName='runTest'): # pylint: disable=invalid-name
"""DecodeProtoOpTestBase initializer.
Args:
decode_module: a module containing the `decode_proto_op` method
methodName: the name of the test method (same as for test.TestCase)
"""
super(DecodeProtoOpTestBase, self).__init__(methodName)
self._decode_module = decode_module
def _compareValues(self, fd, vs, evs):
"""Compare lists/arrays of field values."""
if len(vs) != len(evs):
self.fail('Field %s decoded %d outputs, expected %d' %
(fd.name, len(vs), len(evs)))
for i, ev in enumerate(evs):
# Special case fuzzy match for float32. TensorFlow seems to mess with
# MAX_FLT slightly and the test doesn't work otherwise.
# TODO(nix): ask on TF list about why MAX_FLT doesn't pass through.
if fd.cpp_type == fd.CPPTYPE_FLOAT:
# Numpy isclose() is better than assertIsClose() which uses an absolute
# value comparison.
self.assertTrue(
np.isclose(vs[i], ev), 'expected %r, actual %r' % (ev, vs[i]))
elif fd.cpp_type == fd.CPPTYPE_STRING:
# In Python3 string tensor values will be represented as bytes, so we
# reencode the proto values to match that.
self.assertEqual(vs[i], ev.encode('ascii'))
else:
# Doubles and other types pass through unscathed.
self.assertEqual(vs[i], ev)
def _compareProtos(self, batch_shape, sizes, fields, field_dict):
"""Compare protos of type TestValue.
Args:
batch_shape: the shape of the input tensor of serialized messages.
sizes: int matrix of repeat counts returned by decode_proto
fields: list of test_example_pb2.FieldSpec (types and expected values)
field_dict: map from field names to decoded numpy tensors of values
"""
# Check that expected values match.
for field in fields:
values = field_dict[field.name]
self.assertEqual(dtypes.as_dtype(values.dtype), field.dtype)
if 'ext_value' in field.name:
fd = test_example_pb2.PrimitiveValue()
else:
fd = field.value.DESCRIPTOR.fields_by_name[field.name]
# Values has the same shape as the input plus an extra
# dimension for repeats.
self.assertEqual(list(values.shape)[:-1], batch_shape)
# Nested messages are represented as TF strings, requiring
# some special handling.
if field.name == 'message_value' or 'ext_value' in field.name:
vs = []
for buf in values.flat:
msg = test_example_pb2.PrimitiveValue()
msg.ParseFromString(buf)
vs.append(msg)
if 'ext_value' in field.name:
evs = field.value.Extensions[test_example_pb2.ext_value]
else:
evs = getattr(field.value, field.name)
if len(vs) != len(evs):
self.fail('Field %s decoded %d outputs, expected %d' %
(fd.name, len(vs), len(evs)))
for v, ev in zip(vs, evs):
self.assertEqual(v, ev)
continue
      tf_type_to_primitive_value_field = {
          dtypes.bool: 'bool_value',
          dtypes.float32: 'float_value',
          dtypes.float64: 'double_value',
          dtypes.int8: 'int8_value',
          dtypes.int32: 'int32_value',
          dtypes.int64: 'int64_value',
          dtypes.string: 'string_value',
          dtypes.uint8: 'uint8_value',
          dtypes.uint32: 'uint32_value',
          dtypes.uint64: 'uint64_value',
      }
tf_field_name = tf_type_to_primitive_value_field.get(field.dtype)
if tf_field_name is None:
        self.fail('Unhandled tensorflow type %s' % field.dtype)
self._compareValues(fd, values.flat,
getattr(field.value, tf_field_name))
def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch,
message_type, message_format, sanitize,
force_disordered=False):
"""Run decode tests on a batch of messages.
Args:
fields: list of test_example_pb2.FieldSpec (types and expected values)
case_sizes: expected sizes array
batch_shape: the shape of the input tensor of serialized messages
batch: list of serialized messages
message_type: descriptor name for messages
message_format: format of messages, 'text' or 'binary'
sanitize: whether to sanitize binary protobuf inputs
force_disordered: whether to force fields encoded out of order.
"""
if force_disordered:
# Exercise code path that handles out-of-order fields by prepending extra
# fields with tag numbers higher than any real field. Note that this won't
# work with sanitization because that forces reserialization using a
# trusted decoder and encoder.
assert not sanitize
extra_fields = test_example_pb2.ExtraFields()
extra_fields.string_value = 'IGNORE ME'
extra_fields.bool_value = False
extra_msg = extra_fields.SerializeToString()
batch = [extra_msg + msg for msg in batch]
# Numpy silently truncates the strings if you don't specify dtype=object.
batch = np.array(batch, dtype=object)
batch = np.reshape(batch, batch_shape)
field_names = [f.name for f in fields]
output_types = [f.dtype for f in fields]
with self.cached_session() as sess:
sizes, vtensor = self._decode_module.decode_proto(
batch,
message_type=message_type,
field_names=field_names,
output_types=output_types,
message_format=message_format,
sanitize=sanitize)
vlist = sess.run([sizes] + vtensor)
sizes = vlist[0]
# Values is a list of tensors, one for each field.
value_tensors = vlist[1:]
# Check that the repeat sizes are correct.
self.assertTrue(
np.all(np.array(sizes.shape) == batch_shape + [len(field_names)]))
# Check that the decoded sizes match the expected sizes.
self.assertEqual(len(sizes.flat), len(case_sizes))
self.assertTrue(
np.all(sizes.flat == np.array(
case_sizes, dtype=np.int32)))
field_dict = dict(zip(field_names, value_tensors))
self._compareProtos(batch_shape, sizes, fields, field_dict)
@parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
def testBinary(self, case):
batch = [value.SerializeToString() for value in case.values]
self._runDecodeProtoTests(
case.fields,
case.sizes,
list(case.shapes),
batch,
'tensorflow.contrib.proto.TestValue',
'binary',
sanitize=False)
@parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
def testBinaryDisordered(self, case):
batch = [value.SerializeToString() for value in case.values]
self._runDecodeProtoTests(
case.fields,
case.sizes,
list(case.shapes),
batch,
'tensorflow.contrib.proto.TestValue',
'binary',
sanitize=False,
force_disordered=True)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testPacked(self, case):
# Now try with the packed serialization.
#
# We test the packed representations by loading the same test case using
# PackedTestValue instead of TestValue. To do this we rely on the text
# format being the same for packed and unpacked fields, and reparse the
# test message using the packed version of the proto.
packed_batch = [
# Note: float_format='.17g' is necessary to ensure preservation of
# doubles and floats in text format.
text_format.Parse(
text_format.MessageToString(value, float_format='.17g'),
test_example_pb2.PackedTestValue()).SerializeToString()
for value in case.values
]
self._runDecodeProtoTests(
case.fields,
case.sizes,
list(case.shapes),
packed_batch,
'tensorflow.contrib.proto.PackedTestValue',
'binary',
sanitize=False)
@parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
def testText(self, case):
# Note: float_format='.17g' is necessary to ensure preservation of
# doubles and floats in text format.
text_batch = [
text_format.MessageToString(
value, float_format='.17g') for value in case.values
]
self._runDecodeProtoTests(
case.fields,
case.sizes,
list(case.shapes),
text_batch,
'tensorflow.contrib.proto.TestValue',
'text',
sanitize=False)
@parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
def testSanitizerGood(self, case):
batch = [value.SerializeToString() for value in case.values]
self._runDecodeProtoTests(
case.fields,
case.sizes,
list(case.shapes),
batch,
'tensorflow.contrib.proto.TestValue',
'binary',
sanitize=True)
  @parameterized.parameters(False, True)
def testCorruptProtobuf(self, sanitize):
corrupt_proto = 'This is not a binary protobuf'
# Numpy silently truncates the strings if you don't specify dtype=object.
batch = np.array(corrupt_proto, dtype=object)
msg_type = 'tensorflow.contrib.proto.TestCase'
field_names = ['sizes']
field_types = [dtypes.int32]
with self.assertRaisesRegexp(
errors.DataLossError, 'Unable to parse binary protobuf'
'|Failed to consume entire buffer'):
self.evaluate(
self._decode_module.decode_proto(
batch,
message_type=msg_type,
field_names=field_names,
output_types=field_types,
sanitize=sanitize))
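# A minimal usage sketch (not part of the original suite) of the contract the
# tests above exercise. `decode_module` stands for whatever module supplies
# `decode_proto` (e.g. tensorflow.python.ops.proto_ops); the message type and
# field names are the ones used throughout this file.
def _example_decode_usage(decode_module, serialized_messages):
  # dtype=object prevents numpy from silently truncating the strings.
  batch = np.array(serialized_messages, dtype=object)
  # For a batch of shape [N], `sizes` has shape [N, num_fields] (per-message
  # repeat counts) and each tensor in `values` has shape [N, max_repeats].
  sizes, values = decode_module.decode_proto(
      batch,
      message_type='tensorflow.contrib.proto.TestValue',
      field_names=['double_value', 'bool_value'],
      output_types=[dtypes.float64, dtypes.bool])
  return sizes, values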
|
tensorflow-master
|
tensorflow/python/kernel_tests/proto/decode_proto_op_test_base.py
|
# =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for decode_proto op."""
# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.kernel_tests.proto import decode_proto_op_test_base as test_base
from tensorflow.python.ops import proto_ops as proto_ops
from tensorflow.python.platform import test
class DecodeProtoOpTest(test_base.DecodeProtoOpTestBase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
super(DecodeProtoOpTest, self).__init__(proto_ops, methodName)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/proto/decode_proto_op_test.py
|
# =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for proto ops reading descriptors from other sources."""
# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from google.protobuf.descriptor_pb2 import FieldDescriptorProto
from google.protobuf.descriptor_pb2 import FileDescriptorSet
from tensorflow.python.framework import dtypes
from tensorflow.python.kernel_tests.proto import proto_op_test_base as test_base
from tensorflow.python.platform import test
class DescriptorSourceTestBase(test.TestCase):
"""Base class for testing descriptor sources."""
def __init__(self, decode_module, encode_module, methodName='runTest'): # pylint: disable=invalid-name
"""DescriptorSourceTestBase initializer.
Args:
decode_module: a module containing the `decode_proto_op` method
encode_module: a module containing the `encode_proto_op` method
methodName: the name of the test method (same as for test.TestCase)
"""
super(DescriptorSourceTestBase, self).__init__(methodName)
self._decode_module = decode_module
self._encode_module = encode_module
# NOTE: We generate the descriptor programmatically instead of via a compiler
# because of differences between different versions of the compiler.
#
# The generated descriptor should capture the subset of `test_example.proto`
# used in `test_base.simple_test_case()`.
def _createDescriptorProto(self):
proto = FileDescriptorSet()
file_proto = proto.file.add(
name='types.proto', package='tensorflow', syntax='proto3')
enum_proto = file_proto.enum_type.add(name='DataType')
enum_proto.value.add(name='DT_DOUBLE', number=0)
enum_proto.value.add(name='DT_BOOL', number=1)
file_proto = proto.file.add(
name='test_example.proto',
package='tensorflow.contrib.proto',
dependency=['types.proto'])
message_proto = file_proto.message_type.add(name='TestCase')
message_proto.field.add(
name='values',
number=1,
type=FieldDescriptorProto.TYPE_MESSAGE,
type_name='.tensorflow.contrib.proto.TestValue',
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='shapes',
number=2,
type=FieldDescriptorProto.TYPE_INT32,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='sizes',
number=3,
type=FieldDescriptorProto.TYPE_INT32,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='fields',
number=4,
type=FieldDescriptorProto.TYPE_MESSAGE,
type_name='.tensorflow.contrib.proto.FieldSpec',
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto = file_proto.message_type.add(
name='TestValue')
message_proto.field.add(
name='double_value',
number=1,
type=FieldDescriptorProto.TYPE_DOUBLE,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='bool_value',
number=2,
type=FieldDescriptorProto.TYPE_BOOL,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto = file_proto.message_type.add(
name='FieldSpec')
message_proto.field.add(
name='name',
number=1,
type=FieldDescriptorProto.TYPE_STRING,
label=FieldDescriptorProto.LABEL_OPTIONAL)
message_proto.field.add(
name='dtype',
number=2,
type=FieldDescriptorProto.TYPE_ENUM,
type_name='.tensorflow.DataType',
label=FieldDescriptorProto.LABEL_OPTIONAL)
message_proto.field.add(
name='value',
number=3,
type=FieldDescriptorProto.TYPE_MESSAGE,
type_name='.tensorflow.contrib.proto.TestValue',
label=FieldDescriptorProto.LABEL_OPTIONAL)
return proto
def _writeProtoToFile(self, proto):
fn = os.path.join(self.get_temp_dir(), 'descriptor.pb')
with open(fn, 'wb') as f:
f.write(proto.SerializeToString())
return fn
def _testRoundtrip(self, descriptor_source):
# Numpy silently truncates the strings if you don't specify dtype=object.
in_bufs = np.array(
[test_base.ProtoOpTestBase.simple_test_case().SerializeToString()],
dtype=object)
message_type = 'tensorflow.contrib.proto.TestCase'
field_names = ['values', 'shapes', 'sizes', 'fields']
tensor_types = [dtypes.string, dtypes.int32, dtypes.int32, dtypes.string]
with self.cached_session() as sess:
sizes, field_tensors = self._decode_module.decode_proto(
in_bufs,
message_type=message_type,
field_names=field_names,
output_types=tensor_types,
descriptor_source=descriptor_source)
out_tensors = self._encode_module.encode_proto(
sizes,
field_tensors,
message_type=message_type,
field_names=field_names,
descriptor_source=descriptor_source)
out_bufs, = sess.run([out_tensors])
# Check that the re-encoded tensor has the same shape.
self.assertEqual(in_bufs.shape, out_bufs.shape)
# Compare the input and output.
for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
# Check that the input and output serialized messages are identical.
# If we fail here, there is a difference in the serialized
# representation but the new serialization still parses. This could
# be harmless (a change in map ordering?) or it could be bad (e.g.
# loss of packing in the encoding).
self.assertEqual(in_buf, out_buf)
def testWithFileDescriptorSet(self):
# First try parsing with a local proto db, which should fail.
with self.assertRaisesOpError('No descriptor found for message type'):
self._testRoundtrip(b'local://')
# Now try parsing with a FileDescriptorSet which contains the test proto.
proto = self._createDescriptorProto()
proto_file = self._writeProtoToFile(proto)
self._testRoundtrip(proto_file)
# Finally, try parsing the descriptor as a serialized string.
self._testRoundtrip(b'bytes://' + proto.SerializeToString())
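# For reference (illustrative, not part of the original suite): the three
# descriptor_source forms exercised above are 'local://' (descriptors linked
# into the binary), a path to a serialized FileDescriptorSet on disk, and a
# serialized FileDescriptorSet passed inline with a b'bytes://' prefix.
def _example_bytes_descriptor_source(file_descriptor_set):
  # Build the inline form from a FileDescriptorSet proto.
  return b'bytes://' + file_descriptor_set.SerializeToString()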
|
tensorflow-master
|
tensorflow/python/kernel_tests/proto/descriptor_source_test_base.py
|
# =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test case base for testing proto operations."""
# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes as ct
import os
from tensorflow.core.framework import types_pb2
from tensorflow.python.kernel_tests.proto import test_example_pb2
from tensorflow.python.platform import test
class ProtoOpTestBase(test.TestCase):
"""Base class for testing proto decoding and encoding ops."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(ProtoOpTestBase, self).__init__(methodName)
lib = os.path.join(os.path.dirname(__file__), "libtestexample.so")
if os.path.isfile(lib):
ct.cdll.LoadLibrary(lib)
@staticmethod
def named_parameters(extension=True):
parameters = [("defaults", ProtoOpTestBase.defaults_test_case()),
("minmax", ProtoOpTestBase.minmax_test_case()),
("nested", ProtoOpTestBase.nested_test_case()),
("optional", ProtoOpTestBase.optional_test_case()),
("promote", ProtoOpTestBase.promote_test_case()),
("ragged", ProtoOpTestBase.ragged_test_case()),
("shaped_batch", ProtoOpTestBase.shaped_batch_test_case()),
("simple", ProtoOpTestBase.simple_test_case())]
if extension:
parameters.append(("extension", ProtoOpTestBase.extension_test_case()))
return parameters
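  # Each (name, TestCase) pair above is consumed by
  # absl.testing.parameterized.named_parameters in the decode and encode test
  # suites, so every case runs as a separately named test method.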
@staticmethod
def defaults_test_case():
test_case = test_example_pb2.TestCase()
test_case.values.add() # No fields specified, so we get all defaults.
test_case.shapes.append(1)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "double_value_with_default"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(1.0)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "float_value_with_default"
field.dtype = types_pb2.DT_FLOAT
field.value.float_value.append(2.0)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "int64_value_with_default"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(3)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sfixed64_value_with_default"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(11)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sint64_value_with_default"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(13)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "uint64_value_with_default"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(4)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "fixed64_value_with_default"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(6)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "int32_value_with_default"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(5)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sfixed32_value_with_default"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(10)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sint32_value_with_default"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(12)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "uint32_value_with_default"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(9)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "fixed32_value_with_default"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(7)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "bool_value_with_default"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "string_value_with_default"
field.dtype = types_pb2.DT_STRING
field.value.string_value.append("a")
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "bytes_value_with_default"
field.dtype = types_pb2.DT_STRING
field.value.string_value.append("a longer default string")
return test_case
@staticmethod
def minmax_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(-1.7976931348623158e+308)
value.double_value.append(2.2250738585072014e-308)
value.double_value.append(1.7976931348623158e+308)
value.float_value.append(-3.402823466e+38)
value.float_value.append(1.175494351e-38)
value.float_value.append(3.402823466e+38)
value.int64_value.append(-9223372036854775808)
value.int64_value.append(9223372036854775807)
value.sfixed64_value.append(-9223372036854775808)
value.sfixed64_value.append(9223372036854775807)
value.sint64_value.append(-9223372036854775808)
value.sint64_value.append(9223372036854775807)
value.uint64_value.append(0)
value.uint64_value.append(18446744073709551615)
value.fixed64_value.append(0)
value.fixed64_value.append(18446744073709551615)
value.int32_value.append(-2147483648)
value.int32_value.append(2147483647)
value.sfixed32_value.append(-2147483648)
value.sfixed32_value.append(2147483647)
value.sint32_value.append(-2147483648)
value.sint32_value.append(2147483647)
value.uint32_value.append(0)
value.uint32_value.append(4294967295)
value.fixed32_value.append(0)
value.fixed32_value.append(4294967295)
value.bool_value.append(False)
value.bool_value.append(True)
value.string_value.append("")
value.string_value.append("I refer to the infinite.")
test_case.shapes.append(1)
test_case.sizes.append(3)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(-1.7976931348623158e+308)
field.value.double_value.append(2.2250738585072014e-308)
field.value.double_value.append(1.7976931348623158e+308)
test_case.sizes.append(3)
field = test_case.fields.add()
field.name = "float_value"
field.dtype = types_pb2.DT_FLOAT
field.value.float_value.append(-3.402823466e+38)
field.value.float_value.append(1.175494351e-38)
field.value.float_value.append(3.402823466e+38)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "int64_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(-9223372036854775808)
field.value.int64_value.append(9223372036854775807)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sfixed64_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(-9223372036854775808)
field.value.int64_value.append(9223372036854775807)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sint64_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(-9223372036854775808)
field.value.int64_value.append(9223372036854775807)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "uint64_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(0)
field.value.uint64_value.append(18446744073709551615)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "fixed64_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(0)
field.value.uint64_value.append(18446744073709551615)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "int32_value"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(-2147483648)
field.value.int32_value.append(2147483647)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sfixed32_value"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(-2147483648)
field.value.int32_value.append(2147483647)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sint32_value"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(-2147483648)
field.value.int32_value.append(2147483647)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "uint32_value"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(0)
field.value.uint32_value.append(4294967295)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "fixed32_value"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(0)
field.value.uint32_value.append(4294967295)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(False)
field.value.bool_value.append(True)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "string_value"
field.dtype = types_pb2.DT_STRING
field.value.string_value.append("")
field.value.string_value.append("I refer to the infinite.")
return test_case
@staticmethod
def nested_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
message_value = value.message_value.add()
message_value.double_value = 23.5
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "message_value"
field.dtype = types_pb2.DT_STRING
message_value = field.value.message_value.add()
message_value.double_value = 23.5
return test_case
@staticmethod
def optional_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.bool_value.append(True)
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(0.0)
return test_case
@staticmethod
def promote_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.sint32_value.append(2147483647)
value.sfixed32_value.append(2147483647)
value.int32_value.append(2147483647)
value.fixed32_value.append(4294967295)
value.uint32_value.append(4294967295)
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "sint32_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(2147483647)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "sfixed32_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(2147483647)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "int32_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(2147483647)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "fixed32_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(4294967295)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "uint32_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(4294967295)
return test_case
@staticmethod
def ragged_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(23.5)
value.double_value.append(123.0)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(3.1)
value.bool_value.append(False)
test_case.shapes.append(2)
test_case.sizes.append(2)
test_case.sizes.append(1)
test_case.sizes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(23.5)
field.value.double_value.append(123.0)
field.value.double_value.append(3.1)
field.value.double_value.append(0.0)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
field.value.bool_value.append(False)
return test_case
@staticmethod
def shaped_batch_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(23.5)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(44.0)
value.bool_value.append(False)
value = test_case.values.add()
value.double_value.append(3.14159)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(1.414)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(-32.2)
value.bool_value.append(False)
value = test_case.values.add()
value.double_value.append(0.0001)
value.bool_value.append(True)
test_case.shapes.append(3)
test_case.shapes.append(2)
for _ in range(12):
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(23.5)
field.value.double_value.append(44.0)
field.value.double_value.append(3.14159)
field.value.double_value.append(1.414)
field.value.double_value.append(-32.2)
field.value.double_value.append(0.0001)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
field.value.bool_value.append(False)
field.value.bool_value.append(True)
field.value.bool_value.append(True)
field.value.bool_value.append(False)
field.value.bool_value.append(True)
return test_case
@staticmethod
def extension_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
message_value = value.Extensions[test_example_pb2.ext_value].add()
message_value.double_value = 23.5
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = test_example_pb2.ext_value.full_name
field.dtype = types_pb2.DT_STRING
message_value = field.value.Extensions[test_example_pb2.ext_value].add()
message_value.double_value = 23.5
return test_case
@staticmethod
def simple_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(23.5)
value.bool_value.append(True)
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(23.5)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
return test_case
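# Illustrative invariant (not part of the original suite): every test case
# above supplies one repeat count per (message, requested field) pair, i.e.
# len(sizes) == prod(shapes) * len(fields).
def _check_test_case_invariant(test_case):
  num_messages = 1
  for dim in test_case.shapes:
    num_messages *= dim
  assert len(test_case.sizes) == num_messages * len(test_case.fields)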
|
tensorflow-master
|
tensorflow/python/kernel_tests/proto/proto_op_test_base.py
|
# =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Table-driven test for encode_proto op.
It tests that encode_proto is a lossless inverse of decode_proto
(for the specified fields).
"""
# Python3 readiness boilerplate
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.kernel_tests.proto import proto_op_test_base as test_base
from tensorflow.python.kernel_tests.proto import test_example_pb2
from tensorflow.python.ops import array_ops
class EncodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
"""Base class for testing proto encoding ops."""
def __init__(self, decode_module, encode_module, methodName='runTest'): # pylint: disable=invalid-name
"""EncodeProtoOpTestBase initializer.
Args:
decode_module: a module containing the `decode_proto_op` method
encode_module: a module containing the `encode_proto_op` method
methodName: the name of the test method (same as for test.TestCase)
"""
super(EncodeProtoOpTestBase, self).__init__(methodName)
self._decode_module = decode_module
self._encode_module = encode_module
def testBadSizesShape(self):
if context.executing_eagerly():
expected_error = (errors.InvalidArgumentError,
r'Invalid shape for field double_value.')
else:
expected_error = (ValueError,
r'Shape must be at least rank 2 but is rank 0')
with self.assertRaisesRegexp(*expected_error):
self.evaluate(
self._encode_module.encode_proto(
sizes=1,
values=[np.double(1.0)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
def testBadInputs(self):
# Invalid field name
with self.assertRaisesOpError('Unknown field: non_existent_field'):
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1]],
values=[np.array([[0.0]], dtype=np.int32)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['non_existent_field']))
# Incorrect types.
with self.assertRaisesOpError('Incompatible type for field double_value.'):
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1]],
values=[np.array([[0.0]], dtype=np.int32)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
# Incorrect shapes of sizes.
for sizes_value in 1, np.array([[[0, 0]]]):
with self.assertRaisesOpError(
r'sizes should be batch_size \+ \[len\(field_names\)\]'):
if context.executing_eagerly():
self.evaluate(
self._encode_module.encode_proto(
sizes=sizes_value,
values=[np.array([[0.0]])],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
else:
with self.cached_session():
sizes = array_ops.placeholder(dtypes.int32)
values = array_ops.placeholder(dtypes.float64)
self._encode_module.encode_proto(
sizes=sizes,
values=[values],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']).eval(feed_dict={
sizes: sizes_value,
values: [[0.0]]
})
# Inconsistent shapes of values.
with self.assertRaisesOpError('Values must match up to the last dimension'):
if context.executing_eagerly():
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1, 1]],
values=[np.array([[0.0]]),
np.array([[0], [0]])],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value', 'int32_value']))
else:
with self.cached_session():
values1 = array_ops.placeholder(dtypes.float64)
values2 = array_ops.placeholder(dtypes.int32)
(self._encode_module.encode_proto(
sizes=[[1, 1]],
values=[values1, values2],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value', 'int32_value']).eval(feed_dict={
values1: [[0.0]],
values2: [[0], [0]]
}))
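  # For reference (illustrative): a well-formed call for a batch of shape [2]
  # with two fields passes `sizes` of shape [2, 2] and one values tensor per
  # field, each of shape [2, max_repeats_for_that_field].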
def _testRoundtrip(self, in_bufs, message_type, fields):
field_names = [f.name for f in fields]
out_types = [f.dtype for f in fields]
with self.cached_session() as sess:
sizes, field_tensors = self._decode_module.decode_proto(
in_bufs,
message_type=message_type,
field_names=field_names,
output_types=out_types)
out_tensors = self._encode_module.encode_proto(
sizes,
field_tensors,
message_type=message_type,
field_names=field_names)
out_bufs, = sess.run([out_tensors])
# Check that the re-encoded tensor has the same shape.
self.assertEqual(in_bufs.shape, out_bufs.shape)
# Compare the input and output.
for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
in_obj = test_example_pb2.TestValue()
in_obj.ParseFromString(in_buf)
out_obj = test_example_pb2.TestValue()
out_obj.ParseFromString(out_buf)
# Check that the deserialized objects are identical.
self.assertEqual(in_obj, out_obj)
# Check that the input and output serialized messages are identical.
# If we fail here, there is a difference in the serialized
# representation but the new serialization still parses. This could
# be harmless (a change in map ordering?) or it could be bad (e.g.
# loss of packing in the encoding).
self.assertEqual(in_buf, out_buf)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testRoundtrip(self, case):
in_bufs = [value.SerializeToString() for value in case.values]
# np.array silently truncates strings if you don't specify dtype=object.
in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
return self._testRoundtrip(
in_bufs, 'tensorflow.contrib.proto.TestValue', case.fields)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testRoundtripPacked(self, case):
# Now try with the packed serialization.
# We test the packed representations by loading the same test cases using
# PackedTestValue instead of TestValue. To do this we rely on the text
# format being the same for packed and unpacked fields, and reparse the test
# message using the packed version of the proto.
in_bufs = [
# Note: float_format='.17g' is necessary to ensure preservation of
# doubles and floats in text format.
text_format.Parse(
text_format.MessageToString(
value, float_format='.17g'),
test_example_pb2.PackedTestValue()).SerializeToString()
for value in case.values
]
# np.array silently truncates strings if you don't specify dtype=object.
in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
return self._testRoundtrip(
in_bufs, 'tensorflow.contrib.proto.PackedTestValue', case.fields)
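# A compact sketch (illustrative, not part of the original suite) of the
# lossless-inverse property verified above: decoding and re-encoding the same
# fields should reproduce the input serialization byte-for-byte.
def _example_roundtrip(decode_module, encode_module, in_bufs, message_type,
                       field_names, output_types):
  sizes, field_tensors = decode_module.decode_proto(
      in_bufs,
      message_type=message_type,
      field_names=field_names,
      output_types=output_types)
  return encode_module.encode_proto(
      sizes,
      field_tensors,
      message_type=message_type,
      field_names=field_names)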
|
tensorflow-master
|
tensorflow/python/kernel_tests/proto/encode_proto_op_test_base.py
|
# =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for encode_proto op."""
# Python3 readiness boilerplate
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.kernel_tests.proto import encode_proto_op_test_base as test_base
from tensorflow.python.ops import proto_ops
from tensorflow.python.platform import test
class EncodeProtoOpTest(test_base.EncodeProtoOpTestBase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
super(EncodeProtoOpTest, self).__init__(encode_module=proto_ops,
decode_module=proto_ops,
methodName=methodName)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/proto/encode_proto_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for boosted_trees resource kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.kernels.boosted_trees import boosted_trees_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
class ResourceOpsTest(test_util.TensorFlowTestCase):
"""Tests resource_ops."""
@test_util.run_deprecated_v1
def testCreate(self):
with self.cached_session():
ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
resources.initialize_resources(resources.shared_resources()).run()
stamp_token = ensemble.get_stamp_token()
self.assertEqual(0, self.evaluate(stamp_token))
(_, num_trees, num_finalized_trees, num_attempted_layers,
nodes_range) = ensemble.get_states()
self.assertEqual(0, self.evaluate(num_trees))
self.assertEqual(0, self.evaluate(num_finalized_trees))
self.assertEqual(0, self.evaluate(num_attempted_layers))
self.assertAllEqual([0, 1], self.evaluate(nodes_range))
@test_util.run_deprecated_v1
def testCreateWithProto(self):
with self.cached_session():
ensemble_proto = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
bucketized_split {
threshold: 21
left_id: 3
right_id: 4
}
metadata {
gain: 1.4
original_leaf {
scalar: 7.14
}
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 7
left_id: 5
right_id: 6
}
metadata {
gain: 2.7
original_leaf {
scalar: -4.375
}
}
}
nodes {
leaf {
scalar: 6.54
}
}
nodes {
leaf {
scalar: 7.305
}
}
nodes {
leaf {
scalar: -4.525
}
}
nodes {
leaf {
scalar: -4.145
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 75
threshold: 21
left_id: 1
right_id: 2
}
metadata {
gain: -1.4
}
}
nodes {
leaf {
scalar: -0.6
}
}
nodes {
leaf {
scalar: 0.165
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 2
is_finalized: true
}
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 6
last_layer_node_start: 16
last_layer_node_end: 19
}
""", ensemble_proto)
ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble',
stamp_token=7,
serialized_proto=ensemble_proto.SerializeToString())
resources.initialize_resources(resources.shared_resources()).run()
(stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
nodes_range) = ensemble.get_states()
self.assertEqual(7, self.evaluate(stamp_token))
self.assertEqual(2, self.evaluate(num_trees))
self.assertEqual(1, self.evaluate(num_finalized_trees))
self.assertEqual(6, self.evaluate(num_attempted_layers))
self.assertAllEqual([16, 19], self.evaluate(nodes_range))
@test_util.run_deprecated_v1
def testSerializeDeserialize(self):
with self.cached_session():
# Initialize.
ensemble = boosted_trees_ops.TreeEnsemble('ensemble', stamp_token=5)
resources.initialize_resources(resources.shared_resources()).run()
(stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
nodes_range) = ensemble.get_states()
self.assertEqual(5, self.evaluate(stamp_token))
self.assertEqual(0, self.evaluate(num_trees))
self.assertEqual(0, self.evaluate(num_finalized_trees))
self.assertEqual(0, self.evaluate(num_attempted_layers))
self.assertAllEqual([0, 1], self.evaluate(nodes_range))
# Deserialize.
ensemble_proto = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 75
threshold: 21
left_id: 1
right_id: 2
}
metadata {
gain: -1.4
}
}
nodes {
leaf {
scalar: -0.6
}
}
nodes {
leaf {
scalar: 0.165
}
}
}
tree_weights: 0.5
tree_metadata {
num_layers_grown: 4 # it's fake intentionally.
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 5
last_layer_node_start: 3
last_layer_node_end: 7
}
""", ensemble_proto)
with ops.control_dependencies([
ensemble.deserialize(
stamp_token=3,
serialized_proto=ensemble_proto.SerializeToString())
]):
(stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
nodes_range) = ensemble.get_states()
self.assertEqual(3, self.evaluate(stamp_token))
self.assertEqual(1, self.evaluate(num_trees))
# This reads from metadata, not really counting the layers.
self.assertEqual(5, self.evaluate(num_attempted_layers))
self.assertEqual(0, self.evaluate(num_finalized_trees))
self.assertAllEqual([3, 7], self.evaluate(nodes_range))
# Serialize.
new_ensemble_proto = boosted_trees_pb2.TreeEnsemble()
new_stamp_token, new_serialized = ensemble.serialize()
self.assertEqual(3, self.evaluate(new_stamp_token))
new_ensemble_proto.ParseFromString(new_serialized.eval())
self.assertProtoEquals(ensemble_proto, new_ensemble_proto)
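# Note (illustrative, not part of the original test): the stamp token acts as
# a version guard on the ensemble resource. deserialize() installs the
# caller's token (3 above) and serialize() returns the current one, so a
# reader holding a stale token can detect that the ensemble changed.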
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/boosted_trees/resource_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for boosted_trees stats kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
_INEQUALITY_DEFAULT_LEFT = 'inequality_default_left'.encode('utf-8')
class StatsOpsTest(test_util.TensorFlowTestCase):
"""Tests stats_ops."""
def _get_stats_summary_for_split(self):
return [
[
[[0., 0.], [.08, .09], [0., 0.], [0., 0.]], # node 0; ignored
[[0., 0.], [.15, .36], [.06, .07], [.1, .2]], # node 1
[[0., 0.], [-.33, .58], [0., 0.], [.3, .4]], # node 2
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 3; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 4; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 5; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 6; ignored
], # feature 0
[
[[0., 0.], [0., 0.], [.08, .09], [0., 0.]], # node 0; ignored
[[0., 0.], [.3, .5], [-.05, .06], [.06, .07]], # node 1
[[.1, .1], [.2, .3], [-.4, .5], [.07, .08]], # node 2
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 3; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 4; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 5; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 6; ignored
], # feature 1
] # shape=[num_features, max_splits, num_buckets, 2]
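  # For reference (worked by hand; matches the expected values below): each
  # bucket entry is a [gradient, hessian] pair. With no regularization the
  # gain of a split is G_l**2/H_l + G_r**2/H_r - G_p**2/H_p and each child
  # contribution is -G/H. E.g. for node 1, feature 0, threshold 1: the left
  # buckets 0-1 give G=.15, H=.36; the right buckets 2-3 give G=.16, H=.27;
  # the parent has G=.31, H=.63, so the gain is
  #   .15**2/.36 + .16**2/.27 - .31**2/.63 ~= 0.004775
  # and the left contribution is -.15/.36 ~= -0.416667. With L2 > 0, H is
  # replaced by H + l2 throughout.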
def testCalculateBestGainsWithoutRegularization(self):
"""Testing Gain calculation without any regularization."""
with self.cached_session() as sess:
max_splits = 7
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary_list = self._get_stats_summary_for_split()
(node_ids_list, gains_list, thresholds_list, left_node_contribs_list,
right_node_contribs_list
) = boosted_trees_ops.calculate_best_gains_per_feature(
node_id_range,
stats_summary_list,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=0,
max_splits=max_splits)
self.assertAllEqual([[1, 2], [1, 2]], self.evaluate(node_ids_list))
self.assertAllClose([[0.004775, 0.41184], [0.02823, 0.41184]],
self.evaluate(gains_list))
self.assertAllEqual([[1, 1], [1, 1]], self.evaluate(thresholds_list))
# The left node contrib will be later added to the previous node value to
# make the left node value, and the same for right node contrib.
self.assertAllClose([[[-.416667], [.568966]], [[-.6], [-.75]]],
self.evaluate(left_node_contribs_list))
self.assertAllClose([[[-.592593], [-.75]], [[-.076923], [.568966]]],
self.evaluate(right_node_contribs_list))
def testCalculateBestMultiDimFeatureSplitsWithoutRegularization(self):
"""Testing best split calculation without any regularization."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary = np.asarray(self._get_stats_summary_for_split())
# reshape to [max_splits, num_features, num_buckets, 2]
stats_summary = np.moveaxis(stats_summary, 0, 1)
(node_ids, gains, feature_dimensions, thresholds, left_node_contribs,
right_node_contribs, split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=0,
logits_dimension=1))
# Get same result as v1 op (CalculateBestGainsPerFeature), and find the
# feature dimension that has the best gain.
self.assertAllEqual([1, 2], node_ids)
self.assertAllClose([0.02823, 0.41184], gains)
self.assertAllEqual([1, 1], thresholds)
self.assertAllEqual([1, 0], feature_dimensions)
# The left node contrib will be later added to the previous node value to
# make the left node value, and the same for right node contrib.
self.assertAllClose([[-.6], [.568966]], left_node_contribs)
self.assertAllClose([[-.076923], [-.75]], right_node_contribs)
self.assertAllEqual([_INEQUALITY_DEFAULT_LEFT] * 2, split_types)
def testCalculateBestGainsWithL2(self):
"""Testing Gain calculation with L2."""
with self.cached_session() as sess:
max_splits = 7
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary_list = self._get_stats_summary_for_split()
(node_ids_list, gains_list, thresholds_list, left_node_contribs_list,
right_node_contribs_list
) = boosted_trees_ops.calculate_best_gains_per_feature(
node_id_range,
stats_summary_list,
l1=0.0,
l2=0.1,
tree_complexity=0.0,
min_node_weight=0,
max_splits=max_splits)
self.assertAllEqual([[1, 2], [1, 2]], self.evaluate(node_ids_list))
self.assertAllClose([[0., 0.33931375], [0.01879096, 0.33931375]],
self.evaluate(gains_list))
self.assertAllEqual([[0, 1], [1, 1]], self.evaluate(thresholds_list))
# The left node contrib will be later added to the previous node value to
# make the left node value, and the same for right node contrib.
self.assertAllClose([[[0.], [.485294]], [[-.5], [-.6]]],
self.evaluate(left_node_contribs_list))
self.assertAllClose([[[-.424658], [-.6]], [[-.043478], [.485294]]],
self.evaluate(right_node_contribs_list))
def testCalculateMultiDimBestFeatureSplitsWithL2(self):
"""Testing best split calculation with L2."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary = np.asarray(self._get_stats_summary_for_split())
# reshape to [max_splits, num_features, num_buckets, 2]
stats_summary = np.moveaxis(stats_summary, 0, 1)
(node_ids, gains, feature_dimensions, thresholds, left_node_contribs,
right_node_contribs, split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.0,
l2=0.1,
tree_complexity=0.0,
min_node_weight=0,
logits_dimension=1))
# Get same result as v1 op (CalculateBestGainsPerFeature), and find the
# feature dimension that has the best gain.
self.assertAllEqual([1, 2], node_ids)
self.assertAllClose([0.01879096, 0.33931375], gains)
self.assertAllEqual([1, 1], thresholds)
self.assertAllEqual([1, 0], feature_dimensions)
    # The left node contrib will be later added to the previous node value to
    # make the left node value, and the same for right node contrib.
self.assertAllClose([[-.5], [.485294]], left_node_contribs)
self.assertAllClose([[-.043478], [-.6]], right_node_contribs)
self.assertAllEqual([_INEQUALITY_DEFAULT_LEFT] * 2, split_types)
def testCalculateBestGainsWithL1(self):
"""Testing Gain calculation with L1."""
with self.cached_session() as sess:
max_splits = 7
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary_list = self._get_stats_summary_for_split()
l1 = 0.1
(node_ids_list, gains_list, thresholds_list, left_node_contribs_list,
right_node_contribs_list
) = boosted_trees_ops.calculate_best_gains_per_feature(
node_id_range,
stats_summary_list,
l1=l1,
l2=0.0,
tree_complexity=0.0,
min_node_weight=0,
max_splits=max_splits)
self.assertAllEqual([[0, 1], [1, 1]], self.evaluate(thresholds_list))
self.assertAllEqual([[1, 2], [1, 2]], self.evaluate(node_ids_list))
self.assertAllClose([[[0.0], [0.3965517]], [[-0.4], [-0.5]]],
self.evaluate(left_node_contribs_list))
self.assertAllClose([[[-0.3333333], [-0.5]], [[0.0], [0.396552]]],
self.evaluate(right_node_contribs_list))
# Gain should also include an adjustment of the gradient by l1.
self.assertAllClose([[0.0, 0.191207], [0.01, 0.191207]],
self.evaluate(gains_list))
def testCalculateBestMultiDimFeatureSplitsWithL1(self):
"""Testing best split calculation with L1."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary = np.asarray(self._get_stats_summary_for_split())
# reshape to [max_splits, num_features, num_buckets, 2]
stats_summary = np.moveaxis(stats_summary, 0, 1)
l1 = 0.1
(node_ids, gains, feature_dimensions, thresholds, left_node_contribs,
right_node_contribs, split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=l1,
l2=0.,
tree_complexity=0.0,
min_node_weight=0,
logits_dimension=1))
# Get same result as v1 op (CalculateBestGainsPerFeature), and find the
# feature dimension that has the best gain.
self.assertAllEqual([1, 2], node_ids)
# Gain should also include an adjustment of the gradient by l1.
self.assertAllClose([0.01, 0.191207], gains)
self.assertAllEqual([1, 1], thresholds)
self.assertAllClose([[-0.4], [-0.5]], left_node_contribs)
self.assertAllClose([[0.], [0.396552]], right_node_contribs)
self.assertAllEqual([1, 1], feature_dimensions)
self.assertAllEqual([_INEQUALITY_DEFAULT_LEFT] * 2, split_types)
def testCalculateBestGainsWithTreeComplexity(self):
"""Testing best gain calculation with tree complexity."""
with self.cached_session() as sess:
max_splits = 7
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary_list = self._get_stats_summary_for_split()
l2 = 0.1
tree_complexity = 3.
(node_ids_list, gains_list, thresholds_list, left_node_contribs_list,
right_node_contribs_list
) = boosted_trees_ops.calculate_best_gains_per_feature(
node_id_range,
stats_summary_list,
l1=0.0,
l2=l2,
tree_complexity=tree_complexity,
min_node_weight=0,
max_splits=max_splits)
self.assertAllEqual([[1, 2], [1, 2]], self.evaluate(node_ids_list))
self.assertAllClose([[-3., -2.66068625], [-2.98120904, -2.66068625]],
self.evaluate(gains_list))
self.assertAllEqual([[0, 1], [1, 1]], self.evaluate(thresholds_list))
# The left node contrib will be later added to the previous node value to
# make the left node value, and the same for right node contrib.
self.assertAllClose([[[0.], [.485294]], [[-.5], [-.6]]],
self.evaluate(left_node_contribs_list))
self.assertAllClose([[[-.424658], [-.6]], [[-.043478], [.485294]]],
self.evaluate(right_node_contribs_list))
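  # Note (worked from the L2 case above): tree_complexity is subtracted
  # directly from each gain, e.g. 0.33931375 - 3.0 = -2.66068625 and
  # 0.01879096 - 3.0 = -2.98120904; the node contributions are unchanged.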
def testCalculateBestMultiDimFeatureSplitsWithTreeComplexity(self):
"""Testing best split calculation with tree complexity."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary = np.asarray(self._get_stats_summary_for_split())
# reshape to [max_splits, num_features, num_buckets, 2]
stats_summary = np.moveaxis(stats_summary, 0, 1)
l2 = 0.1
tree_complexity = 3.
(node_ids, gains, feature_dimensions, thresholds, left_node_contribs,
right_node_contribs, split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.,
l2=l2,
tree_complexity=tree_complexity,
min_node_weight=0,
logits_dimension=1))
# Get same result as v1 op (CalculateBestGainsPerFeature), and find the
# feature dimension that has the best gain.
self.assertAllEqual([1, 2], node_ids)
    # Gains include the subtracted tree_complexity penalty.
self.assertAllClose([-2.98120904, -2.66068625], gains)
self.assertAllEqual([1, 1], thresholds)
self.assertAllClose([[-0.5], [0.485294]], left_node_contribs)
self.assertAllClose([[-0.043478], [-.6]], right_node_contribs)
self.assertAllEqual([1, 0], feature_dimensions)
self.assertAllEqual([_INEQUALITY_DEFAULT_LEFT] * 2, split_types)
def testCalculateBestGainsWithMinNodeWeight(self):
"""Testing Gain calculation with min node weight."""
with self.cached_session() as sess:
max_splits = 7
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary_list = [
[
[[0., 0.], [.08, .09], [0., 0.], [0., 0.]], # node 0; ignored
[[0., 0.], [.15, .036], [.06, .07], [.1, .2]], # node 1
[[0., 0.], [-.33, .68], [0., 0.], [.3, .4]], # node 2
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 3; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 4; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 5; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 6; ignored
], # feature 0
[
[[0., 0.], [0., 0.], [.08, .09], [0., 0.]], # node 0; ignored
[[0., 0.], [.3, .5], [-.05, .6], [.06, .07]], # node 1
[[.1, .1], [.2, .03], [-.4, .05], [.07, .08]], # node 2
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 3; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 4; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 5; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 6; ignored
], # feature 1
] # num_features * shape=[max_splits, num_buckets, 2]
(node_ids_list, gains_list, thresholds_list, left_node_contribs_list,
right_node_contribs_list
) = boosted_trees_ops.calculate_best_gains_per_feature(
node_id_range,
stats_summary_list,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=1,
max_splits=max_splits)
      # We can't split node 1 on feature 0 and node 2 on feature 1 because of
      # the min node weight.
self.assertAllEqual([[2], [1]], self.evaluate(node_ids_list))
self.assertAllClose([[0.384314], [0.098013]], self.evaluate(gains_list))
self.assertAllEqual([[1], [1]], self.evaluate(thresholds_list))
self.assertAllClose([[[0.4852941]], [[-.6]]],
self.evaluate(left_node_contribs_list))
self.assertAllClose([[[-0.75]], [[-0.014925]]],
self.evaluate(right_node_contribs_list))
def testCalculateMultiDimBestSplitsWithMinNodeWeight(self):
"""Testing best split calculation with min node weight."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary = np.asarray([
[
[[0., 0.], [.08, .09], [0., 0.], [0., 0.]], # node 0; ignored
[[0., 0.], [.15, .36], [.06, .61], [.1, .2]], # node 1
[[0., 0.], [-.33, .68], [0., 0.], [.3, .4]], # node 2
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 3; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 4; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 5; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 6; ignored
], # feature 0
[
[[0., 0.], [0., 0.], [.08, .09], [0., 0.]], # node 0; ignored
[[0., 0.], [.3, .5], [-.05, .6], [.06, .07]], # node 1
[[.1, 1.], [.2, -.05], [-.4, .05], [.07, .08]], # node 2
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 3; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 4; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 5; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 6; ignored
], # feature 1
]) # num_features * shape=[max_splits, num_buckets, 2]
# reshape to [max_splits, num_features, num_buckets, 2]
stats_summary = np.moveaxis(stats_summary, 0, 1)
(node_ids, gains, feature_dimensions, thresholds, left_node_contribs,
right_node_contribs, split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.,
l2=0.,
tree_complexity=0.,
min_node_weight=1,
logits_dimension=1))
self.assertAllEqual([1, 2], node_ids)
    # Gains here are unregularized since l1, l2 and tree_complexity are all 0.
self.assertAllClose([0.098013, 0.931596], gains)
self.assertAllEqual([1, 1], thresholds)
self.assertAllClose([[-.6], [-0.315789]], left_node_contribs)
self.assertAllClose([[-0.014925], [2.53846]], right_node_contribs)
self.assertAllEqual([1, 1], feature_dimensions)
self.assertAllEqual([_INEQUALITY_DEFAULT_LEFT] * 2, split_types)
def testCalculateBestGainsWithMinNodeWeightNoSplitOnFeaturePossible(self):
"""Testing Gain calculation with min node weight and no split."""
with self.cached_session() as sess:
max_splits = 7
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary_list = [
[
[[0., 0.], [.08, .09], [0., 0.], [0., 0.]], # node 0; ignored
[[0., 0.], [.15, .0036], [.06, .007], [.1, .2]], # node 1
[[0., 0.], [-.33, .068], [0., 0.], [.3, .04]], # node 2
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 3; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 4; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 5; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 6; ignored
], # feature 0
[
[[0., 0.], [0., 0.], [.08, .09], [0., 0.]], # node 0; ignored
[[0., 0.], [.3, .5], [-.05, .6], [.06, .07]], # node 1
[[.1, .1], [.2, .03], [-.4, .05], [.07, .08]], # node 2
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 3; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 4; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 5; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 6; ignored
], # feature 1
] # num_features * shape=[max_splits, num_buckets, 2]
(node_ids_list, _, _, _,
_) = boosted_trees_ops.calculate_best_gains_per_feature(
node_id_range,
stats_summary_list,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=1,
max_splits=max_splits)
# We can't split either of the nodes on the first feature
self.assertEqual(2, len(self.evaluate(node_ids_list)))
self.assertAllEqual([], self.evaluate(node_ids_list)[0])
self.assertAllEqual([1], self.evaluate(node_ids_list)[1])
# Now check when we can't split on any feature
(node_ids_list, _, _, _,
_) = boosted_trees_ops.calculate_best_gains_per_feature(
node_id_range,
stats_summary_list,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=10,
max_splits=max_splits)
self.assertAllEqual([[], []], self.evaluate(node_ids_list))
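      # Editor's note (sketch, not from the original test): min_node_weight
      # appears to gate each (node, feature) pair on the parent's summed
      # hessians, e.g. with the stats above:
      #   node 1 / feature 0: .0036 + .007 + .2    = .2106 <  1 -> no split
      #   node 2 / feature 0: .068 + .04           = .108  <  1 -> no split
      #   node 1 / feature 1: .5 + .6 + .07        = 1.17  >= 1 -> split ok
      #   node 2 / feature 1: .1 + .03 + .05 + .08 = .26   <  1 -> no split
      # which matches the [[], [1]] result; raising the threshold to 10 rules
      # out every node, giving [[], []].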
def testCalculateBestMultiDimFeatureSplitsWithNoSplitOnFeaturePossible(self):
"""Testing best split calculation with min node weight and no split."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
stats_summary = np.asarray([
[
[[0., 0.], [.08, .09], [0., 0.], [0., 0.]], # node 0; ignored
[[0., 0.], [.15, .36], [.06, .7], [.1, .2]], # node 1
[[0., 0.], [-.33, .068], [0., 0.], [.3, .04]], # node 2
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 3; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 4; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 5; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 6; ignored
], # feature 0
[
[[0., 0.], [0., 0.], [.08, .09], [0., 0.]], # node 0; ignored
[[0., 0.], [.3, .5], [-.05, .06], [.06, .7]], # node 1
[[.1, .1], [.2, -.05], [-.4, .05], [.07, .08]], # node 2
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 3; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 4; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 5; ignored
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]], # node 6; ignored
], # feature 1
]) # num_features * shape=[max_splits, num_buckets, 2]
# reshape to [max_splits, num_features, num_buckets, 2]
stats_summary = np.moveaxis(stats_summary, 0, 1)
(node_ids, _, _, _, _, _,
_) = boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=1,
logits_dimension=1)
# We can't split either of the nodes on the first feature
self.assertAllEqual([1], node_ids)
# Now check when we can't split on any feature
(node_ids, _, _, _, _, _,
_) = boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=10,
logits_dimension=1)
self.assertAllEqual([], node_ids)
@test_util.run_deprecated_v1
def testMakeStatsSummarySimple(self):
"""Simple test for MakeStatsSummary."""
expected_stats_summary = np.asarray([1., 5., 2., 6., 3., 7., 4., 8.])
self.assertAllClose(
expected_stats_summary.reshape((1, 2, 2, 2)),
boosted_trees_ops.make_stats_summary(
node_ids=[0, 0, 1, 1],
gradients=[[1.], [2.], [3.], [4.]],
hessians=[[5.], [6.], [7.], [8.]],
bucketized_features_list=[[0, 1, 0, 1]],
max_splits=2,
num_buckets=2))
@test_util.run_deprecated_v1
def testAggregateStatsSimple(self):
# Get the same result as MakeStatsSummary Op.
expected_stats_summary = np.asarray([1., 5., 2., 6., 3., 7., 4., 8.])
# shape=[max_splits, num_buckets, feature_dim, stats_dim]
expected_stats_summary = np.reshape(expected_stats_summary, (2, 2, 1, 2))
# Reshape feature dim and bucket id axes
expected_stats_summary = np.swapaxes(expected_stats_summary, 1, 2)
self.assertAllClose(
expected_stats_summary,
boosted_trees_ops.boosted_trees_aggregate_stats(
node_ids=[0, 0, 1, 1],
gradients=[[1.], [2.], [3.], [4.]],
hessians=[[5.], [6.], [7.], [8.]],
feature=[[0], [1], [0], [1]],
max_splits=2,
num_buckets=2))
def testMakeStatsSummaryAccumulate(self):
"""Tests that Summary actually accumulates."""
with self.cached_session():
max_splits = 3
num_buckets = 4
node_ids = [1, 1, 2, 2, 1, 1, 2, 0]
gradients = [[.1], [.2], [.3], [-.4], [-.05], [.06], [.07], [.08]]
hessians = [[.2], [.3], [.4], [.5], [.06], [.07], [.08], [.09]]
# Tests a single feature.
bucketized_features = [[3, 1, 2, 0, 1, 2, 0, 1]]
result = boosted_trees_ops.make_stats_summary(
node_ids, gradients, hessians, bucketized_features, max_splits,
          num_buckets) # shape=[num_features, max_splits, num_buckets, 2]
self.assertAllClose(
[[
[[0., 0.], [.08, .09], [0., 0.], [0., 0.]], # node 0
[[0., 0.], [.15, .36], [.06, .07], [.1, .2]], # node 1
[[-.33, .58], [0., 0.], [.3, .4], [0., 0.]], # node 2
]],
self.evaluate(result))
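  # --- Editor's sketch, not part of the original suite -----------------------
  # A NumPy restatement of the accumulation make_stats_summary performs,
  # assuming the [num_features, max_splits, num_buckets, 2] output layout seen
  # in the assertion above.
  def _reference_stats_summary_sketch(self, node_ids, gradients, hessians,
                                      bucketized_features, max_splits,
                                      num_buckets):
    """Hypothetical reference implementation (documentation only)."""
    num_features = len(bucketized_features)
    summary = np.zeros((num_features, max_splits, num_buckets, 2))
    for f in range(num_features):
      for i, node_id in enumerate(node_ids):
        bucket = bucketized_features[f][i]
        summary[f, node_id, bucket, 0] += gradients[i][0]  # grad scatter-add
        summary[f, node_id, bucket, 1] += hessians[i][0]  # hessian scatter-add
    return summary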
def testAggregateStatsAccumulate(self):
"""Tests that Summary actually accumulates."""
max_splits = 3
num_buckets = 4
node_ids = [1, 1, 2, 2, 1, 1, 2, 0]
gradients = [[.1], [.2], [.3], [-.4], [-.05], [.06], [.07], [.08]]
hessians = [[.2], [.3], [.4], [.5], [.06], [.07], [.08], [.09]]
# Tests a single feature.
bucketized_features = [[3], [1], [2], [0], [1], [2], [0], [1]]
result = boosted_trees_ops.boosted_trees_aggregate_stats(
node_ids, gradients, hessians, bucketized_features, max_splits,
num_buckets)
# shape=[max_splits, num_buckets, feature_dim, stats_dim]
# Get the same result as MakeStatsSummary Op.
expected_stats_summary = [
[[[0., 0.]], [[.08, .09]], [[0., 0.]], [[0., 0.]]],
[[[0., 0.]], [[.15, .36]], [[.06, .07]], [[.1, .2]]],
[[[-.33, .58]], [[0., 0.]], [[.3, .4]], [[0., 0.]]],
]
# Swap feature dim and bucket id axis
expected_stats_summary = np.swapaxes(expected_stats_summary, 1, 2)
self.assertAllClose(expected_stats_summary, result)
def testMakeStatsSummaryMultipleFeatures(self):
"""Tests that MakeStatsSummary works for multiple features."""
with self.cached_session():
max_splits = 3
num_buckets = 4
node_ids = [1, 1, 2, 2, 1, 1, 2, 0]
gradients = [[.1], [.2], [.3], [-.4], [-.05], [.06], [.07], [.08]]
hessians = [[.2], [.3], [.4], [.5], [.06], [.07], [.08], [.09]]
# Tests multiple features.
      # The second feature's output is stacked along the leading feature axis.
bucketized_features = [[3, 1, 2, 0, 1, 2, 0, 1], [0, 0, 0, 2, 2, 3, 3, 2]]
result = boosted_trees_ops.make_stats_summary(
node_ids, gradients, hessians, bucketized_features, max_splits,
          num_buckets) # shape=[num_features, max_splits, num_buckets, 2]
self.assertAllClose(
[
[
[[0., 0.], [.08, .09], [0., 0.], [0., 0.]], # node 0
[[0., 0.], [.15, .36], [.06, .07], [.1, .2]], # node 1
[[-.33, .58], [0., 0.], [.3, .4], [0., 0.]], # node 2
], # feature 0
[
[[0., 0.], [0., 0.], [.08, .09], [0., 0.]], # node 0
[[.3, .5], [0., 0.], [-.05, .06], [.06, .07]], # node 1
[[.3, .4], [0., 0.], [-.4, .5], [.07, .08]], # node 2
], # feature 1
],
self.evaluate(result))
def testAggregatesSummaryMultipleDimensionFeature(self):
"""Tests that MakeStatsSummary works for multiple features."""
expected_stats_summary = np.asarray(
[[0, 0, 0, 0, .08, .09, 0, 0, 0, 0, .08, .09, 0, 0, 0, 0],
[0, 0, .3, .5, .15, .36, 0, 0, .06, .07, -.05, .06, .1, .2, .06, .07],
[-.33, .58, .3, .4, 0, 0, 0, 0, .3, .4, -.4, .5, 0, 0, .07, .08]])
with self.cached_session():
max_splits = 3
num_buckets = 4
node_ids = [1, 1, 2, 2, 1, 1, 2, 0]
gradients = [[.1], [.2], [.3], [-.4], [-.05], [.06], [.07], [.08]]
hessians = [[.2], [.3], [.4], [.5], [.06], [.07], [.08], [.09]]
# Tests multiple features.
bucketized_features = [[3, 0], [1, 0], [2, 0], [0, 2], [1, 2], [2, 3],
[0, 3], [1, 2]]
result = boosted_trees_ops.boosted_trees_aggregate_stats(
node_ids, gradients, hessians, bucketized_features, max_splits,
num_buckets)
# Reshape to [max_splits, num_buckets, feature_dim, stats_dim]
expected_stats_summary = np.reshape(expected_stats_summary, (3, 4, 2, 2))
# Swap feature_dim and bucket_id axis
expected_stats_summary = np.swapaxes(expected_stats_summary, 1, 2)
self.assertAllClose(expected_stats_summary, result)
def testAggregateStatsMultiClass(self):
"""Tests that Summary actually accumulates."""
with self.cached_session():
max_splits = 3
num_buckets = 4
node_ids = [1, 1, 2, 2, 1, 1, 2, 0]
gradients = [[.1, .2], [.2, .4], [.3, .6], [-.4, -.8], [-.05, -.1],
[.06, .12], [.07, .14], [.08, .16]]
hessians = [[.2, .6], [.3, .9], [.4, 1.2], [.5, 1.5], [.06, .18],
[.07, .21], [.08, .24], [.09, .27]]
# Tests a single feature.
bucketized_features = [[3], [1], [2], [0], [1], [2], [0], [1]]
result = boosted_trees_ops.boosted_trees_aggregate_stats(
node_ids, gradients, hessians, bucketized_features, max_splits,
num_buckets)
# shape=[max_splits, num_buckets, feature_dim, stats_dim]
expected_stats_summary = [
[[[0., 0., 0., 0.]], [[.08, .16, .09, .27]], [[0., 0., 0., 0.]],
[[0., 0., 0., 0.]]],
[[[0., 0., 0., 0.]], [[.15, 0.3, .36, 1.08]], [[.06, 0.12, .07,
0.21]],
[[.1, .2, .2, .6]]],
[[[-.33, -.66, .58, 1.74]], [[0., 0., 0., 0.]], [[.3, .6, .4, 1.2]],
[[0., 0., 0., 0.]]],
]
expected_stats_summary = np.swapaxes(expected_stats_summary, 1, 2)
self.assertAllClose(expected_stats_summary, result)
def _get_dense_summaries_from_sparse_features(self, max_splits, num_buckets,
batch_size, feature_dims,
logits_dims, hess_dims):
np.random.seed(0)
stats_dims = logits_dims + hess_dims
node_ids = np.random.randint(max_splits, size=batch_size)
gradients = np.random.uniform(5.0, size=(batch_size, logits_dims))
hessians = np.random.uniform(5.0, size=(batch_size, hess_dims))
dense_indices = np.random.randint(2, size=(batch_size, feature_dims))
feature_indices = np.argwhere(dense_indices == 1)
missing_feature_indices = np.argwhere(dense_indices == 0)
feature_values = np.random.randint(num_buckets, size=len(feature_indices))
feature_shape = np.asarray([batch_size, feature_dims])
# Last bucket is for missing values.
dense_summary = np.zeros(
(max_splits, feature_dims, num_buckets + 1, stats_dims))
for (instance, f_dim), bucket in zip(feature_indices, feature_values):
node_id = node_ids[instance]
dense_summary[node_id][f_dim][bucket] += np.concatenate(
[gradients[instance], hessians[instance]])
for instance, f_dim in missing_feature_indices:
node_id = node_ids[instance]
dense_summary[node_id][f_dim][num_buckets] += np.concatenate(
[gradients[instance], hessians[instance]])
return (node_ids, gradients, hessians, feature_indices, feature_values,
feature_shape, dense_summary)
def testMakeSparseStatsSummarySingleFeatureDimension(self):
batch_size = 10
max_splits = 2
num_buckets = 2
feature_dims = 1
logits_dims = 1
hess_dims = 1
(node_ids, gradients, hessians, feature_indices, feature_values,
feature_shape,
expected_dense_summary) = self._get_dense_summaries_from_sparse_features(
max_splits, num_buckets, batch_size, feature_dims, logits_dims,
hess_dims)
(summary_indices, summary_values,
summary_shape) = boosted_trees_ops.boosted_trees_sparse_aggregate_stats(
node_ids, gradients, hessians, feature_indices, feature_values,
feature_shape, max_splits, num_buckets)
dense_result = sparse_ops.sparse_to_dense(summary_indices, summary_shape,
summary_values)
self.assertAllClose(expected_dense_summary, dense_result)
def testMakeSparseStatsSummaryMultiDimFeature(self):
batch_size = 10
max_splits = 2
num_buckets = 2
    feature_dims = 2
logits_dims = 1
hess_dims = 1
(node_ids, gradients, hessians, feature_indices, feature_values,
feature_shape,
expected_dense_summary) = self._get_dense_summaries_from_sparse_features(
max_splits, num_buckets, batch_size, feature_dims, logits_dims,
hess_dims)
(summary_indices, summary_values,
summary_shape) = boosted_trees_ops.boosted_trees_sparse_aggregate_stats(
node_ids, gradients, hessians, feature_indices, feature_values,
feature_shape, max_splits, num_buckets)
dense_result = sparse_ops.sparse_to_dense(summary_indices, summary_shape,
summary_values)
self.assertAllClose(expected_dense_summary, dense_result)
def testMakeSparseStatsSummaryMultiClass(self):
batch_size = 10
max_splits = 2
num_buckets = 2
feature_dims = 1
logits_dims = 2
hess_dims = 2
(node_ids, gradients, hessians, feature_indices, feature_values,
feature_shape,
expected_dense_summary) = self._get_dense_summaries_from_sparse_features(
max_splits, num_buckets, batch_size, feature_dims, logits_dims,
hess_dims)
(summary_indices, summary_values,
summary_shape) = boosted_trees_ops.boosted_trees_sparse_aggregate_stats(
node_ids, gradients, hessians, feature_indices, feature_values,
feature_shape, max_splits, num_buckets)
dense_result = sparse_ops.sparse_to_dense(summary_indices, summary_shape,
summary_values)
self.assertAllClose(expected_dense_summary, dense_result)
def testMakeSparseStatsSummaryMultiClassAndMultiFeatureDim(self):
batch_size = 10
max_splits = 2
num_buckets = 2
feature_dim = 2
logits_dims = 2
hess_dims = 2
(node_ids, gradients, hessians, feature_indices, feature_values,
feature_shape,
expected_dense_summary) = self._get_dense_summaries_from_sparse_features(
max_splits, num_buckets, batch_size, feature_dim, logits_dims,
hess_dims)
(summary_indices, summary_values,
summary_shape) = boosted_trees_ops.boosted_trees_sparse_aggregate_stats(
node_ids, gradients, hessians, feature_indices, feature_values,
feature_shape, max_splits, num_buckets)
dense_result = sparse_ops.sparse_to_dense(summary_indices, summary_shape,
summary_values)
self.assertAllClose(expected_dense_summary, dense_result)
def _verify_precision(self, length):
with self.cached_session():
max_splits = 1
num_buckets = 1
node_ids = array_ops.fill([length], 0)
gradients = constant_op.constant(
2.0 / length, dtype=dtypes.float32, shape=[length, 1])
hessians = constant_op.constant(
0.2 / length, dtype=dtypes.float32, shape=[length, 1])
bucketized_features = array_ops.zeros([length], dtype=dtypes.int32)
result = boosted_trees_ops.make_stats_summary(
node_ids, gradients, hessians, [bucketized_features], max_splits,
          num_buckets) # shape=[num_features, max_splits, num_buckets, 2]
self.assertAllClose([[[[2., 0.2]]]], self.evaluate(result))
def testMakeStatsSummaryNumericalPrecisionSmallBatch(self):
"""Tests numeric precision."""
self._verify_precision(length=2000)
def testMakeStatsSummaryNumericalPrecisionMediumBatch(self):
"""Tests numeric precision."""
self._verify_precision(length=100000)
def testMakeStatsSummaryNumericalPrecisionLargeBatch(self):
"""Tests numeric precision."""
self._verify_precision(length=1000000)
def testMakeStatsSummaryNumericalPrecisionMegaBatch(self):
"""Tests numeric precision."""
self._verify_precision(length=50000000)
class BestMultiDimFeatureSplitMultiClass(test_util.TensorFlowTestCase):
"""Tests multi-class/multi-regression for best splits."""
logits_dim = 2
def _get_stats_summary_for_split_diagonal_hessian(self):
summary = [
[[[0., 0., 0., 0.], [0.08, 0.2, 0.09, 0.3], [0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[0., 0., 0., 0.], [0., 0., 0., 0.], [0.08, 0.2, 0.09, 0.3],
[0., 0., 0., 0.]]], # node 0
[[[0., 0., 0., 0.], [-0.25, -0.1, 0.36, 0.2], [-0.14, 0.25, 0.07, 0.18],
[0.1, 0.235, 0.2, 0.06]],
[[0., 0., 0., 0.], [-0.3, 0.12, 0.5, 0.31], [-0.05, 0.115, 0.11, 0.09],
[0.06, 0.15, 0.02, 0.04]]], # node 1
[[[0., 0., 0., 0.], [-0.03, 0.21, 0.28, 0.44], [0., 0., 0., 0.],
[0.3, 0.04, 0.4, 0.41]],
[[0.4, 0.188, 0.16, -0.03], [0.2, -0.088, 0.1, -0.24],
[-0.4, -0.06, 0.5, 0.15], [0.07, 0.21, -0.08, 0.97]]], # node 2
[[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.],
[0., 0., 0., 0.]]], # node 3
[[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.],
[0., 0., 0., 0.]]], # node 4
[[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.],
[0., 0., 0., 0.]]], # node 5
[[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.],
[0., 0., 0., 0.]]] # node 6
]
# [max_splits, num_features, num_buckets, 4]
return np.array(summary)
def _add_feature_dim(self, stats_summary):
"""Add dimension for features; number of features will be 1."""
return np.expand_dims(stats_summary, axis=1)
def testSumOfStatsSummaryValuesFromHelperFunction(self):
"""Sum of grads and hessians is correct from helper function."""
# [max_splits, num_features, num_buckets, 4]
stats_summary = self._get_stats_summary_for_split_diagonal_hessian()
# Test that sum of grads/hessians are same for both features for all nodes.
# [max_splits, num_features, 4]
agg = stats_summary.sum(axis=2) # Sum along buckets.
self.assertAllClose(agg[:, 0, :], agg[:, 1, :]) # There are two features.
    # Test the sum of hessians for each node. These values are used to
    # evaluate whether a node meets the min_node_weight criteria.
nodes_agg = agg[:, 0, :]
hessians = nodes_agg[:, self.logits_dim:]
def frobenius(x, **kwargs):
return np.sqrt(np.square(x).sum(**kwargs))
self.assertAllClose([0.3132092, 0.76843998, 1.08853112, 0., 0., 0., 0.],
frobenius(hessians, axis=1))
def testCalculateBestFeatureSplitsSingleClassVsMultiClass(self):
"""Testing same results using same grads/hess with both single and multi."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
# Build same stats summary in single class and multi-class form (using
# diagonal hessian).
empty = [0] * 2
stats_summary = [
[empty, [.08, .09], empty], # node 0; ignored
[empty, [-0.25, 0.11], [0.1, 0.5]], # node 1
[empty, [0.14, 0.1], empty], # node 2
[empty, empty, empty], # node 3; ignored
]
# [max_splits, num_features, num_buckets, 2]
stats_summary = self._add_feature_dim(stats_summary)
diag_empty = [0] * 4
diag_stats_summary = [
[diag_empty, [0, .08, 0, 0.09], diag_empty], # node 0; ignored
[diag_empty, [0, -0.25, 0, 0.11], [0, 0.1, 0, 0.5]], # node 1
[diag_empty, [0, 0.14, 0, 0.1], diag_empty], # node 2
[diag_empty, diag_empty, diag_empty], # node 3; ignored
]
# [max_splits, num_features, num_buckets, 4]
diag_stats_summary = self._add_feature_dim(diag_stats_summary)
(node_ids, gains, feature_dimensions, thresholds, left_node_contribs,
right_node_contribs, split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=0,
logits_dimension=1))
(diag_node_ids, diag_gains, diag_feature_dimensions, diag_thresholds,
diag_left_node_contribs, diag_right_node_contribs,
diag_split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
diag_stats_summary,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=0,
logits_dimension=2))
self.assertAllEqual(node_ids, diag_node_ids)
self.assertAllClose(gains, diag_gains)
self.assertAllEqual(thresholds, diag_thresholds)
self.assertAllEqual(feature_dimensions, diag_feature_dimensions)
# The left node contrib will be later added to the previous node value to
# make the left node value, and the same for right node contrib.
zeros = np.zeros_like(left_node_contribs)
self.assertAllClose(
np.concatenate([zeros, left_node_contribs], axis=1),
diag_left_node_contribs)
self.assertAllClose(
np.concatenate([zeros, right_node_contribs], axis=1),
diag_right_node_contribs)
self.assertAllEqual(split_types, diag_split_types)
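    # Editor's note (sketch): the diagonal form above embeds the single-class
    # stats [g, h] as [0, g, 0, h], so class 0 carries zero gradient and zero
    # hessian and the two-class problem reduces to the one-class one; hence
    # the contribs only differ by the zero column concatenated above.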
def testCalculateBestFeatureSplitsDiagonalVsFull(self):
"""Test results are same using diagonal hessian and full hessian."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
# Build same stats summary in diagonal and full hessian form, respectively.
diag_empty = [0] * 4
diag_stats_summary = [
[diag_empty, [.08, .09, -.1, .2], diag_empty], # node 0; ignored
[diag_empty, [.15, .36, .21, -.11], [.06, .07, .67, 0.5]], # node 1
[diag_empty, [-.33, .58, -.2, -.31], diag_empty], # node 2
[diag_empty, diag_empty, diag_empty], # node 3; ignored
]
# [max_splits, num_features, num_buckets, 2*logits_dim]
diag_stats_summary = self._add_feature_dim(diag_stats_summary)
full_empty = [0] * 6
full_stats_summary = [
[full_empty, [.08, .09, -.1, 0, 0, .2], full_empty], # node 0; ignored
[full_empty, [.15, .36, .21, 0, 0, -.11], [.06, .07, .67, 0, 0,
0.5]], # node 1
[full_empty, [-.33, .58, -.2, 0, 0, -.31], full_empty], # node 2
[full_empty, full_empty, full_empty], # node 3; ignored
]
# [max_splits, num_features, num_buckets, logits_dim + logits_dim**2]
full_stats_summary = self._add_feature_dim(full_stats_summary)
(diag_node_ids, diag_gains, diag_feature_dimensions, diag_thresholds,
diag_left_node_contribs, diag_right_node_contribs,
diag_split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
diag_stats_summary,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=0,
logits_dimension=self.logits_dim))
(full_node_ids, full_gains, full_feature_dimensions, full_thresholds,
full_left_node_contribs, full_right_node_contribs,
full_split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
full_stats_summary,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=0,
logits_dimension=self.logits_dim))
self.assertAllEqual(diag_node_ids, full_node_ids)
self.assertAllClose(diag_gains, full_gains)
self.assertAllEqual(diag_thresholds, full_thresholds)
self.assertAllEqual(diag_feature_dimensions, full_feature_dimensions)
# The left node contrib will be later added to the previous node value to
# make the left node value, and the same for right node contrib.
self.assertAllClose(diag_left_node_contribs, full_left_node_contribs)
self.assertAllClose(diag_right_node_contribs, full_right_node_contribs)
self.assertAllEqual(diag_split_types, full_split_types)
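  # --- Editor's sketch, not part of the original suite -----------------------
  # How the diagonal stats above map onto the full-hessian layout, assuming
  # logits_dim == 2 and a row-major hessian [h11, h12, h21, h22]:
  def _diag_to_full_stats_sketch(self, stat):
    """Hypothetical helper: [g1, g2, h11, h22] -> [g1, g2, h11, 0, 0, h22]."""
    g1, g2, h11, h22 = stat
    return [g1, g2, h11, 0., 0., h22]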
def testCalculateBestFeatureSplitsWithoutRegularization(self):
"""Testing best split calculation without any regularization."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
# [max_splits, num_features, num_buckets, 2*logits_dim]
stats_summary = self._get_stats_summary_for_split_diagonal_hessian()
(node_ids, gains, feature_dimensions, thresholds, left_node_contribs,
right_node_contribs, split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=0,
logits_dimension=self.logits_dim))
self.assertAllEqual([1, 2], node_ids)
self.assertAllClose([0.912981, 1.446218], gains)
self.assertAllEqual([2, 1], thresholds)
self.assertAllEqual([0, 1], feature_dimensions)
# The left node contrib will be later added to the previous node value to
# make the left node value, and the same for right node contrib.
self.assertAllClose([[0.906977, -0.394737], [-2.307692, 0.370370]],
left_node_contribs)
self.assertAllClose([[-0.5, -3.916667], [0.785714, -0.133928]],
right_node_contribs)
self.assertAllEqual([_INEQUALITY_DEFAULT_LEFT] * 2, split_types)
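    # Editor's note (sketch, not from the original test): with a diagonal
    # hessian the gain appears to decompose per class as sum_k g_k^2 / h_k
    # over left and right children minus the parent term. For node 1 above
    # (feature 0, threshold 2): left g=(-.39, .15), h=(.43, .38); right
    # g=(.1, .235), h=(.2, .06); parent g=(-.29, .385), h=(.63, .44), so
    #   gain = (.1521/.43 + .0225/.38) + (.01/.2 + .055225/.06)
    #          - (.0841/.63 + .148225/.44) ~= 0.912981,
    # and the contribs are -g/h per class, e.g. (.39/.43, -.15/.38)
    # ~= (0.906977, -0.394737) for the left child.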
def testCalculateBestFeatureSplitsWithL2(self):
"""Testing best split calculation inith L2 regularization."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
# [max_splits, num_features, num_buckets, 2*logits_dim]
stats_summary = self._get_stats_summary_for_split_diagonal_hessian()
l2 = 0.1
(node_ids, gains, feature_dimensions, thresholds, left_node_contribs,
right_node_contribs, split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.0,
l2=l2,
tree_complexity=0.0,
min_node_weight=0,
logits_dimension=self.logits_dim))
self.assertAllEqual([1, 2], node_ids)
self.assertAllClose([0.475669, 1.009791], gains)
self.assertAllEqual([1, 1], thresholds)
self.assertAllEqual([0, 1], feature_dimensions)
# The left node contrib will be later added to the previous node value to
# make the left node value, and the same for right node contrib.
self.assertAllClose([[0.543478, 0.333333], [-1.666667, 0.588235]],
left_node_contribs)
self.assertAllClose([[0.108108, -1.426471], [0.634615, -0.122951]],
right_node_contribs)
self.assertAllEqual([_INEQUALITY_DEFAULT_LEFT] * 2, split_types)
def testCalculateBestFeatureSplitsWithMinNodeWeight(self):
"""Testing best split calculation with min_node_weight."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
# [max_splits, num_features, num_buckets, 2*logits_dim]
stats_summary = self._get_stats_summary_for_split_diagonal_hessian()
(node_ids, gains, feature_dimensions, thresholds, left_node_contribs,
right_node_contribs, split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=0.5,
logits_dimension=self.logits_dim))
# Both nodes have large enough sum(hessians) so use them.
self.assertAllEqual([1, 2], node_ids)
self.assertAllClose([0.912981, 1.446218], gains)
self.assertAllEqual([2, 1], thresholds)
self.assertAllEqual([0, 1], feature_dimensions)
# The left node contrib will be later added to the previous node value to
# make the left node value, and the same for right node contrib.
self.assertAllClose([[0.906977, -0.394737], [-2.307692, 0.370370]],
left_node_contribs)
self.assertAllClose([[-0.5, -3.916667], [0.785714, -0.133928]],
right_node_contribs)
self.assertAllEqual([_INEQUALITY_DEFAULT_LEFT] * 2, split_types)
def testCalculateBestFeatureSplitsWithTreeComplexity(self):
"""Testing best split calculation with tree complexity."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
# [max_splits, num_features, num_buckets, 2*logits_dim]
stats_summary = self._get_stats_summary_for_split_diagonal_hessian()
l2 = 0.1
tree_complexity = 3.
(node_ids, gains, feature_dimensions, thresholds, left_node_contribs,
right_node_contribs, split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.0,
l2=l2,
tree_complexity=tree_complexity,
min_node_weight=0,
logits_dimension=self.logits_dim))
    self.assertAllEqual([1, 2], node_ids)
# L2 test result, but subtracted by tree_complexity.
self.assertAllClose(
[0.475669 - tree_complexity, 1.009791 - tree_complexity], gains)
self.assertAllEqual([1, 1], thresholds)
self.assertAllEqual([0, 1], feature_dimensions)
# The left node contrib will be later added to the previous node value to
# make the left node value, and the same for right node contrib.
self.assertAllClose([[0.543478, 0.333333], [-1.666667, 0.588235]],
left_node_contribs)
self.assertAllClose([[0.108108, -1.426471], [0.634615, -0.122951]],
right_node_contribs)
self.assertAllEqual([_INEQUALITY_DEFAULT_LEFT] * 2, split_types)
def testCalculateBestFeatureSplitsWithMinNodeNoSplitOnFeaturePossible(self):
"""Test when parent node hessian doesn't meet min node weight."""
node_id_range = [1, 3] # node 1 through 2 will be processed.
# [max_splits, num_features, num_buckets, 2*logits_dim]
stats_summary = self._get_stats_summary_for_split_diagonal_hessian()
min_node_weight = 0.8
(node_ids, gains, feature_dimensions, thresholds, left_node_contribs,
right_node_contribs, split_types) = self.evaluate(
boosted_trees_ops.calculate_best_feature_split(
node_id_range,
stats_summary,
l1=0.0,
l2=0.0,
tree_complexity=0.0,
min_node_weight=min_node_weight,
logits_dimension=self.logits_dim))
    # Node 1 doesn't have a large enough sum(hessians), so it is not returned.
self.assertAllEqual([2], node_ids)
self.assertAllClose([1.446218], gains)
self.assertAllEqual([1], thresholds)
self.assertAllEqual([1], feature_dimensions)
# The left node contrib will be later added to the previous node value to
# make the left node value, and the same for right node contrib.
self.assertAllClose([[-2.307692, 0.370370]], left_node_contribs)
self.assertAllClose([[0.785714, -0.133929]], right_node_contribs)
self.assertAllEqual([_INEQUALITY_DEFAULT_LEFT], split_types)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/boosted_trees/stats_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for boosted_trees training kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.kernels.boosted_trees import boosted_trees_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
class UpdateTreeEnsembleOpTest(test_util.TensorFlowTestCase):
"""Tests for growing tree ensemble from split candidates."""
@test_util.run_deprecated_v1
def testGrowWithEmptyEnsemble(self):
"""Test growing an empty ensemble."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_ids = [0, 2, 6]
# Prepare feature inputs.
# Note that features 1 & 3 have the same gain but different splits.
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([7.62], dtype=np.float32)
feature1_thresholds = np.array([52], dtype=np.int32)
feature1_left_node_contribs = np.array([[-4.375]], dtype=np.float32)
feature1_right_node_contribs = np.array([[7.143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([0.63], dtype=np.float32)
feature2_thresholds = np.array([23], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24]], dtype=np.float32)
# Feature split with the highest gain.
feature3_nodes = np.array([0], dtype=np.int32)
feature3_gains = np.array([7.65], dtype=np.float32)
feature3_thresholds = np.array([7], dtype=np.int32)
feature3_left_node_contribs = np.array([[-4.89]], dtype=np.float32)
feature3_right_node_contribs = np.array([[5.3]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# Tree will be finalized now, since we will reach depth 1.
max_depth=1,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
# Note that since the tree is finalized, we added a new dummy tree.
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 6
threshold: 7
left_id: 1
right_id: 2
}
metadata {
gain: 7.65
}
}
nodes {
leaf {
scalar: -0.489
}
}
nodes {
leaf {
scalar: 0.53
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
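      # Editor's note (sketch): the leaf values in the expected proto are the
      # winning feature-3 contributions scaled by the learning rate:
      #   -4.89 * 0.1 == -0.489 and 5.3 * 0.1 == 0.53.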
@test_util.run_deprecated_v1
def testBiasCenteringOnEmptyEnsemble(self):
"""Test growing with bias centering on an empty ensemble."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
gradients = np.array([[5.]], dtype=np.float32)
hessians = np.array([[24.]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.center_bias(
tree_ensemble_handle,
mean_gradients=gradients,
mean_hessians=hessians,
l1=0.0,
l2=1.0
)
session.run(grow_op)
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
leaf {
scalar: -0.2
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 0
is_finalized: false
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
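      # Editor's note (sketch): the -0.2 bias leaf is consistent with a
      # Newton step -g / (h + l2) = -5.0 / (24.0 + 1.0) = -0.2, using the
      # l2=1.0 passed above (l1 is 0).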
@test_util.run_deprecated_v1
def testGrowExistingEnsembleTreeNotFinalized(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 0.714
}
}
nodes {
leaf {
scalar: -0.4375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
# feature 1 only has a candidate for node 1, feature 2 has candidates
# for both nodes and feature 3 only has a candidate for node 2.
feature_ids = [0, 1, 0]
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([0.63, 2.7], dtype=np.float32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6], [-1.5]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24], [2.3]], dtype=np.float32)
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([1.7], dtype=np.float32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# tree is going to be finalized now, since we reach depth 2.
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
# Expect the split for node 1 to be chosen from feature 1 and
# the split for node 2 to be chosen from feature 2.
# The grown tree should be finalized as max tree depth is 2 and we have
# grown 2 layers.
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
bucketized_split {
threshold: 21
left_id: 3
right_id: 4
}
metadata {
gain: 1.4
original_leaf {
scalar: 0.714
}
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 7
left_id: 5
right_id: 6
}
metadata {
gain: 2.7
original_leaf {
scalar: -0.4375
}
}
}
nodes {
leaf {
scalar: 0.114
}
}
nodes {
leaf {
scalar: 0.879
}
}
nodes {
leaf {
scalar: -0.5875
}
}
nodes {
leaf {
scalar: -0.2075
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testGrowExistingEnsembleTreeFinalized(self):
"""Test growing an existing ensemble with the last tree finalized."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
feature_ids = [75]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
learning_rate=0.1,
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
# Expect a new tree added, with a split on feature 75
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 75
threshold: 21
left_id: 1
right_id: 2
}
metadata {
gain: -1.4
}
}
nodes {
leaf {
scalar: -0.6
}
}
nodes {
leaf {
scalar: 0.165
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testPrePruning(self):
"""Test growing an existing ensemble with pre-pruning."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
tree_weights: 0.1
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
# For node 1, the best split is on feature 2 (gain -0.63), but the gain
# is negative so node 1 will not be split.
# For node 2, the best split is on feature 3, gain is positive.
feature_ids = [0, 1, 0]
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([-0.63, 2.7], dtype=np.float32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6], [-1.5]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24], [2.3]], dtype=np.float32)
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([2.8], dtype=np.float32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.PRE_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
# Expect the split for node 1 to be chosen from feature 1 and
# the split for node 2 to be chosen from feature 2.
# The grown tree should not be finalized as max tree depth is 3 and
# it's only grown 2 layers.
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 3
left_id: 3
right_id: 4
}
metadata {
gain: 2.8
original_leaf {
scalar: -4.375
}
}
}
nodes {
leaf {
scalar: -4.45
}
}
nodes {
leaf {
scalar: -4.182
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: false
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 3
last_layer_node_end: 5
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
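      # Editor's note (sketch): each new leaf extends the original node 2
      # value by learning_rate * contrib, i.e.
      #   -4.375 + 0.1 * (-0.75) == -4.45 and -4.375 + 0.1 * 1.93 == -4.182.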
@test_util.run_deprecated_v1
def testMetadataWhenCantSplitDueToEmptySplits(self):
"""Test that the metadata is updated even though we can't split."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 0.714
}
}
nodes {
leaf {
scalar: -0.4375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
      # No feature inputs to prepare: the grow op gets empty split candidates.
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
max_depth=2,
# No splits are available.
feature_ids=[],
node_ids=[],
gains=[],
thresholds=[],
left_node_contribs=[],
right_node_contribs=[])
session.run(grow_op)
      # Expect no new splits created, but attempted (global) stats updated.
      # Metadata for this tree should not be updated (we didn't succeed in
      # building a layer). Node ranges don't change.
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 0.714
}
}
nodes {
leaf {
scalar: -0.4375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testMetadataWhenCantSplitDuePrePruning(self):
"""Test metadata is updated correctly when no split due to prepruning."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
feature_ids = [0, 1, 0]
# All the gains are negative.
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([-0.63, -2.7], dtype=np.float32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6], [-1.5]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24], [2.3]], dtype=np.float32)
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([-2.8], dtype=np.float32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.PRE_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
# Expect that no new split was created because all the gains were negative
# Global metadata should be updated, tree metadata should not be updated.
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testPostPruningOfSomeNodes(self):
"""Test growing an ensemble with post-pruning."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare inputs.
      # Second feature has a larger (but still negative) gain.
feature_ids = [0, 1]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.3], dtype=np.float32)
feature1_thresholds = np.array([7], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.013]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([-0.2], dtype=np.float32)
feature2_thresholds = np.array([33], dtype=np.int32)
feature2_left_node_contribs = np.array([[0.01]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
])
session.run(grow_op)
      # Expect the split from the second feature to be chosen despite the
      # negative gain. No pruning has happened just yet.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
leaf {
scalar: 0.0143
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
# Prepare the second layer.
# Note that node 1 gain is negative and node 2 gain is positive.
feature_ids = [3]
feature1_nodes = np.array([1, 2], dtype=np.int32)
feature1_gains = np.array([-0.2, 0.5], dtype=np.float32)
feature1_thresholds = np.array([7, 5], dtype=np.int32)
feature1_left_node_contribs = np.array(
[[0.07], [0.041]], dtype=np.float32)
feature1_right_node_contribs = np.array(
[[0.083], [0.064]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
# After adding this layer, the tree will not be finalized
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id:1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 7
left_id: 3
right_id: 4
}
metadata {
gain: -0.2
original_leaf {
scalar: 0.01
}
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 5
left_id: 5
right_id: 6
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.08
}
}
nodes {
leaf {
scalar: 0.093
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 2
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 3
last_layer_node_end: 7
}
"""
self.assertEqual(new_stamp, 2)
self.assertProtoEquals(expected_result, res_ensemble)
      # Now split leaf 3, again with a negative gain. After this layer the
      # tree will be finalized and post-pruning happens; leaves 3, 4, 7 and 8
      # will be pruned out.
# Prepare the third layer.
feature_ids = [92]
feature1_nodes = np.array([3], dtype=np.int32)
feature1_gains = np.array([-0.45], dtype=np.float32)
feature1_thresholds = np.array([11], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.15]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.5]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
# After adding this layer, the tree will be finalized
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
      # Note that nodes 3, 4, 7 and 8 got deleted, so the metadata stores
      # their ids mapped to their parent node 1, with the respective change
      # in logits.
expected_result = """
trees {
nodes {
bucketized_split {
feature_id:1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 5
left_id: 3
right_id: 4
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
trees {
nodes {
leaf {
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
}
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 3
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 3)
self.assertProtoEquals(expected_result, res_ensemble)
@test_util.run_deprecated_v1
def testPostPruningOfAllNodes(self):
"""Test growing an ensemble with post-pruning, with all nodes are pruned."""
with self.cached_session() as session:
      # Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare inputs. All have negative gains.
feature_ids = [0, 1]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.3], dtype=np.float32)
feature1_thresholds = np.array([7], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.013]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([-0.62], dtype=np.float32)
feature2_thresholds = np.array([33], dtype=np.int32)
feature2_left_node_contribs = np.array([[0.01]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
])
session.run(grow_op)
# Expect the split from feature 2 to be chosen despite the negative gain.
      # The grown tree is not yet finalized, as max tree depth is 2, so no
      # pruning occurs.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.62
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
leaf {
scalar: 0.0143
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
# Prepare inputs.
# All have negative gain.
feature_ids = [3]
feature1_nodes = np.array([1, 2], dtype=np.int32)
feature1_gains = np.array([-0.2, -0.5], dtype=np.float32)
feature1_thresholds = np.array([77, 79], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.023], [0.3]], dtype=np.float32)
feature1_right_node_contribs = np.array(
[[0.012343], [24]], dtype=np.float32)
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
# Expect the split from feature 1 to be chosen despite the negative gain.
# The grown tree should be finalized. Since all nodes have negative gain,
# the whole tree is pruned.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
# Expect the ensemble to be empty as post-pruning will prune
# the entire finalized tree.
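      # As a worked check (our annotation): e.g. old node 3 held
      # 0.01 + 0.023 = 0.033, so remapping it to the root (0.0) gives
      # logit_change = -0.033, and old node 6 held 0.0143 + 24 = 24.0143,
      # giving logit_change = -24.0143 (stored as -24.014299 in float32).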
self.assertEqual(new_stamp, 2)
self.assertProtoEquals(
"""
trees {
nodes {
leaf {
}
}
}
trees {
nodes {
leaf {
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata{
num_layers_grown: 2
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.01
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.0143
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.033
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.022343
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.3143
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -24.014299
}
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 0
last_layer_node_end: 1
}
""", res_ensemble)
@test_util.run_deprecated_v1
def testPostPruningChangesNothing(self):
"""Test growing an ensemble with post-pruning with all gains >0."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare inputs.
      # The first feature has the larger gain; both gains are positive.
feature_ids = [3, 4]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([7.62], dtype=np.float32)
feature1_thresholds = np.array([52], dtype=np.int32)
feature1_left_node_contribs = np.array([[-4.375]], dtype=np.float32)
feature1_right_node_contribs = np.array([[7.143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([0.63], dtype=np.float32)
feature2_thresholds = np.array([23], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=1,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
])
session.run(grow_op)
# Expect the split from the first feature to be chosen.
# Pruning got triggered but changed nothing.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 3
threshold: 52
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: -4.375
}
}
nodes {
leaf {
scalar: 7.143
}
}
}
trees {
nodes {
leaf {
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py
|
tensorflow-master
|
tensorflow/python/kernel_tests/boosted_trees/__init__.py
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking quantile related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_handle_op as resource_handle_op
from tensorflow.python.ops.gen_boosted_trees_ops import is_boosted_trees_quantile_stream_resource_initialized as resource_initialized
from tensorflow.python.platform import googletest
from tensorflow.python.training import saver
@test_util.run_deprecated_v1
class QuantileOpsTest(test_util.TensorFlowTestCase):
def create_resource(self, name, eps, max_elements, num_streams=1):
quantile_accumulator_handle = resource_handle_op(
container="", shared_name=name, name=name)
create_op = boosted_trees_ops.create_quantile_stream_resource(
quantile_accumulator_handle,
epsilon=eps,
max_elements=max_elements,
num_streams=num_streams)
is_initialized_op = resource_initialized(quantile_accumulator_handle)
resources.register_resource(quantile_accumulator_handle, create_op,
is_initialized_op)
return quantile_accumulator_handle
def setUp(self):
"""Sets up the quantile ops test as follows.
    Create a batch of 6 examples having 2 features.
    The data looks like this:
| Instance | instance weights | Feature 0 | Feature 1
| 0 | 10 | 1.2 | 2.3
| 1 | 1 | 12.1 | 1.2
| 2 | 1 | 0.3 | 1.1
| 3 | 1 | 0.5 | 2.6
| 4 | 1 | 0.6 | 3.2
| 5 | 1 | 2.2 | 0.8
"""
self._feature_0 = constant_op.constant([1.2, 12.1, 0.3, 0.5, 0.6, 2.2],
dtype=dtypes.float32)
self._feature_1 = constant_op.constant([2.3, 1.2, 1.1, 2.6, 3.2, 0.8],
dtype=dtypes.float32)
self._feature_0_boundaries = np.array([0.3, 0.6, 1.2, 12.1])
self._feature_1_boundaries = np.array([0.8, 1.2, 2.3, 3.2])
self._feature_0_quantiles = constant_op.constant([2, 3, 0, 1, 1, 3],
dtype=dtypes.int32)
self._feature_1_quantiles = constant_op.constant([2, 1, 1, 3, 3, 0],
dtype=dtypes.int32)
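    # The expected bucket indices are consistent with a lower-bound rule: a
    # value maps to the index of the first boundary >= value, e.g. 0.5 ->
    # bucket 1 and 12.1 -> bucket 3 for feature 0 (our reading of the data
    # above, not an API guarantee).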
self._example_weights = constant_op.constant(
[10, 1, 1, 1, 1, 1], dtype=dtypes.float32)
self.eps = 0.01
self.max_elements = 1 << 16
self.num_quantiles = constant_op.constant(3, dtype=dtypes.int64)
def testBasicQuantileBucketsSingleResource(self):
with self.cached_session() as sess:
quantile_accumulator_handle = self.create_resource("floats", self.eps,
self.max_elements, 2)
resources.initialize_resources(resources.shared_resources()).run()
summaries = boosted_trees_ops.make_quantile_summaries(
[self._feature_0, self._feature_1], self._example_weights,
epsilon=self.eps)
summary_op = boosted_trees_ops.quantile_add_summaries(
quantile_accumulator_handle, summaries)
flush_op = boosted_trees_ops.quantile_flush(
quantile_accumulator_handle, self.num_quantiles)
buckets = boosted_trees_ops.get_bucket_boundaries(
quantile_accumulator_handle, num_features=2)
quantiles = boosted_trees_ops.boosted_trees_bucketize(
[self._feature_0, self._feature_1], buckets)
self.evaluate(summary_op)
self.evaluate(flush_op)
self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval())
self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval())
def testBasicQuantileBucketsMultipleResources(self):
with self.cached_session() as sess:
quantile_accumulator_handle_0 = self.create_resource("float_0", self.eps,
self.max_elements)
quantile_accumulator_handle_1 = self.create_resource("float_1", self.eps,
self.max_elements)
resources.initialize_resources(resources.shared_resources()).run()
summaries = boosted_trees_ops.make_quantile_summaries(
[self._feature_0, self._feature_1], self._example_weights,
epsilon=self.eps)
summary_op_0 = boosted_trees_ops.quantile_add_summaries(
quantile_accumulator_handle_0,
[summaries[0]])
summary_op_1 = boosted_trees_ops.quantile_add_summaries(
quantile_accumulator_handle_1,
[summaries[1]])
flush_op_0 = boosted_trees_ops.quantile_flush(
quantile_accumulator_handle_0, self.num_quantiles)
flush_op_1 = boosted_trees_ops.quantile_flush(
quantile_accumulator_handle_1, self.num_quantiles)
bucket_0 = boosted_trees_ops.get_bucket_boundaries(
quantile_accumulator_handle_0, num_features=1)
bucket_1 = boosted_trees_ops.get_bucket_boundaries(
quantile_accumulator_handle_1, num_features=1)
quantiles = boosted_trees_ops.boosted_trees_bucketize(
[self._feature_0, self._feature_1], bucket_0 + bucket_1)
self.evaluate([summary_op_0, summary_op_1])
self.evaluate([flush_op_0, flush_op_1])
self.assertAllClose(self._feature_0_boundaries, bucket_0[0].eval())
self.assertAllClose(self._feature_1_boundaries, bucket_1[0].eval())
self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval())
self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval())
def testSaveRestoreAfterFlush(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.cached_session() as sess:
accumulator = boosted_trees_ops.QuantileAccumulator(
num_streams=2, num_quantiles=3, epsilon=self.eps, name="q0")
save = saver.Saver()
resources.initialize_resources(resources.shared_resources()).run()
buckets = accumulator.get_bucket_boundaries()
self.assertAllClose([], buckets[0].eval())
self.assertAllClose([], buckets[1].eval())
summaries = accumulator.add_summaries([self._feature_0, self._feature_1],
self._example_weights)
with ops.control_dependencies([summaries]):
flush = accumulator.flush()
self.evaluate(flush)
self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
save.save(sess, save_path)
with self.session(graph=ops.Graph()) as sess:
accumulator = boosted_trees_ops.QuantileAccumulator(
num_streams=2, num_quantiles=3, epsilon=self.eps, name="q0")
save = saver.Saver()
save.restore(sess, save_path)
buckets = accumulator.get_bucket_boundaries()
self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
def testSaveRestoreBeforeFlush(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.cached_session() as sess:
accumulator = boosted_trees_ops.QuantileAccumulator(
num_streams=2, num_quantiles=3, epsilon=self.eps, name="q0")
save = saver.Saver()
resources.initialize_resources(resources.shared_resources()).run()
summaries = accumulator.add_summaries([self._feature_0, self._feature_1],
self._example_weights)
self.evaluate(summaries)
buckets = accumulator.get_bucket_boundaries()
self.assertAllClose([], buckets[0].eval())
self.assertAllClose([], buckets[1].eval())
save.save(sess, save_path)
self.evaluate(accumulator.flush())
self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
with self.session(graph=ops.Graph()) as sess:
accumulator = boosted_trees_ops.QuantileAccumulator(
num_streams=2, num_quantiles=3, epsilon=self.eps, name="q0")
save = saver.Saver()
save.restore(sess, save_path)
buckets = accumulator.get_bucket_boundaries()
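      # The restored accumulator holds the added summaries but was never
      # flushed, so its bucket boundaries are still empty until a flush runs.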
self.assertAllClose([], buckets[0].eval())
self.assertAllClose([], buckets[1].eval())
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/boosted_trees/quantile_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests boosted_trees prediction kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.kernels.boosted_trees import boosted_trees_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
class TrainingPredictionOpsTest(test_util.TensorFlowTestCase):
"""Tests prediction ops for training."""
@test_util.run_deprecated_v1
def testCachedPredictionOnEmptyEnsemble(self):
"""Tests that prediction on a dummy ensemble does not fail."""
with self.cached_session() as session:
# Create a dummy ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto='')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# No previous cached values.
cached_tree_ids = [0, 0]
cached_node_ids = [0, 0]
# We have two features: 0 and 1. Values don't matter here on a dummy
# ensemble.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Nothing changed.
self.assertAllClose(cached_tree_ids, new_tree_ids)
self.assertAllClose(cached_node_ids, new_node_ids)
self.assertAllClose([[0], [0]], logits_updates)
@test_util.run_deprecated_v1
def testCachedPredictionOnEmptyEnsembleMultiClass(self):
"""Tests that prediction on dummy ensemble does not fail for multi class."""
with self.cached_session() as session:
# Create a dummy ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto='')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# No previous cached values.
cached_tree_ids = [0, 0]
cached_node_ids = [0, 0]
# We have two features: 0 and 1. Values don't matter here on a dummy
# ensemble.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Multi class.
logits_dimension = 2
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=logits_dimension)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Nothing changed.
self.assertAllClose(cached_tree_ids, new_tree_ids)
self.assertAllClose(cached_node_ids, new_node_ids)
self.assertAllClose([[0, 0], [0, 0]], logits_updates)
@test_util.run_deprecated_v1
def testNoCachedPredictionButTreeExists(self):
"""Tests that predictions are updated once trees are added."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, none were cached before.
cached_tree_ids = [0, 0]
cached_node_ids = [0, 0]
feature_0_values = [67, 5]
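      # With the root split at threshold 15, feature value 67 goes right
      # (node 2, leaf 8.79) and 5 goes left (node 1, leaf 1.14); the updates
      # below are those leaves scaled by the tree weight 0.1.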
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the first tree.
self.assertAllClose([0, 0], new_tree_ids)
self.assertAllClose([2, 1], new_node_ids)
self.assertAllClose([[0.1 * 8.79], [0.1 * 1.14]], logits_updates)
@test_util.run_deprecated_v1
def testNoCachedPredictionButTreeExistsMultiClass(self):
"""Tests predictions are updated once trees are added for multi class."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
vector: {
value: 1.14
}
}
}
nodes {
leaf {
vector: {
value: 8.79
}
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, none were cached before.
cached_tree_ids = [0, 0]
cached_node_ids = [0, 0]
feature_0_values = [67, 5]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the first tree.
self.assertAllClose([0, 0], new_tree_ids)
self.assertAllClose([2, 1], new_node_ids)
expected_logit_updates = 0.1 * np.array([[8.79], [1.14]])
self.assertAllClose(expected_logit_updates, logits_updates)
@test_util.run_deprecated_v1
def testCachedPredictionIsCurrent(self):
"""Tests that prediction based on previous node in the tree works."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf {
scalar: -2
}
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 2.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 2]
# We have two features: 0 and 1. Values don't matter because trees didn't
# change.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Nothing changed.
self.assertAllClose(cached_tree_ids, new_tree_ids)
self.assertAllClose(cached_node_ids, new_node_ids)
self.assertAllClose([[0], [0]], logits_updates)
@test_util.run_deprecated_v1
def testCachedPredictionIsCurrentMultiClass(self):
"""Tests that cached prediction is current for multi class."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf {
vector: {
value: -2
}
vector: {
value: 1.2
}
}
}
}
nodes {
leaf {
vector: {
value: 1.14
}
vector: {
value: -0.5
}
}
}
nodes {
leaf {
vector: {
value: 8.79
}
vector: {
value: 0.2
}
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 2.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 2]
# We have two features: 0 and 1. Values don't matter because trees didn't
# change.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Multi-class.
logits_dimension = 2
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=logits_dimension)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Nothing changed.
self.assertAllClose(cached_tree_ids, new_tree_ids)
self.assertAllClose(cached_node_ids, new_node_ids)
self.assertAllClose([[0, 0], [0, 0]], logits_updates)
@test_util.run_deprecated_v1
def testCachedPredictionFromTheSameTree(self):
"""Tests that prediction based on previous node in the tree works."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf {
scalar: -2
}
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 7
left_id: 3
right_id: 4
}
metadata {
gain: 1.4
original_leaf {
scalar: 7.14
}
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 7
left_id: 5
right_id: 6
}
metadata {
gain: 2.7
original_leaf {
scalar: -4.375
}
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
nodes {
leaf {
scalar: -5.875
}
}
nodes {
leaf {
scalar: -2.075
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 0.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 0]
# We have two features: 0 and 1.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are still in the same tree.
self.assertAllClose([0, 0], new_tree_ids)
# When using the full tree, the first example will end up in node 4,
# the second in node 5.
self.assertAllClose([4, 5], new_node_ids)
# Full predictions for each instance would be 8.79 and -5.875,
# so an update from the previous cached values lr*(7.14 and -2) would be
# 1.65 and -3.875, and then multiply them by 0.1 (lr)
self.assertAllClose([[0.1 * 1.65], [0.1 * -3.875]], logits_updates)
@test_util.run_deprecated_v1
def testCachedPredictionFromTheSameTreeMultiClass(self):
"""Tests that cache prediction works within a tree for multi-class."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf {
vector: {
value: -2
}
vector: {
value: 1.2
}
}
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 7
left_id: 3
right_id: 4
}
metadata {
gain: 1.4
original_leaf {
vector: {
value: 7.14
}
vector: {
value: -3.2
}
}
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 7
left_id: 5
right_id: 6
}
metadata {
gain: 2.7
original_leaf {
vector: {
value: -4.375
}
vector: {
value: 0.9
}
}
}
}
nodes {
leaf {
vector: {
value: 1.14
}
vector: {
value: 0.27
}
}
}
nodes {
leaf {
vector: {
value: 8.79
}
vector: {
value: -3.4
}
}
}
nodes {
leaf {
vector: {
value: -5.875
}
vector: {
value: 1.61
}
}
}
nodes {
leaf {
vector: {
value: -2.075
}
vector: {
value: 3.48
}
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 0.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 0]
# We have two features: 0 and 1.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=2)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are still in the same tree.
self.assertAllClose([0, 0], new_tree_ids)
# When using the full tree, the first example will end up in node 4,
# the second in node 5.
self.assertAllClose([4, 5], new_node_ids)
# Full predictions for example 1: [8.79, -3.4], example 2: [-5.875, 1.61].
# So an update from the previous cached values lr*([7.14, -3.2] and [-2,
# 1.2]) would be [1.65, -0.2] for example1 and [-3.875, 0.41] for
# example2; and then multiply them by 0.1 (lr).
self.assertAllClose(0.1 * np.array([[1.65, -0.2], [-3.875, 0.41]]),
logits_updates)
@test_util.run_deprecated_v1
def testCachedPredictionFromPreviousTree(self):
"""Tests the predictions work when we have cache from previous trees."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 28
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 26
left_id: 1
right_id: 2
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 50
left_id: 3
right_id: 4
}
}
nodes {
leaf {
scalar: 7
}
}
nodes {
leaf {
scalar: 5
}
}
nodes {
leaf {
scalar: 6
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 34
left_id: 1
right_id: 2
}
}
nodes {
leaf {
scalar: -7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
}
tree_metadata {
is_finalized: true
}
tree_metadata {
is_finalized: true
}
tree_metadata {
is_finalized: false
}
tree_weights: 0.1
tree_weights: 0.1
tree_weights: 0.1
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 0.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 0]
# We have two features: 0 and 1.
feature_0_values = [36, 32]
feature_1_values = [11, 27]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Example 1 will get to node 3 in tree 1 and node 2 of tree 2
# Example 2 will get to node 2 in tree 1 and node 1 of tree 2
# We are in the last tree.
self.assertAllClose([2, 2], new_tree_ids)
      # When using the full trees, the first example ends up in node 2 and
      # the second in node 1 of the last tree.
self.assertAllClose([2, 1], new_node_ids)
      # Example 1: tree 0: 1.14 (already cached), tree 1: 5.0, tree 2: 5.0 =>
      # change = 0.1*(5.0+5.0)
      # Example 2: tree 0: 1.14, tree 1: 7.0, tree 2: -7.0 =>
      # change = 0.1*(1.14+7.0-7.0)
self.assertAllClose([[1], [0.114]], logits_updates)
@test_util.run_deprecated_v1
def testCachedPredictionFromPreviousTreeMultiClass(self):
"""Tests predictions when we have cache from previous trees multi-class."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 28
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf {
vector: {
value: 0
}
vector: {
value: 0
}
}
}
}
nodes {
leaf {
vector: {
value: 1.14
}
vector: {
value: 0.27
}
}
}
nodes {
leaf {
vector: {
value: 8.79
}
vector: {
value: -3.4
}
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 26
left_id: 1
right_id: 2
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 50
left_id: 3
right_id: 4
}
}
nodes {
leaf {
vector: {
value: 7
}
vector: {
value: 1.12
}
}
}
nodes {
leaf {
vector: {
value: 5
}
vector: {
value: -0.4
}
}
}
nodes {
leaf {
vector: {
value: 6
}
vector: {
value: 1.4
}
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 34
left_id: 1
right_id: 2
}
}
nodes {
leaf {
vector: {
value: -7
}
vector: {
value: 3.4
}
}
}
nodes {
leaf {
vector: {
value: 5.0
}
vector: {
value: 1.24
}
}
}
}
tree_metadata {
is_finalized: true
}
tree_metadata {
is_finalized: true
}
tree_metadata {
is_finalized: false
}
tree_weights: 0.1
tree_weights: 0.1
tree_weights: 0.1
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 0.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 0]
# We have two features: 0 and 1.
feature_0_values = [36, 32]
feature_1_values = [11, 27]
# Multi-class.
logits_dimension = 2
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=logits_dimension)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Example 1 will get to node 3 in tree 1 and node 2 of tree 2
# Example 2 will get to node 2 in tree 1 and node 1 of tree 2
# We are in the last tree.
self.assertAllClose([2, 2], new_tree_ids)
self.assertAllClose([2, 1], new_node_ids)
      # Example 1 was cached at tree 0, node 1.
      # Example 1: tree 0: [1.14, 0.27] (already cached), tree 1: [5, -0.4],
      # tree 2: [5, 1.24] => change = 0.1*(5.0+5.0, -0.4+1.24)
      # Example 2 was cached at tree 0, node 0.
      # Example 2: tree 0: [1.14, 0.27], tree 1: [7.0, 1.12], tree 2: [-7, 3.4]
      # => change = 0.1*(1.14+7.0-7.0, 0.27+1.12+3.4)
self.assertAllClose(0.1 * np.array([[10, 0.84], [1.14, 4.79]]),
logits_updates)
@test_util.run_deprecated_v1
def testCategoricalSplits(self):
"""Tests the training prediction work for categorical splits."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
categorical_split {
feature_id: 1
value: 2
left_id: 1
right_id: 2
}
}
nodes {
categorical_split {
feature_id: 0
value: 13
left_id: 3
right_id: 4
}
}
nodes {
leaf {
scalar: 7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
nodes {
leaf {
scalar: 6.0
}
}
}
tree_weights: 1.0
tree_metadata {
is_finalized: true
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_0_values = [13, 1, 3]
feature_1_values = [2, 2, 1]
# No previous cached values.
cached_tree_ids = [0, 0, 0]
cached_node_ids = [0, 0, 0]
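      # Categorical splits appear to send an example left when its feature
      # value equals the split value and right otherwise (our reading of the
      # expected ids below): feature_1 == 2 keeps examples 0 and 1 in the left
      # subtree, while example 2 goes right to leaf 7.0.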
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
self.assertAllClose([0, 0, 0], new_tree_ids)
self.assertAllClose([3, 4, 2], new_node_ids)
self.assertAllClose([[5.], [6.], [7.]], logits_updates)
@test_util.run_deprecated_v1
def testCategoricalSplitsMultiClass(self):
"""Tests the training prediction work for categorical splits."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
categorical_split {
feature_id: 1
value: 2
left_id: 1
right_id: 2
}
}
nodes {
categorical_split {
feature_id: 0
value: 13
left_id: 3
right_id: 4
}
}
nodes {
leaf {
vector: {
value: 7
}
vector: {
value: 1.12
}
}
}
nodes {
leaf {
vector: {
value: 5
}
vector: {
value: 1.24
}
}
}
nodes {
leaf {
vector: {
value: 6
}
vector: {
value: 1.4
}
}
}
}
tree_weights: 1.0
tree_metadata {
is_finalized: true
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_0_values = [13, 1, 3]
feature_1_values = [2, 2, 1]
# No previous cached values.
cached_tree_ids = [0, 0, 0]
cached_node_ids = [0, 0, 0]
# Multi-class.
logits_dimension = 2
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=logits_dimension)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
self.assertAllClose([0, 0, 0], new_tree_ids)
self.assertAllClose([3, 4, 2], new_node_ids)
self.assertAllClose([[5., 1.24], [6., 1.4], [7., 1.12]], logits_updates)
@test_util.run_deprecated_v1
def testCachedPredictionFromTheSameTreeWithPostPrunedNodes(self):
"""Tests that prediction based on previous node in the tree works."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id:0
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 5
left_id: 3
right_id: 4
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
}
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 3
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
cached_tree_ids = [0, 0, 0, 0, 0, 0]
      # Leaves 3, 4, 7 and 8 got deleted during post-pruning; leaves 5 and 6
      # changed their ids to 3 and 4 respectively.
cached_node_ids = [3, 4, 5, 6, 7, 8]
# We have two features: 0 and 1.
feature_0_values = [12, 17, 35, 36, 23, 11]
feature_1_values = [12, 12, 17, 18, 123, 24]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are still in the same tree.
self.assertAllClose([0, 0, 0, 0, 0, 0], new_tree_ids)
      # Examples from leaves 3, 4, 7 and 8 should be in leaf 1; examples from
      # leaves 5 and 6 end up in leaves 3 and 4.
self.assertAllClose([1, 1, 3, 4, 1, 1], new_node_ids)
cached_values = [[0.08], [0.093], [0.0553], [0.0783], [0.15 + 0.08],
[0.5 + 0.08]]
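      # Each update plus the reconstructed cached value must equal the new
      # prediction at the remapped node, e.g. 0.08 cached at old leaf 3 plus
      # the -0.07 update gives the new leaf-1 value 0.01.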
self.assertAllClose([[0.01], [0.01], [0.0553], [0.0783], [0.01], [0.01]],
logits_updates + cached_values)
@test_util.run_deprecated_v1
def testCachedPredictionFromTheSameTreeWithPostPrunedNodesMultiClass(self):
"""Tests that prediction based on previous node in tree works multiclass."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id:0
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
vector: {
value: 0.01
}
vector: {
value: 0.032
}
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 5
left_id: 3
right_id: 4
}
metadata {
gain: 0.5
original_leaf {
vector: {
value: 0.0143
}
vector: {
value: 0.022
}
}
}
}
nodes {
leaf {
vector: {
value: 0.0553
}
vector: {
value: -0.02
}
}
}
nodes {
leaf {
vector: {
value: 0.0783
}
vector: {
value: 0.012
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
logit_change: -0.02
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
logit_change: -0.42
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
logit_change: -0.05
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
logit_change: -0.11
}
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 3
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
cached_tree_ids = [0, 0, 0, 0, 0, 0]
      # Leaves 3, 4, 7 and 8 got deleted during post-pruning; leaves 5 and 6
      # changed their ids to 3 and 4 respectively.
cached_node_ids = [3, 4, 5, 6, 7, 8]
# We have two features: 0 and 1.
feature_0_values = [12, 17, 35, 36, 23, 11]
feature_1_values = [12, 12, 17, 18, 123, 24]
# Multi-class.
logits_dimension = 2
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=logits_dimension)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are still in the same tree.
self.assertAllClose([0, 0, 0, 0, 0, 0], new_tree_ids)
      # Examples from leaves 3, 4, 7 and 8 should be in leaf 1; examples from
      # leaves 5 and 6 end up in leaves 3 and 4.
self.assertAllClose([1, 1, 3, 4, 1, 1], new_node_ids)
cached_values = np.array([[0.01 + 0.07, 0.032 + 0.02],
[0.01 + 0.083, 0.032 + 0.42], [0.0553, -0.02],
[0.0783, 0.012],
[0.08 + (-0.07 + 0.22), 0.052 + (-0.02 + 0.05)],
[0.08 + (-0.07 + 0.57),
0.052 + (-0.02 + 0.11)]])
self.assertAllClose([[0.01, 0.032], [0.01, 0.032], [0.0553, -0.02],
[0.0783, 0.012], [0.01, 0.032], [0.01, 0.032]],
np.array(logits_updates) + cached_values)
@test_util.run_deprecated_v1
def testCachedPredictionFromThePreviousTreeWithPostPrunedNodes(self):
"""Tests that prediction based on previous node in the tree works."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id:0
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 5
left_id: 3
right_id: 4
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
trees {
nodes {
leaf {
scalar: 0.55
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
}
}
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 4
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
cached_tree_ids = [0, 0, 0, 0, 0, 0]
      # Leaves 3, 4, 7 and 8 got deleted during post-pruning; leaves 5 and 6
      # changed their ids to 3 and 4 respectively.
cached_node_ids = [3, 4, 5, 6, 7, 8]
# We have two features: 0 and 1.
feature_0_values = [12, 17, 35, 36, 23, 11]
feature_1_values = [12, 12, 17, 18, 123, 24]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the last tree.
self.assertAllClose([1, 1, 1, 1, 1, 1], new_tree_ids)
      # Examples from leaves 3, 4, 7 and 8 should be in leaf 1; examples from
      # leaves 5 and 6 end up in leaves 3 and 4 of tree 0. For tree 1, all of
      # the examples are in the root node.
self.assertAllClose([0, 0, 0, 0, 0, 0], new_node_ids)
cached_values = [[0.08], [0.093], [0.0553], [0.0783], [0.15 + 0.08],
[0.5 + 0.08]]
root = 0.55
self.assertAllClose([[root + 0.01], [root + 0.01], [root + 0.0553],
[root + 0.0783], [root + 0.01], [root + 0.01]],
logits_updates + cached_values)
@test_util.run_deprecated_v1
def testCachedPredictionFromThePreviousTreeWithPostPrunedNodesMultiClass(
self):
"""Tests that prediction from pruned previous tree works multi class."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id:0
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
vector: {
value: 0.01
}
vector: {
value: 0.032
}
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 5
left_id: 3
right_id: 4
}
metadata {
gain: 0.5
original_leaf {
vector: {
value: 0.0143
}
vector: {
value: 0.022
}
}
}
}
nodes {
leaf {
vector: {
value: 0.0553
}
vector: {
value: -0.02
}
}
}
nodes {
leaf {
vector: {
value: 0.0783
}
vector: {
value: 0.012
}
}
}
}
trees {
nodes {
leaf {
vector: {
value: 0.55
}
vector: {
value: 0.03
}
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
logit_change: -0.02
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
logit_change: -0.42
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
logit_change: -0.05
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
logit_change: -0.11
}
}
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 4
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
cached_tree_ids = [0, 0, 0, 0, 0, 0]
      # Leaves 3, 4, 7 and 8 got deleted during post-pruning; leaves 5 and 6
      # changed their ids to 3 and 4 respectively.
cached_node_ids = [3, 4, 5, 6, 7, 8]
# We have two features: 0 and 1.
feature_0_values = [12, 17, 35, 36, 23, 11]
feature_1_values = [12, 12, 17, 18, 123, 24]
# Multi class.
logits_dimension = 2
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=logits_dimension)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the last tree.
self.assertAllClose([1, 1, 1, 1, 1, 1], new_tree_ids)
      # Examples from leaves 3, 4, 7 and 8 should be in leaf 1; examples from
      # leaves 5 and 6 end up in leaves 3 and 4 of tree 0. For tree 1, all of
      # the examples are in the root node.
self.assertAllClose([0, 0, 0, 0, 0, 0], new_node_ids)
tree1_logits = np.array([[0.01, 0.032], [0.01, 0.032], [0.0553, -0.02],
[0.0783, 0.012], [0.01, 0.032], [0.01, 0.032]])
tree2_root_weights = [0.55, 0.03]
expected_logits = tree1_logits
expected_logits[:, 0] += tree2_root_weights[0]
expected_logits[:, 1] += tree2_root_weights[1]
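      # Note: the in-place += also mutates tree1_logits, which is harmless
      # here since tree1_logits is not used again after this point.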
cached_values = np.array([[0.01 + 0.07, 0.032 + 0.02],
[0.01 + 0.083, 0.032 + 0.42], [0.0553, -0.02],
[0.0783, 0.012],
[0.08 + (-0.07 + 0.22), 0.052 + (-0.02 + 0.05)],
[0.08 + (-0.07 + 0.57),
0.052 + (-0.02 + 0.11)]])
self.assertAllClose(expected_logits,
np.array(logits_updates) + cached_values)
@test_util.run_deprecated_v1
def testCachedPredictionTheWholeTreeWasPruned(self):
"""Tests that prediction based on previous node in the tree works."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
leaf {
scalar: 0.00
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -6.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 5.0
}
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
      cached_tree_ids = [0, 0]
      # The predictions were cached in nodes 1 and 2; both were pruned to the
      # root.
      cached_node_ids = [1, 2]
      # We have two features: 0 and 1. These are not going to be used anywhere.
feature_0_values = [12, 17]
feature_1_values = [12, 12]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the last tree.
self.assertAllClose([0, 0], new_tree_ids)
self.assertAllClose([0, 0], new_node_ids)
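      # The update for each example is exactly the stored logit_change of its
      # pruned node (-6.0 for old node 1, 5.0 for old node 2).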
self.assertAllClose([[-6.0], [5.0]], logits_updates)
@test_util.run_deprecated_v1
def testCachedPredictionTheWholeTreeWasPrunedMultiClass(self):
"""Tests that prediction works when whole tree was pruned multi class."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
leaf {
vector: {
value: 0.00
}
vector: {
value: 0.00
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -6.0
logit_change: -2.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 5.0
logit_change: -0.4
}
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
cached_tree_ids = [0, 0]
      # The predictions were cached in nodes 1 and 2; both were pruned to the
      # root.
      cached_node_ids = [1, 2]
      # We have two features: 0 and 1. These are not going to be used anywhere.
feature_0_values = [12, 17]
feature_1_values = [12, 12]
# Multi class.
logits_dimension = 2
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=logits_dimension)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the last tree.
self.assertAllClose([0, 0], new_tree_ids)
self.assertAllClose([0, 0], new_node_ids)
self.assertAllClose([[-6.0, -2.0], [5.0, -0.4]], logits_updates)
class PredictionOpsTest(test_util.TensorFlowTestCase):
"""Tests prediction ops for inference."""
@test_util.run_deprecated_v1
def testPredictionOnEmptyEnsemble(self):
"""Tests that prediction on a empty ensemble does not fail."""
with self.cached_session() as session:
# Create an empty ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto='')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_0_values = [36, 32]
feature_1_values = [11, 27]
expected_logits = [[0.0], [0.0]]
# Prediction should work fine.
predict_op = boosted_trees_ops.predict(
tree_ensemble_handle,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits = session.run(predict_op)
self.assertAllClose(expected_logits, logits)
@test_util.run_deprecated_v1
def testPredictionOnEmptyEnsembleMultiClass(self):
"""Tests that prediction on empty ensemble does not fail for multiclass."""
with self.cached_session() as session:
# Create an empty ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto='')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_0_values = [36, 32]
feature_1_values = [11, 27]
logits_dimension = 2
expected_logits = [[0.0, 0.0], [0.0, 0.0]]
# Prediction should work fine.
predict_op = boosted_trees_ops.predict(
tree_ensemble_handle,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=logits_dimension)
logits = session.run(predict_op)
self.assertAllClose(expected_logits, logits)
@test_util.run_deprecated_v1
def testPredictionMultipleTree(self):
"""Tests the predictions work when we have multiple trees."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 28
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 26
left_id: 1
right_id: 2
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 50
left_id: 3
right_id: 4
}
}
nodes {
leaf {
scalar: 7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
nodes {
leaf {
scalar: 6.0
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 34
left_id: 1
right_id: 2
}
}
nodes {
leaf {
scalar: -7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
}
tree_weights: 0.1
tree_weights: 0.2
tree_weights: 1.0
""", tree_ensemble_config)
# Create an existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_0_values = [36, 32]
feature_1_values = [11, 27]
# Example 1: tree 0: 1.14, tree 1: 5.0, tree 2: 5.0 =>
#   logit = 0.1 * 1.14 + 0.2 * 5.0 + 1 * 5.0
# Example 2: tree 0: 1.14, tree 1: 7.0, tree 2: -7.0 =>
#   logit = 0.1 * 1.14 + 0.2 * 7.0 - 1 * 7.0
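# i.e. 0.114 + 1.0 + 5.0 = 6.114 and 0.114 + 1.4 - 7.0 = -5.486.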
expected_logits = [[6.114], [-5.486]]
# Prediction should work fine.
predict_op = boosted_trees_ops.predict(
tree_ensemble_handle,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits = session.run(predict_op)
self.assertAllClose(expected_logits, logits)
@test_util.run_deprecated_v1
def testPredictionMultipleTreeMultiClass(self):
"""Tests the predictions work when we have multiple trees."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 28
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
vector: {
value: 0.51
}
vector: {
value: 1.14
}
}
}
nodes {
leaf {
vector: {
value: 1.29
}
vector: {
value: 8.79
}
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 26
left_id: 1
right_id: 2
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 50
left_id: 3
right_id: 4
}
}
nodes {
leaf {
vector: {
value: -4.33
}
vector: {
value: 7.0
}
}
}
nodes {
leaf {
vector: {
value: 0.2
}
vector: {
value: 5.0
}
}
}
nodes {
leaf {
vector: {
value: -4.1
}
vector: {
value: 6.0
}
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 34
left_id: 1
right_id: 2
}
}
nodes {
leaf {
vector: {
value: 2.0
}
vector: {
value: -7.0
}
}
}
nodes {
leaf {
vector: {
value: 6.3
}
vector: {
value: 5.0
}
}
}
}
tree_weights: 0.1
tree_weights: 0.2
tree_weights: 1.0
""", tree_ensemble_config)
# Create an existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_0_values = [36, 32]
feature_1_values = [11, 27]
# Example 1: tree 0: (0.51, 1.14), tree 1: (0.2, 5.0), tree 2: (6.3, 5.0)
#
# logits = (0.1 * 0.51 + 0.2 * 0.2 + 1 * 6.3,
#           0.1 * 1.14 + 0.2 * 5.0 + 1 * 5.0)
# Example 2: tree 0: (0.51, 1.14), tree 1: (-4.33, 7.0), tree 2: (2.0, -7.0)
#
# logits = (0.1 * 0.51 + 0.2 * -4.33 + 1 * 2.0,
#           0.1 * 1.14 + 0.2 * 7.0 + 1 * -7.0)
logits_dimension = 2
expected_logits = [[6.391, 6.114], [1.185, -5.486]]
# Prediction should work fine.
predict_op = boosted_trees_ops.predict(
tree_ensemble_handle,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=logits_dimension)
logits = session.run(predict_op)
self.assertAllClose(expected_logits, logits)
@test_util.run_deprecated_v1
def testCategoricalSplits(self):
"""Tests the predictions work for categorical splits."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
categorical_split {
feature_id: 1
value: 2
left_id: 1
right_id: 2
}
}
nodes {
categorical_split {
feature_id: 0
value: 13
left_id: 3
right_id: 4
}
}
nodes {
leaf {
scalar: 7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
nodes {
leaf {
scalar: 6.0
}
}
}
tree_weights: 1.0
""", tree_ensemble_config)
# Create an existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_0_values = [13, 1, 3]
feature_1_values = [2, 2, 1]
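# A categorical_split routes an example to left_id when the feature value
# equals the split's `value`, and to right_id otherwise; that routing
# produces the expected logits below.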
expected_logits = [[5.], [6.], [7.]]
# Prediction should work fine.
predict_op = boosted_trees_ops.predict(
tree_ensemble_handle,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits = session.run(predict_op)
self.assertAllClose(expected_logits, logits)
class FeatureContribsOpsTest(test_util.TensorFlowTestCase):
"""Tests feature contribs ops for model understanding."""
@test_util.run_deprecated_v1
def testContribsForOnlyABiasNode(self):
"""Tests case when, after training, only left with a bias node.
For example, this could happen if the final ensemble contains one tree that
got pruned up to the root.
"""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
leaf {
scalar: 1.72
}
}
}
tree_weights: 0.1
tree_metadata: {
num_layers_grown: 0
}
""", tree_ensemble_config)
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# All features are unused.
feature_0_values = [36, 32]
feature_1_values = [13, -29]
feature_2_values = [11, 27]
# Expected logits are computed by traversing the logit path and
# subtracting child logits from parent logits.
bias = 1.72 * 0.1 # Root node of tree_0.
expected_feature_ids = ((), ())
expected_logits_paths = ((bias,), (bias,))
bucketized_features = [
feature_0_values, feature_1_values, feature_2_values
]
debug_op = boosted_trees_ops.example_debug_outputs(
tree_ensemble_handle,
bucketized_features=bucketized_features,
logits_dimension=1)
serialized_examples_debug_outputs = session.run(debug_op)
feature_ids = []
logits_paths = []
for example in serialized_examples_debug_outputs:
example_debug_outputs = boosted_trees_pb2.DebugOutput()
example_debug_outputs.ParseFromString(example)
feature_ids.append(example_debug_outputs.feature_ids)
logits_paths.append(example_debug_outputs.logits_path)
self.assertAllClose(feature_ids, expected_feature_ids)
self.assertAllClose(logits_paths, expected_logits_paths)
@test_util.run_deprecated_v1
def testContribsMultipleTreeWhenFirstTreeIsABiasNode(self):
"""Tests case when, after training, first tree contains only a bias node."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
leaf {
scalar: 1.72
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 2
threshold: 26
left_id: 1
right_id: 2
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 50
left_id: 3
right_id: 4
}
metadata {
original_leaf: {scalar: 5.5}
}
}
nodes {
leaf {
scalar: 7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
nodes {
leaf {
scalar: 6.0
}
}
}
tree_weights: 1.
tree_weights: 0.1
tree_metadata: {
num_layers_grown: 0
}
tree_metadata: {
num_layers_grown: 1
}
""", tree_ensemble_config)
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_0_values = [36, 32]
feature_1_values = [13, -29] # Unused feature.
feature_2_values = [11, 27]
# Expected logits are computed by traversing the logit path and
# subtracting child logits from parent logits.
expected_feature_ids = ((2, 0), (2,))
# bias = 1.72 * 1. # Root node of tree_0.
# example_0 : (bias, 0.1 * 5.5 + bias, 0.1 * 5. + bias)
# example_1 : (bias, 0.1 * 7. + bias )
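# i.e. (1.72, 1.72 + 0.55, 1.72 + 0.50) and (1.72, 1.72 + 0.70).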
expected_logits_paths = ((1.72, 2.27, 2.22), (1.72, 2.42))
bucketized_features = [
feature_0_values, feature_1_values, feature_2_values
]
debug_op = boosted_trees_ops.example_debug_outputs(
tree_ensemble_handle,
bucketized_features=bucketized_features,
logits_dimension=1)
serialized_examples_debug_outputs = session.run(debug_op)
feature_ids = []
logits_paths = []
for example in serialized_examples_debug_outputs:
example_debug_outputs = boosted_trees_pb2.DebugOutput()
example_debug_outputs.ParseFromString(example)
feature_ids.append(example_debug_outputs.feature_ids)
logits_paths.append(example_debug_outputs.logits_path)
self.assertAllClose(feature_ids, expected_feature_ids)
self.assertAllClose(logits_paths, expected_logits_paths)
@test_util.run_deprecated_v1
def testContribsMultipleTree(self):
"""Tests that the contribs work when we have multiple trees."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 2
threshold: 28
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf: {scalar: 2.1}
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 2
threshold: 26
left_id: 1
right_id: 2
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 50
left_id: 3
right_id: 4
}
metadata {
original_leaf: {scalar: 5.5}
}
}
nodes {
leaf {
scalar: 7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
nodes {
leaf {
scalar: 6.0
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 34
left_id: 1
right_id: 2
}
}
nodes {
leaf {
scalar: -7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
}
tree_weights: 0.1
tree_weights: 0.2
tree_weights: 1.0
tree_metadata: {
num_layers_grown: 1
}
tree_metadata: {
num_layers_grown: 2
}
tree_metadata: {
num_layers_grown: 1
}
""", tree_ensemble_config)
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_0_values = [36, 32]
feature_1_values = [13, -29]  # Unused; this feature is not in the ensemble above.
feature_2_values = [11, 27]
# Expected logits are computed by traversing the logit path and
# subtracting child logits from parent logits.
bias = 2.1 * 0.1 # Root node of tree_0.
expected_feature_ids = ((2, 2, 0, 0), (2, 2, 0))
# example_0 : (bias, 0.1 * 1.14, 0.2 * 5.5 + .114, 0.2 * 5. + .114,
# 1.0 * 5.0 + 0.2 * 5. + .114)
# example_1 : (bias, 0.1 * 1.14, 0.2 * 7 + .114,
# 1.0 * -7. + 0.2 * 7 + .114)
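# (In the paths below, .114 is 0.1 * 1.14: the cumulative logit once
# tree_0's root is split.)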
expected_logits_paths = ((bias, 0.114, 1.214, 1.114, 6.114),
(bias, 0.114, 1.514, -5.486))
bucketized_features = [
feature_0_values, feature_1_values, feature_2_values
]
debug_op = boosted_trees_ops.example_debug_outputs(
tree_ensemble_handle,
bucketized_features=bucketized_features,
logits_dimension=1)
serialized_examples_debug_outputs = session.run(debug_op)
feature_ids = []
logits_paths = []
for example in serialized_examples_debug_outputs:
example_debug_outputs = boosted_trees_pb2.DebugOutput()
example_debug_outputs.ParseFromString(example)
feature_ids.append(example_debug_outputs.feature_ids)
logits_paths.append(example_debug_outputs.logits_path)
self.assertAllClose(feature_ids, expected_feature_ids)
self.assertAllClose(logits_paths, expected_logits_paths)
if __name__ == '__main__':
googletest.main()
| tensorflow-master | tensorflow/python/kernel_tests/boosted_trees/prediction_ops_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_grad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_grad
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class AddLeadingUnitDimensionsTest(test.TestCase):
def testBasic(self):
ret = random_grad.add_leading_unit_dimensions(array_ops.ones([3, 2, 1]), 3)
self.assertAllEqual(ret.shape, [1, 1, 1, 3, 2, 1])
def testZeroExtraDimensions(self):
ret = random_grad.add_leading_unit_dimensions(array_ops.ones([3, 2, 1]), 0)
self.assertAllEqual(ret.shape, [3, 2, 1])
def testScalarInput(self):
ret = random_grad.add_leading_unit_dimensions(1.0, 2)
self.assertAllEqual(ret.shape, [1, 1])
@test_util.run_deprecated_v1
def testUnknownShape(self):
x = array_ops.placeholder(dtypes.float32)
num_dimensions = array_ops.placeholder(dtypes.int32)
ret = random_grad.add_leading_unit_dimensions(x, num_dimensions)
with self.cached_session() as sess:
ret_val = sess.run(ret, {x: np.ones([2, 2]), num_dimensions: 2})
self.assertAllEqual(ret_val.shape, [1, 1, 2, 2])
class RandomGammaGradTest(test.TestCase):
"""Tests for derivative of a sample ~ Gamma(alpha, beta) wrt alpha and beta.
The sample is an "implicit" function of alpha, beta and the independent random
noise u. The derivatives we are looking for are
d sample(alpha, beta, u) / dalpha (and dbeta).
The derivative w.r.t. beta is computed by the standard automatic
differentiation, so we trust that it is computed correctly.
The derivative w.r.t. alpha is computed by an Eigen function, so we test it in
several ways. Unfortunately, the standard derivative checking by perturbing
the parameter is impossible here, because we cannot fix the value of u
in the random sampler. Instead, we compare the derivative for the given pair
of (sample, alpha) to the values computed in various ways, and also check
some statistical properties of the derivative.
"""
@test_util.run_deprecated_v1
def testGradientsShape(self):
shape = [2, 3]
alpha = array_ops.ones([2, 2])
beta = array_ops.ones([1, 2])
sample = random_ops.random_gamma(shape, alpha, beta, seed=12345)
grads_alpha, grads_beta = gradients_impl.gradients(sample, [alpha, beta])
self.assertAllEqual(grads_alpha.shape, alpha.shape)
self.assertAllEqual(grads_beta.shape, beta.shape)
@test_util.run_deprecated_v1
def testGradientsShapeWithOneSamplePerParameter(self):
shape = []
alpha = array_ops.ones([2, 2])
beta = array_ops.ones([1, 2])
sample = random_ops.random_gamma(shape, alpha, beta, seed=12345)
grads_alpha, grads_beta = gradients_impl.gradients(sample, [alpha, beta])
self.assertAllEqual(grads_alpha.shape, alpha.shape)
self.assertAllEqual(grads_beta.shape, beta.shape)
@test_util.run_deprecated_v1
def testGradientsUnknownShape(self):
shape = array_ops.placeholder(dtypes.int32)
alpha = array_ops.placeholder(dtypes.float32)
beta = array_ops.placeholder(dtypes.float32)
sample = random_ops.random_gamma(shape, alpha, beta, seed=12345)
grads_alpha, grads_beta = gradients_impl.gradients(sample, [alpha, beta])
alpha_val = np.ones([1, 2])
beta_val = np.ones([2, 1])
with self.cached_session() as sess:
grads_alpha_val, grads_beta_val = sess.run(
[grads_alpha, grads_beta],
{alpha: alpha_val, beta: beta_val, shape: [2, 1]})
self.assertAllEqual(grads_alpha_val.shape, alpha_val.shape)
self.assertAllEqual(grads_beta_val.shape, beta_val.shape)
def _testCompareToExplicitDerivative(self, dtype):
"""Compare to the explicit reparameterization derivative.
Verifies that the computed derivative satisfies
dsample / dalpha = d igammainv(alpha, u) / dalpha,
where u = igamma(alpha, sample).
Args:
dtype: TensorFlow dtype to perform the computations in.
"""
delta = 1e-3
np_dtype = dtype.as_numpy_dtype
try:
from scipy import misc # pylint: disable=g-import-not-at-top
from scipy import special # pylint: disable=g-import-not-at-top
alpha_val = np.logspace(-2, 3, dtype=np_dtype)
alpha = constant_op.constant(alpha_val)
sample = random_ops.random_gamma(
[], alpha, np_dtype(1.0), dtype=dtype, seed=12345)
actual = gradients_impl.gradients(sample, alpha)[0]
(sample_val, actual_val) = self.evaluate((sample, actual))
u = special.gammainc(alpha_val, sample_val)
expected_val = misc.derivative(
lambda alpha_prime: special.gammaincinv(alpha_prime, u),
alpha_val, dx=delta * alpha_val)
self.assertAllClose(actual_val, expected_val, rtol=1e-3, atol=1e-3)
except ImportError as e:
tf_logging.warn("Cannot use special functions in a test: %s" % str(e))
@test_util.run_deprecated_v1
def testCompareToExplicitDerivativeFloat(self):
self._testCompareToExplicitDerivative(dtypes.float32)
@test_util.run_deprecated_v1
def testCompareToExplicitDerivativeDouble(self):
self._testCompareToExplicitDerivative(dtypes.float64)
def _testCompareToImplicitDerivative(self, dtype):
"""Compare to the implicit reparameterization derivative.
Let's derive the formula we compare to.
Start from the fact that CDF maps a random variable to the Uniform
random variable:
igamma(alpha, sample) = u, where u ~ Uniform(0, 1).
Apply d / dalpha to both sides:
d igamma(alpha, sample) / dalpha
+ d igamma(alpha, sample) / dsample * dsample / dalpha = 0
dsample / dalpha = - (d igamma(alpha, sample) / dalpha)
/ (d igamma(alpha, sample) / dsample)
This is the equation (8) of https://arxiv.org/abs/1805.08498
Args:
dtype: TensorFlow dtype to perform the computations in.
"""
np_dtype = dtype.as_numpy_dtype
alpha = constant_op.constant(np.logspace(-2, 3, dtype=np_dtype))
sample = random_ops.random_gamma(
[], alpha, np_dtype(1.0), dtype=dtype, seed=12345)
actual = gradients_impl.gradients(sample, alpha)[0]
sample_sg = array_ops.stop_gradient(sample)
cdf = math_ops.igamma(alpha, sample_sg)
dcdf_dalpha, dcdf_dsample = gradients_impl.gradients(
cdf, [alpha, sample_sg])
# Numerically unstable due to division, do not try at home.
expected = -dcdf_dalpha / dcdf_dsample
(actual_val, expected_val) = self.evaluate((actual, expected))
self.assertAllClose(actual_val, expected_val, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testCompareToImplicitDerivativeFloat(self):
self._testCompareToImplicitDerivative(dtypes.float32)
@test_util.run_deprecated_v1
def testCompareToImplicitDerivativeDouble(self):
self._testCompareToImplicitDerivative(dtypes.float64)
@test_util.run_deprecated_v1
def testAverageAlphaGradient(self):
"""Statistical test for the gradient.
Using the equation (5) of https://arxiv.org/abs/1805.08498, we have
1 = d/dalpha E_{sample ~ Gamma(alpha, 1)} sample
= E_{sample ~ Gamma(alpha, 1)} dsample/dalpha.
Here we verify that the rhs is fairly close to one.
The convergence speed is not great, so we use many samples and loose bounds.
"""
num_samples = 10000
alpha = constant_op.constant([0.8, 1e1, 1e3], dtype=dtypes.float32)
sample = random_ops.random_gamma([num_samples], alpha, seed=12345)
# We need to average the gradients, which is equivalent to averaging the
# samples and then doing backprop.
mean_sample = math_ops.reduce_mean(sample, axis=0)
dsample_dalpha = gradients_impl.gradients(mean_sample, alpha)[0]
dsample_dalpha_val = self.evaluate(dsample_dalpha)
self.assertAllClose(dsample_dalpha_val, [1.0] * 3, atol=1e-1, rtol=1e-1)
@test_util.run_deprecated_v1
def testQuadraticLoss(self):
"""Statistical test for the gradient.
The equation (5) of https://arxiv.org/abs/1805.08498 says
d/dalpha E_{sample ~ Gamma(alpha, 1)} f(sample)
= E_{sample ~ Gamma(alpha, 1)} df(sample)/dalpha.
Choose a quadratic loss function f(sample) = (sample - t)^2.
Then, the lhs can be computed analytically:
d/dalpha E_{sample ~ Gamma(alpha, 1)} f(sample)
= d/dalpha [ (alpha + alpha^2) - 2 * t * alpha + t^2 ]
= 1 + 2 * alpha - 2 * t.
We compare the Monte-Carlo estimate of the expectation with the
true gradient.
"""
num_samples = 10000
t = 0.3
alpha = 0.5
expected = 1 + 2 * alpha - 2 * t
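# With alpha = 0.5 and t = 0.3, this evaluates to 1 + 1.0 - 0.6 = 1.4.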
alpha = constant_op.constant(alpha)
sample = random_ops.random_gamma([num_samples], alpha, 1.0, seed=12345)
loss = math_ops.reduce_mean(math_ops.square(sample - t))
dloss_dalpha = gradients_impl.gradients(loss, alpha)[0]
dloss_dalpha_val = self.evaluate(dloss_dalpha)
self.assertAllClose(expected, dloss_dalpha_val, atol=1e-1, rtol=1e-1)
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/python/kernel_tests/random/random_grad_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RandomOpTestCommon(test.TestCase):
# Checks that executing the same rng_func multiple times rarely produces the
# same result.
def _testSingleSessionNotConstant(self,
rng_func,
num,
dtype,
min_or_mean,
max_or_stddev,
use_gpu,
op_seed=None,
graph_seed=None):
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
if graph_seed is not None:
random_seed.set_random_seed(graph_seed)
x = rng_func([num], min_or_mean, max_or_stddev, dtype=dtype, seed=op_seed)
y = self.evaluate(x)
z = self.evaluate(x)
w = self.evaluate(x)
# We use exact equality here. If the random-number generator is producing
# the same output, all three outputs will be bitwise identical.
self.assertTrue((not np.array_equal(y, z)) or
(not np.array_equal(z, w)) or (not np.array_equal(y, w)))
class RandomNormalTest(RandomOpTestCommon):
def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
def func():
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.random_normal(
[num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = self.evaluate(rng)
return ret
return func
# Asserts that different trials (1000 samples per trial) are unlikely to
# produce the same sequence of values. This catches buggy implementations
# that reuse the same random-number seed.
def testDistinct(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True)
x = sampler()
y = sampler()
# Number of different samples.
count = (x == y).sum()
if count >= 10:
print("x = ", x)
print("y = ", y)
print("count = ", count)
self.assertTrue(count < 10)
# Checks that the CPU and GPU implementations return the same results,
# given the same random seed.
@test_util.run_deprecated_v1
def testCPUGPUMatch(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
results = {}
for use_gpu in [False, True]:
sampler = self._Sampler(
1000000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
@test_util.run_deprecated_v1
def testSeed(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
self.assertAllEqual(sx(), sy())
@test_util.run_deprecated_v1
def testNoCSE(self):
for use_gpu in [False, True]:
with self.session(use_gpu=use_gpu):
shape = [2, 3, 4]
rnd1 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
rnd2 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
diff = rnd2 - rnd1
self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
@test_util.run_deprecated_v1
def testSingleSessionNotConstant(self):
for use_gpu in [False, True]:
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
self._testSingleSessionNotConstant(
random_ops.random_normal, 100, dt, 0.0, 1.0, use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testSingleSessionOpSeedNotConstant(self):
for use_gpu in [False, True]:
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
self._testSingleSessionNotConstant(
random_ops.random_normal,
100,
dt,
0.0,
1.0,
use_gpu=use_gpu,
op_seed=1345)
@test_util.run_deprecated_v1
def testSingleSessionGraphSeedNotConstant(self):
for use_gpu in [False, True]:
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
self._testSingleSessionNotConstant(
random_ops.random_normal,
100,
dt,
0.0,
1.0,
use_gpu=use_gpu,
graph_seed=965)
class TruncatedNormalTest(test.TestCase):
def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
def func():
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.truncated_normal(
[num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = self.evaluate(rng)
return ret
return func
# Asserts that different trials (1000 samples per trial) are unlikely to
# produce the same sequence of values. This catches buggy implementations
# that reuse the same random-number seed.
def testDistinct(self):
# NOTE: TruncatedNormal on GPU is not supported.
if not test.is_gpu_available():
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=False)
x = sampler()
y = sampler()
# Number of different samples.
count = (x == y).sum()
if count >= 10:
print("x = ", x)
print("y = ", y)
print("count = ", count)
self.assertTrue(count < 10)
# Checks that the CPU and GPU implementations return the same results,
# given the same random seed.
@test_util.run_deprecated_v1
def testCPUGPUMatch(self):
# Skip the test if there is no GPU.
if not test.is_gpu_available():
return
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
results = {}
for use_gpu in [False, True]:
# We need a sufficiently large number of samples to exercise multiple
# rounds on the GPU.
sampler = self._Sampler(
1000000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
@test_util.run_deprecated_v1
def testSeed(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
self.assertAllEqual(sx(), sy())
# The effective standard deviation of a truncated normal is roughly 85% of
# the requested one.
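# (For truncation at +/-2 stddev the exact ratio is
# sqrt(1 - 4 * pdf(2) / (cdf(2) - cdf(-2))) ~= 0.88, which the loose
# 0.85 +/- 0.04 band below accommodates.)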
def testStdDev(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
stddev = 3.0
sampler = self._Sampler(100000, 0.0, stddev, dt, use_gpu=True)
x = sampler()
print("std(x)", np.std(x), abs(np.std(x) / stddev - 0.85))
self.assertTrue(abs(np.std(x) / stddev - 0.85) < 0.04)
@test_util.run_deprecated_v1
def testLargeShape(self):
with self.session(use_gpu=True):
v = variables.Variable(
array_ops.zeros(dtype=dtypes.float32, shape=[2**33, 1]))
n = random_ops.truncated_normal(v.shape)
self.assertEqual([8589934592, 1], n.shape.as_list())
@test_util.run_deprecated_v1
def testNoCSE(self):
with self.session(use_gpu=True):
shape = [2, 3, 4]
rnd1 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
rnd2 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
diff = rnd2 - rnd1
self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
def testEagerSeed(self):
with context.eager_mode():
# Ensure a context has been created
random_ops.random_normal([])
# Set the same seed twice and check that the values match
context.set_global_seed(42)
rnd1 = random_ops.random_normal([])
context.set_global_seed(42)
rnd2 = random_ops.random_normal([])
self.assertAllEqual(rnd1, rnd2)
@test_util.for_all_test_methods(test_util.disable_xla,
"This never passed on XLA")
class RandomUniformTest(RandomOpTestCommon):
def _Sampler(self, num, minv, maxv, dtype, use_gpu, seed=None):
def func():
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.random_uniform(
[num], minval=minv, maxval=maxv, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = self.evaluate(rng)
return ret
return func
def testRange(self):
for dt in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64):
sampler = self._Sampler(1000, minv=-2, maxv=8, dtype=dt, use_gpu=True)
x = sampler()
self.assertTrue(-2 <= np.min(x))
self.assertTrue(np.max(x) < 8)
# Asserts that different trials (1000 samples per trial) are unlikely to
# produce the same sequence of values. This catches buggy implementations
# that reuse the same random-number seed.
def testDistinct(self):
for dt in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64):
maxv = 1.0 if dt.is_floating else 1 << 30
sampler = self._Sampler(1000, minv=0, maxv=maxv, dtype=dt, use_gpu=True)
x = sampler()
y = sampler()
count = (x == y).sum()
count_limit = 50 if dt == dtypes.float16 else 10
if count >= count_limit:
print("x = ", x)
print("y = ", y)
print("count = ", count)
self.assertTrue(count < count_limit)
@test_util.run_deprecated_v1
def testUniformIntsWithInvalidShape(self):
for dtype in dtypes.int32, dtypes.int64:
with self.assertRaisesRegexp(
ValueError, "Shape must be rank 0 but is rank 1"):
random_ops.random_uniform(
[1000], minval=[1, 2], maxval=3, dtype=dtype)
with self.assertRaisesRegexp(
ValueError, "Shape must be rank 0 but is rank 1"):
random_ops.random_uniform(
[1000], minval=1, maxval=[2, 3], dtype=dtype)
# Check that uniform ints actually follow a uniform distribution.
@test_util.run_deprecated_v1
def testUniformInts(self):
minv = -2
maxv = 15
n = 100000
p = 1 / (maxv - minv)
# The counts should follow an (n, p) binomial distribution.
mean = p * n
std = np.sqrt(n * p * (1 - p))
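# For n = 100000 and p = 1/17: mean ~= 5882 and std ~= 74, so the 5 * std
# bound below tolerates an error of roughly 372 counts per bucket.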
for dt in dtypes.int32, dtypes.int64:
# Use a fixed seed here to make the test deterministic.
# Without the fixed seed, the 5 * std bound will (very rarely) fail.
sampler = self._Sampler(
n // 10, minv=minv, maxv=maxv, dtype=dt, use_gpu=True, seed=17)
x = sampler().ravel()
self.assertEqual(x.shape, (n,))
counts, _ = np.histogram(x, bins=maxv - minv)
self.assertEqual(counts.shape, (maxv - minv,))
self.assertEqual(counts.sum(), n)
error = np.abs(counts - mean)
self.assertLess(error.max(), 5 * std)
# Check that minval == maxval is fine iff we're producing no numbers.
def testUniformIntsDegenerate(self):
for dt in dtypes.int32, dtypes.int64:
def sample(n):
return self._Sampler(n, minv=0, maxv=0, dtype=dt, use_gpu=True)()
self.assertEqual(sample(0).shape, (10, 0))
with self.assertRaisesOpError('Need minval < maxval, got 0 >= 0'):
sample(1)
# Checks that the CPU and GPU implementations return the same results,
# given the same random seed.
@test_util.run_deprecated_v1
def testCPUGPUMatch(self):
for dt in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64):
maxv = 1.0 if dt.is_floating else 17
results = {}
for use_gpu in False, True:
sampler = self._Sampler(
1000000, minv=0, maxv=maxv, dtype=dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
self.assertAllEqual(results[False], results[True])
@test_util.run_deprecated_v1
def testSeed(self):
for dt in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64):
for seed in [345, 2**100, -2**100]:
sx = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
sy = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
self.assertAllEqual(sx(), sy())
@test_util.run_deprecated_v1
def testNoCSE(self):
shape = [2, 3, 4]
for dtype in dtypes.float16, dtypes.float32, dtypes.int32:
with self.session(use_gpu=True):
rnd1 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
rnd2 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
diff = (rnd2 - rnd1).eval()
self.assertTrue(np.linalg.norm(diff) > 0.1)
@test_util.run_deprecated_v1
def testSingleSessionNotConstant(self):
for use_gpu in [False, True]:
for dt in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64):
self._testSingleSessionNotConstant(
random_ops.random_uniform, 100, dt, 0, 17, use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testSingleSessionOpSeedNotConstant(self):
for use_gpu in [False, True]:
for dt in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64):
self._testSingleSessionNotConstant(
random_ops.random_uniform,
100,
dt,
10,
20,
use_gpu=use_gpu,
op_seed=1345)
@test_util.run_deprecated_v1
def testSingleSessionGraphSeedNotConstant(self):
for use_gpu in [False, True]:
for dt in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64):
self._testSingleSessionNotConstant(
random_ops.random_uniform,
100,
dt,
20,
200,
use_gpu=use_gpu,
graph_seed=965)
class RandomShapeTest(test.TestCase):
@test_util.run_deprecated_v1
def testTruncatedNormal(self):
# Fully known shape.
rnd1 = random_ops.truncated_normal([1, 2, 3])
self.assertEqual([1, 2, 3], rnd1.get_shape())
# Partially known shape.
rnd2 = random_ops.truncated_normal(
array_ops.placeholder(dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], rnd2.get_shape().as_list())
# Unknown shape.
rnd3 = random_ops.truncated_normal(array_ops.placeholder(dtypes.int32))
self.assertIs(None, rnd3.get_shape().ndims)
@test_util.run_deprecated_v1
def testRandomNormal(self):
# Fully known shape.
rnd1 = random_ops.random_normal([1, 2, 3])
self.assertEqual([1, 2, 3], rnd1.get_shape())
# Partially known shape.
rnd2 = random_ops.random_normal(
array_ops.placeholder(dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], rnd2.get_shape().as_list())
# Unknown shape.
rnd3 = random_ops.random_normal(array_ops.placeholder(dtypes.int32))
self.assertIs(None, rnd3.get_shape().ndims)
@test_util.run_deprecated_v1
def testRandomUniform(self):
# Fully known shape.
rnd1 = random_ops.random_uniform([1, 2, 3])
self.assertEqual([1, 2, 3], rnd1.get_shape())
# Partially known shape.
rnd2 = random_ops.random_uniform(
array_ops.placeholder(dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], rnd2.get_shape().as_list())
# Unknown shape.
rnd3 = random_ops.random_uniform(array_ops.placeholder(dtypes.int32))
self.assertIs(None, rnd3.get_shape().ndims)
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/python/kernel_tests/random/random_ops_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Long tests for Multinomial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class MultinomialTest(test.TestCase):
# Check that events with tiny probabilities are not over-sampled.
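# With logits [-30, 0], softmax gives P(index 0) = exp(-30) / (1 + exp(-30))
# ~= 9.4e-14, so effectively all ~1e8 draws should land on index 1.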
def testLargeDynamicRange(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
with self.test_session(use_gpu=True) as sess:
samples = random_ops.multinomial(
constant_op.constant([[-30, 0]], dtype=dtypes.float32),
num_samples=1000000,
seed=15)
for _ in range(100):
x = self.evaluate(samples)
indices, counts = np.unique(x, return_counts=True)
for index, count in zip(indices, counts):
if index in counts_by_indices.keys():
counts_by_indices[index] += count
else:
counts_by_indices[index] = count
self.assertEqual(counts_by_indices[1], 100000000)
def testLargeDynamicRange2(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
with self.test_session(use_gpu=True) as sess:
samples = random_ops.multinomial(
constant_op.constant([[0, -30]], dtype=dtypes.float32),
num_samples=1000000,
seed=15)
for _ in range(100):
x = self.evaluate(samples)
indices, counts = np.unique(x, return_counts=True)
for index, count in zip(indices, counts):
if index in counts_by_indices.keys():
counts_by_indices[index] += count
else:
counts_by_indices[index] = count
self.assertEqual(counts_by_indices[0], 100000000)
@test_util.run_deprecated_v1
def testLargeDynamicRange3(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
# Here the CPU undersamples, so it won't pass this test either.
with self.test_session(use_gpu=True) as sess:
samples = random_ops.multinomial(
constant_op.constant([[0, -17]], dtype=dtypes.float32),
num_samples=1000000,
seed=22)
# We'll run out of memory if we try to draw 1e9 samples directly, even
# though that really should fit in 12GB of memory...
for _ in range(100):
x = self.evaluate(samples)
indices, counts = np.unique(x, return_counts=True)
for index, count in zip(indices, counts):
if index in counts_by_indices.keys():
counts_by_indices[index] += count
else:
counts_by_indices[index] = count
self.assertGreater(counts_by_indices[1], 0)
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/python/kernel_tests/random/multinomial_op_big_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
@test_util.run_v1_only("RandomShuffleQueue removed from v2")
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf_logging.error("Starting: %s", self._testMethodName)
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = self.evaluate(dequeue_t)
results.append((a, b))
a, b = self.evaluate(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = self.evaluate(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], self.evaluate(size))
dequeued_t.op.run()
self.assertEqual([0], self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, self.evaluate(size_t))
enqueue_op.run()
self.assertEqual(0, self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueMany
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testEmptyDequeueUpToWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueUpTo
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = self.evaluate(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
# dequeue_up_to has an undefined shape.
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
(4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
def testParallelEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
with self.cached_session() as sess:
dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
total_elements = sum(dequeue_sizes)
q = data_flow_ops.RandomShuffleQueue(
total_elements, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in xrange(total_elements)]
enqueue_op = q.enqueue_many((elems,))
dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
enqueue_op.run()
# Dequeue a random number of items in parallel on 10 threads.
dequeued_elems = []
def dequeue(dequeue_op):
dequeued_elems.extend(self.evaluate(dequeue_op))
threads = []
for dequeue_op in dequeue_ops:
threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueUpToWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesUpTo
# that number of elements.
dequeued_t = q.dequeue_up_to(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
with self.cached_session() as sess:
min_size = 2
q = data_flow_ops.RandomShuffleQueue(10, min_size, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
results = []
# Manually dequeue until we hit min_size.
results.append(self.evaluate(dequeued_t))
results.append(self.evaluate(dequeued_t))
def blocking_dequeue():
results.append(self.evaluate(dequeued_t))
results.append(self.evaluate(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=blocking_dequeue)
dequeue_thread.start()
time.sleep(0.1)
      # By now the dequeue thread should have blocked on the min_size
      # requirement.
      self.assertEqual(len(results), 2)
close_op.run()
dequeue_thread.join()
# Once the queue is closed, the min_size requirement is lifted.
self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
finished.append(True)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(finished), 0)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
progress = [] # Must be mutable
def dequeue():
self.assertItemsEqual(elems, self.evaluate(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
progress.append(2)
self.assertEqual(len(progress), 0)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
for _ in range(100):
time.sleep(0.01)
if len(progress) == 1:
break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
        self.assertEqual(3, len(results))
        results.extend(self.evaluate(dequeued_t))
        self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
capacity=10,
min_after_dequeue=2,
dtypes=dtypes_lib.float32,
shapes=((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
        self.assertEqual(3, len(results))
# min_after_dequeue is 2, we ask for 3 elements, and we end up only
# getting the remaining 1.
results.extend(self.evaluate(dequeued_t))
        self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue_many(q.size())
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEqual(len(results), 3)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
        # Although the last dequeue failed, we want to ensure that it
        # returns any elements that it potentially reserved to dequeue.
        # Thus the next cleanup should return a single element.
results.extend(self.evaluate(cleanup_dequeue_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0], results)
# There wasn't room for 50.0 in the queue when the first element was
# dequeued.
self.assertNotEqual(50.0, results[0])
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
thread.join()
def testBlockingEnqueueToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed since it will complete
# before the queue is closed.
self.evaluate(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the first blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Wait for the close op to block before unblocking the enqueue.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
# Dequeue to unblock the first blocking_enqueue_op, after which the
# close will complete.
results.append(dequeued_t.eval())
self.assertTrue(results[0] in elems)
thread2.join()
thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
size_t = q.size()
enqueue_op.run()
self.assertEqual(size_t.eval(), 3)
def blocking_enqueue():
# This will block until the dequeue after the close.
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
      # The blocking_enqueue_op has enqueued 1 of its 2 elements and is
      # blocked waiting for one more element to be dequeued.
for i in range(50):
queue_size = self.evaluate(size_t)
if queue_size == 4:
break
elif i == 49:
self.fail(
"Blocking enqueue op did not execute within the expected time.")
time.sleep(0.1)
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Unblock the first blocking_enqueue_op in blocking_enqueue.
q.dequeue().eval()
thread2.join()
thread1.join()
# At this point the close operation will complete, so the next enqueue
# will fail.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
# TensorFlow TestCase adds a default graph seed (=87654321). We check if
# the seed computed from the default graph seed is reproduced.
seed = 887634792
q2 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=seed)
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q2.dequeue().eval(), 10.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q1.dequeue().eval(), 20.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
def testSharedQueueSameSessionGraphSeedNone(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=98765432)
q1.enqueue((10.0,)).run()
      # If neither a graph seed nor an op seed is provided, the default
      # value must be used, and if a shared queue has already been created,
      # the second queue op must accept whatever seed that queue was
      # created with.
random_seed.set_random_seed(None)
q2 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.RandomShuffleQueue(
15, 5, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.RandomShuffleQueue(
10, 0, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_d")
q_d_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
q_f_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.op.run()
q_g_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_g")
q_g_2 = data_flow_ops.RandomShuffleQueue(
10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
q_g_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.op.run()
q_h_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
q_h_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.op.run()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = []
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
def testResetOfBlockingOperation(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
(),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
dequeue_up_to_op = q_empty.dequeue_up_to(1)
q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testDequeueManyInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertItemsEqual(elem, results)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/random/random_shuffle_queue_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateless random ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops as stateless
from tensorflow.python.platform import test
def invert_philox(key, value):
"""Invert the Philox bijection."""
key = np.array(key, dtype=np.uint32)
value = np.array(value, dtype=np.uint32)
step = np.array([0x9E3779B9, 0xBB67AE85], dtype=np.uint32)
for n in range(10)[::-1]:
key0, key1 = key + n * step
    # 0x991a7cdb and 0x6d7cae67 are the multiplicative inverses mod 2**32
    # of the Philox multipliers 0xD2511F53 and 0xCD9E8D57; multiplying by
    # them recovers the pre-round low words.
    v0 = value[3] * 0x991a7cdb & 0xffffffff
    v2 = value[1] * 0x6d7cae67 & 0xffffffff
hi0 = v0 * 0xD2511F53 >> 32
hi1 = v2 * 0xCD9E8D57 >> 32
v1 = hi1 ^ value[0] ^ key0
v3 = hi0 ^ value[2] ^ key1
value = v0, v1, v2, v3
return np.array(value)
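# For reference, a hedged sketch of the forward Philox-4x32 bijection that
# invert_philox undoes. This is an illustration written under the
# assumption that 0xD2511F53 and 0xCD9E8D57 are the Philox multipliers
# whose mod-2**32 inverses appear above; it is not the TensorFlow kernel.
def philox_4x32(key, value, rounds=10):
  """Sketch of the forward Philox bijection (inverse of invert_philox)."""
  key = np.array(key, dtype=np.uint32)
  value = np.array(value, dtype=np.uint32)
  step = np.array([0x9E3779B9, 0xBB67AE85], dtype=np.uint32)
  mask = np.uint64(0xffffffff)
  for n in range(rounds):
    key0, key1 = key + n * step
    prod0 = np.uint64(0xD2511F53) * np.uint64(value[0])
    prod1 = np.uint64(0xCD9E8D57) * np.uint64(value[2])
    hi0, lo0 = np.uint32(prod0 >> np.uint64(32)), np.uint32(prod0 & mask)
    hi1, lo1 = np.uint32(prod1 >> np.uint64(32)), np.uint32(prod1 & mask)
    # Mix the high halves with the remaining words and the per-round key.
    value = hi1 ^ value[1] ^ key0, lo1, hi0 ^ value[3] ^ key1, lo0
  return np.array(value)
# Round-trip sanity property: philox_4x32(key, invert_philox(key, value))
# should reproduce value modulo 2**32.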
class StatelessOpsTest(test.TestCase):
def _test_match(self, cases):
# Stateless ops should be the same as stateful ops on the first call
# after seed scrambling.
cases = tuple(cases)
key = 0x3ec8f720, 0x02461e29
for seed in (7, 17), (11, 5), (2, 3):
      # Compute the Philox preimage of the (graph seed, op seed) pair and
      # pack the four 32-bit words into two 64-bit seed values.
      preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)
      preseed = preseed[::2] | preseed[1::2] << 32
random_seed.set_random_seed(seed[0])
with test_util.use_gpu():
for stateless_op, stateful_op in cases:
stateful = stateful_op(seed=seed[1])
pure = stateless_op(seed=preseed)
self.assertAllEqual(self.evaluate(stateful), self.evaluate(pure))
def _test_determinism(self, cases):
    # Stateless values should be equal iff the seeds are equal (distinct
    # seeds colliding to identical outputs is possible in principle but
    # vanishingly unlikely).
cases = tuple(cases)
with self.test_session(use_gpu=True):
for seed_type in [dtypes.int32, dtypes.int64]:
seed_t = array_ops.placeholder(seed_type, shape=[2])
seeds = [(x, y) for x in range(5) for y in range(5)] * 3
for stateless_op, _ in cases:
pure = stateless_op(seed=seed_t)
values = [
(seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds
]
for s0, v0 in values:
for s1, v1 in values:
self.assertEqual(s0 == s1, np.all(v0 == v1))
def _float_cases(self, shape_dtypes=(None,)):
float_cases = (
# Uniform distribution, with and without range
(stateless.stateless_random_uniform, random_ops.random_uniform, {}),
(stateless.stateless_random_uniform, random_ops.random_uniform,
dict(minval=2.2, maxval=7.1)),
# Normal distribution, with and without mean+stddev
(stateless.stateless_random_normal, random_ops.random_normal, {}),
(stateless.stateless_random_normal, random_ops.random_normal,
dict(mean=2, stddev=3)),
# Truncated normal distribution, with and without mean+stddev
(stateless.stateless_truncated_normal, random_ops.truncated_normal, {}),
(stateless.stateless_truncated_normal, random_ops.truncated_normal,
dict(mean=3, stddev=4)),
)
for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
for shape_dtype in shape_dtypes:
for shape in (), (3,), (2, 5):
if shape_dtype is not None:
shape = constant_op.constant(shape, dtype=shape_dtype)
for stateless_op, stateful_op, kwds in float_cases:
kwds = dict(shape=shape, dtype=dtype, **kwds)
yield (functools.partial(stateless_op, **kwds),
functools.partial(stateful_op, **kwds))
def _int_cases(self, shape_dtypes=(None,)):
for shape_dtype in shape_dtypes:
for shape in (), (3,), (2, 5):
if shape_dtype is not None:
shape = constant_op.constant(shape, dtype=shape_dtype)
for dtype in dtypes.int32, dtypes.int64:
kwds = dict(minval=2, maxval=11111, dtype=dtype, shape=shape)
yield (functools.partial(stateless.stateless_random_uniform, **kwds),
functools.partial(random_ops.random_uniform, **kwds))
def _multinomial_cases(self):
num_samples = 10
for logits_dtype in np.float16, np.float32, np.float64:
for output_dtype in dtypes.int32, dtypes.int64:
for logits in ([[0.1, 0.25, 0.5, 0.15]], [[0.5, 0.5], [0.8, 0.2],
[0.25, 0.75]]):
kwds = dict(
logits=constant_op.constant(logits, dtype=logits_dtype),
num_samples=num_samples,
output_dtype=output_dtype)
yield (functools.partial(stateless.stateless_multinomial, **kwds),
functools.partial(random_ops.multinomial, **kwds))
@test_util.run_deprecated_v1
def testMatchFloat(self):
self._test_match(self._float_cases())
@test_util.run_deprecated_v1
def testMatchInt(self):
self._test_match(self._int_cases())
@test_util.run_deprecated_v1
def testMatchMultinomial(self):
self._test_match(self._multinomial_cases())
@test_util.run_deprecated_v1
def testDeterminismFloat(self):
self._test_determinism(
self._float_cases(shape_dtypes=(dtypes.int32, dtypes.int64)))
@test_util.run_deprecated_v1
def testDeterminismInt(self):
self._test_determinism(
self._int_cases(shape_dtypes=(dtypes.int32, dtypes.int64)))
@test_util.run_deprecated_v1
def testDeterminismMultinomial(self):
self._test_determinism(self._multinomial_cases())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/random/stateless_random_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing random variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops.distributions import special_math
def test_moment_matching(
samples,
number_moments,
dist,
stride=0):
"""Return z-test scores for sample moments to match analytic moments.
  Given `samples`, check that the first `number_moments` sample moments
  match the moments of `dist` by doing a z-test.
Args:
samples: Samples from target distribution.
number_moments: Python `int` describing how many sample moments to check.
dist: SciPy distribution object that provides analytic moments.
stride: Distance between samples to check for statistical properties.
A stride of 0 means to use all samples, while other strides test for
spatial correlation.
Returns:
Array of z_test scores.
"""
sample_moments = []
expected_moments = []
variance_sample_moments = []
x = samples.flat
for i in range(1, number_moments + 1):
strided_range = x[::(i - 1) * stride + 1]
sample_moments.append(np.mean(strided_range ** i))
expected_moments.append(dist.moment(i))
    # Var(mean of X**i over N samples) = (E[X**(2i)] - E[X**i]**2) / N.
    variance_sample_moments.append(
        (dist.moment(2 * i) - dist.moment(i) ** 2) / len(strided_range))
z_test_scores = []
for i in range(1, number_moments + 1):
# Assume every operation has a small numerical error.
# It takes i multiplications to calculate one i-th moment.
total_variance = (
variance_sample_moments[i - 1] +
i * np.finfo(samples.dtype).eps)
tiny = np.finfo(samples.dtype).tiny
assert np.all(total_variance > 0)
if total_variance < tiny:
total_variance = tiny
# z_test is approximately a unit normal distribution.
z_test_scores.append(abs(
(sample_moments[i - 1] - expected_moments[i - 1]) / np.sqrt(
total_variance)))
return z_test_scores
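# Hedged usage sketch (an illustration, not part of the original module):
# moments of genuinely standard-normal samples checked against
# scipy.stats.norm should produce z-scores well below the customary limit
# of 6 used elsewhere in these tests.
def _example_moment_matching_usage():
  from scipy import stats  # pylint: disable=g-import-not-at-top
  samples = np.random.RandomState(0).randn(100000).astype(np.float64)
  return test_moment_matching(samples, number_moments=4, dist=stats.norm())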
def chi_squared(x, bins):
"""Pearson's Chi-squared test."""
x = np.ravel(x)
n = len(x)
histogram, _ = np.histogram(x, bins=bins, range=(0, 1))
expected = n / float(bins)
return np.sum(np.square(histogram - expected) / expected)
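# Hedged usage sketch (an illustration, not part of the original module):
# for uniform(0, 1) samples the statistic is approximately chi-squared
# distributed with `bins - 1` degrees of freedom, so with bins=10 a value
# far above roughly 21.7 (the 1% tail for 9 degrees of freedom) would be
# suspicious.
def _example_chi_squared_usage():
  return chi_squared(np.random.RandomState(0).random_sample(10000), bins=10)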
def normal_cdf(x):
"""Cumulative distribution function for a standard normal distribution."""
return 0.5 + 0.5 * np.vectorize(math.erf)(x / math.sqrt(2))
def anderson_darling(x):
"""Anderson-Darling test for a standard normal distribution."""
x = np.sort(np.ravel(x))
n = len(x)
i = np.linspace(1, n, n)
z = np.sum((2 * i - 1) * np.log(normal_cdf(x)) +
(2 * (n - i) + 1) * np.log(1 - normal_cdf(x)))
return -n - z / n
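# Hedged usage sketch (an illustration, not part of the original module):
# genuinely standard-normal data should yield a small statistic, while
# values above roughly 3.9 (the approximate 1% critical value for a fully
# specified distribution) indicate a poor fit.
def _example_anderson_darling_usage():
  return anderson_darling(np.random.RandomState(0).randn(1000))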
def test_truncated_normal(assert_equal, assert_all_close, dtype, n, y):
"""Tests truncated normal distribution's statistics."""
def _normal_cdf(x):
return .5 * math.erfc(-x / math.sqrt(2))
def normal_pdf(x):
return math.exp(-(x**2) / 2.) / math.sqrt(2 * math.pi)
def probit(x):
return special_math.ndtri(x)
a = -2.
b = 2.
mu = 0.
sigma = 1.
alpha = (a - mu) / sigma
beta = (b - mu) / sigma
z = _normal_cdf(beta) - _normal_cdf(alpha)
assert_equal((y >= a).sum(), n)
assert_equal((y <= b).sum(), n)
# For more information on these calculations, see:
# Burkardt, John. "The Truncated Normal Distribution".
# Department of Scientific Computing website. Florida State University.
expected_mean = mu + (normal_pdf(alpha) - normal_pdf(beta)) / z * sigma
y = y.astype(float)
actual_mean = np.mean(y)
assert_all_close(actual_mean, expected_mean, atol=5e-4)
expected_median = mu + probit(
(_normal_cdf(alpha) + _normal_cdf(beta)) / 2.) * sigma
actual_median = np.median(y)
assert_all_close(actual_median, expected_median, atol=8e-4)
expected_variance = sigma**2 * (1 + (
(alpha * normal_pdf(alpha) - beta * normal_pdf(beta)) / z) - (
(normal_pdf(alpha) - normal_pdf(beta)) / z)**2)
actual_variance = np.var(y)
assert_all_close(
actual_variance,
expected_variance,
rtol=6e-3 if dtype == dtypes.bfloat16 else 1e-3)
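# Hedged cross-check (an assumption, not part of the original module):
# scipy.stats.truncnorm implements the same truncated-normal moments, so
# its analytic values can validate the expressions above; for a = -2,
# b = 2 the variance is approximately 0.774.
def _example_truncnorm_cross_check():
  from scipy import stats  # pylint: disable=g-import-not-at-top
  dist = stats.truncnorm(-2., 2.)
  return dist.mean(), dist.var()  # Approximately (0.0, 0.774).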
|
tensorflow-master
|
tensorflow/python/kernel_tests/random/util.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.stateful_random_ops.binomial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.kernel_tests.random import util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import stateful_random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# All supported dtypes for binomial().
_SUPPORTED_DTYPES = (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.int32, dtypes.int64)
class RandomBinomialTest(test.TestCase):
"""This is a large test due to the moments computation taking some time."""
def _Sampler(self, num, counts, probs, dtype, seed=None):
def func():
rng = stateful_random_ops.Generator.from_seed(seed).binomial(
shape=[10 * num], counts=counts, probs=probs, dtype=dtype)
ret = array_ops.reshape(rng, [10, num])
ret = self.evaluate(ret)
return ret
return func
@test_util.run_v2_only
def testMoments(self):
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
tf_logging.warn("Cannot test moments: %s", e)
return
# The moments test is a z-value test. This is the largest z-value
# we want to tolerate. Since the z-test approximates a unit normal
# distribution, it should almost definitely never exceed 6.
z_limit = 6.0
for dt in _SUPPORTED_DTYPES:
      # Test both when n * p > 10 and when n * p < 10.
for stride in 0, 4, 10:
for counts in (1., 10., 22., 50.):
for prob in (0.1, 0.5, 0.8):
sampler = self._Sampler(int(1e5), counts, prob, dt, seed=12345)
z_scores = util.test_moment_matching(
# Use float64 samples.
sampler().astype(np.float64),
number_moments=6,
dist=stats.binom(counts, prob),
stride=stride,
)
self.assertAllLess(z_scores, z_limit)
@test_util.run_v2_only
def testSeed(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, counts=10., probs=0.4, dtype=dt, seed=345)
sy = self._Sampler(1000, counts=10., probs=0.4, dtype=dt, seed=345)
self.assertAllEqual(sx(), sy())
def testZeroShape(self):
rnd = stateful_random_ops.Generator.from_seed(12345).binomial([0], [], [])
self.assertEqual([0], rnd.shape.as_list())
def testShape(self):
rng = stateful_random_ops.Generator.from_seed(12345)
# Scalar parameters.
rnd = rng.binomial(shape=[10], counts=np.float32(2.), probs=np.float32(0.5))
self.assertEqual([10], rnd.shape.as_list())
# Vector parameters.
rnd = rng.binomial(
shape=[10],
counts=array_ops.ones([10], dtype=np.float32),
probs=0.3 * array_ops.ones([10], dtype=np.float32))
self.assertEqual([10], rnd.shape.as_list())
rnd = rng.binomial(
shape=[2, 5],
counts=array_ops.ones([2], dtype=np.float32),
probs=0.4 * array_ops.ones([2], dtype=np.float32))
self.assertEqual([2, 5], rnd.shape.as_list())
# Scalar counts, vector probs.
rnd = rng.binomial(
shape=[10],
counts=np.float32(5.),
probs=0.8 * array_ops.ones([10], dtype=np.float32))
self.assertEqual([10], rnd.shape.as_list())
# Vector counts, scalar probs.
rnd = rng.binomial(
shape=[10],
counts=array_ops.ones([10], dtype=np.float32),
probs=np.float32(0.9))
self.assertEqual([10], rnd.shape.as_list())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/random/random_binomial_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for random_crop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class RandomCropTest(test.TestCase):
@test_util.run_deprecated_v1
def testNoOp(self):
    # No random cropping is performed since the crop size equals value.shape.
for shape in (2, 1, 1), (2, 1, 3), (4, 5, 3):
value = np.arange(0, np.prod(shape), dtype=np.int32).reshape(shape)
with self.cached_session():
crop = random_ops.random_crop(value, shape).eval()
self.assertAllEqual(crop, value)
def testContains(self):
with self.cached_session():
shape = (3, 5, 7)
target = (2, 3, 4)
value = np.random.randint(1000000, size=shape)
value_set = set(
tuple(value[i:i + 2, j:j + 3, k:k + 4].ravel())
for i in range(2) for j in range(3) for k in range(4))
crop = random_ops.random_crop(value, size=target)
for _ in range(20):
y = self.evaluate(crop)
self.assertAllEqual(y.shape, target)
self.assertTrue(tuple(y.ravel()) in value_set)
@test_util.run_deprecated_v1
def testRandomization(self):
    # Run a 1x1 crop num_samples times on an image and check that each
    # pixel is selected about 1/size of the time.
num_samples = 1000
shape = [5, 4, 1]
size = np.prod(shape)
single = [1, 1, 1]
value = np.arange(size).reshape(shape)
with self.cached_session():
crop = random_ops.random_crop(value, single, seed=7)
counts = np.zeros(size, dtype=np.int32)
for _ in range(num_samples):
y = self.evaluate(crop)
self.assertAllEqual(y.shape, single)
counts[y] += 1
      # Each sample hits a given pixel with probability 1/size, so counts[i]
      # is approximately Binomial(num_samples, 1/size): mean num_samples/size
      # and, since 1/size is small, standard deviation close to sqrt(mean).
mean = np.repeat(num_samples / size, size)
four_stddev = 4.0 * np.sqrt(mean)
# Ensure that each entry is observed in 1/size of the samples
# within 4 standard deviations.
self.assertAllClose(counts, mean, atol=four_stddev)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/random/random_crop_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Multinomial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import timeit
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def composed_sampler(logits, num_samples):
# [batch size, num classes, num samples]
unif = random_ops.random_uniform(logits.get_shape().concatenate(
tensor_shape.TensorShape([num_samples])))
noise = -math_ops.log(-math_ops.log(unif))
# [batch size, num classes, 1]
logits = array_ops.expand_dims(logits, -1)
# [batch size, num samples]
return math_ops.argmax(logits + noise, axis=1)
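# The composed sampler above is the Gumbel-max trick: -log(-log(u)) for
# u ~ Uniform(0, 1) is Gumbel(0, 1) noise, and taking the argmax of
# logits + noise over classes draws from the categorical distribution
# softmax(logits). A minimal NumPy sketch of the same idea (an
# illustrative assumption, not TensorFlow code):
def numpy_gumbel_max_sampler(logits, num_samples, rng=np.random):
  """Sketch: [batch, classes] logits -> [batch, num_samples] class ids."""
  unif = rng.random_sample(logits.shape + (num_samples,))
  gumbel = -np.log(-np.log(unif))
  return np.argmax(logits[..., np.newaxis] + gumbel, axis=1)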
native_sampler = random_ops.multinomial
class MultinomialTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testSmallEntropy(self):
random_seed.set_random_seed(1618)
for output_dtype in [np.int32, np.int64]:
with test_util.device(use_gpu=True):
# A logit value of -10 corresponds to a probability of ~5e-5.
logits = constant_op.constant([[-10., 10., -10.], [-10., -10., 10.]])
num_samples = 1000
samples = self.evaluate(random_ops.multinomial(
logits, num_samples, output_dtype=output_dtype))
self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)
@test_util.run_deprecated_v1
def testOneOpMultipleStepsIndependent(self):
with test_util.use_gpu():
sample_op1, _ = self._make_ops(10)
# Consecutive runs shouldn't yield identical output.
sample1a = self.evaluate(sample_op1)
sample1b = self.evaluate(sample_op1)
self.assertFalse(np.equal(sample1a, sample1b).all())
def testEagerOneOpMultipleStepsIndependent(self):
with context.eager_mode(), test_util.device(use_gpu=True):
sample1, sample2 = self._make_ops(10)
# Consecutive runs shouldn't yield identical output.
self.assertFalse(np.equal(sample1.numpy(), sample2.numpy()).all())
def testTwoOpsIndependent(self):
with test_util.use_gpu():
sample_op1, sample_op2 = self._make_ops(32)
sample1, sample2 = self.evaluate([sample_op1, sample_op2])
# We expect sample1 and sample2 to be independent.
# 1 in 2^32 chance of this assertion failing.
self.assertFalse(np.equal(sample1, sample2).all())
@test_util.run_deprecated_v1
def testTwoOpsSameSeedDrawSameSequences(self):
with test_util.use_gpu():
sample_op1, sample_op2 = self._make_ops(1000, seed=1)
sample1, sample2 = self.evaluate([sample_op1, sample_op2])
self.assertAllEqual(sample1, sample2)
def testLargeLogits(self):
for neg in [True, False]:
with test_util.use_gpu():
logits = np.array([[1000.] * 5])
if neg:
logits *= -1
samples = self.evaluate(random_ops.multinomial(logits, 10))
# Sampled classes should be in-range.
self.assertTrue((samples >= 0).all())
self.assertTrue((samples < 5).all())
def testSamplingCorrectness(self):
np.random.seed(1618) # Make it reproducible.
num_samples = 21000
rand_probs = self._normalize(np.random.random_sample((10,)))
rand_probs2 = self._normalize(np.random.random_sample((3, 5))) # batched
for probs in [[.5, .5], [.85, .05, .1], rand_probs, rand_probs2]:
probs = np.asarray(probs)
if len(probs.shape) == 1:
probs = probs.reshape(1, probs.size) # singleton batch
logits = np.log(probs).astype(np.float32)
composed_freqs = self._do_sampling(logits, num_samples, composed_sampler)
native_freqs = self._do_sampling(logits, num_samples, native_sampler)
      # The test here is similar to core/lib/random/distribution_sampler_test.cc.
composed_chi2 = self._chi2(probs, composed_freqs)
native_chi2 = self._chi2(probs, native_freqs)
composed_native_chi2 = self._chi2(composed_freqs, native_freqs)
def check(chi2s):
for chi2 in chi2s:
self.assertLess(chi2, 1e-3)
check(composed_chi2)
check(native_chi2)
check(composed_native_chi2)
def _make_ops(self, num_samples, seed=None):
prob_dist = constant_op.constant([[0.15, 0.5, 0.3, 0.05]])
logits = math_ops.log(prob_dist)
# Two independent sets of samples from the same distribution
sample_op1 = random_ops.multinomial(logits, num_samples, seed)
sample_op2 = random_ops.multinomial(logits, num_samples, seed)
return (sample_op1, sample_op2)
def _normalize(self, vec):
batched = (len(vec.shape) == 2)
return vec / vec.sum(axis=1, keepdims=True) if batched else vec / vec.sum()
def _do_sampling(self, logits, num_samples, sampler):
"""Samples using the supplied sampler and inputs.
Args:
logits: Numpy ndarray of shape [batch_size, num_classes].
num_samples: Int; number of samples to draw.
sampler: A sampler function that takes (1) a [batch_size, num_classes]
Tensor, (2) num_samples and returns a [batch_size, num_samples] Tensor.
Returns:
Frequencies from sampled classes; shape [batch_size, num_classes].
"""
with test_util.use_gpu():
random_seed.set_random_seed(1618)
op = sampler(constant_op.constant(logits), num_samples)
d = self.evaluate(op)
batch_size, num_classes = logits.shape
freqs_mat = []
for i in range(batch_size):
cnts = dict(collections.Counter(d[i, :]))
# Requires drawn class labels be in range.
self.assertLess(max(cnts.keys()), num_classes)
self.assertGreaterEqual(min(cnts.keys()), 0)
freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
for k in range(num_classes)]
freqs_mat.append(freqs)
return freqs_mat
def _chi2(self, expected, actual):
actual = np.asarray(actual)
expected = np.asarray(expected)
diff = actual - expected
chi2 = np.sum(diff * diff / expected, axis=0)
return chi2
def testEmpty(self):
classes = 5
with test_util.use_gpu():
for batch in 0, 3:
for samples in 0, 7:
x = self.evaluate(
random_ops.multinomial(
array_ops.zeros([batch, classes]), samples))
self.assertEqual(x.shape, (batch, samples))
@test_util.run_deprecated_v1
def testEmptyClasses(self):
with test_util.use_gpu():
x = random_ops.multinomial(array_ops.zeros([5, 0]), 7)
with self.assertRaisesOpError("num_classes should be positive"):
self.evaluate(x)
def testNegativeMinLogits(self):
random_seed.set_random_seed(78844)
with test_util.use_gpu():
logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
num_samples = 1000
samples = self.evaluate(random_ops.multinomial(logits, num_samples))
self.assertAllEqual([[1023] * num_samples], samples)
# Benchmarking code
def native_op_vs_composed_ops(batch_size, num_classes, num_samples, num_iters):
np.random.seed(1618) # Make it reproducible.
shape = [batch_size, num_classes]
logits_np = np.random.randn(*shape).astype(np.float32)
# No CSE/CF.
optimizer_options = config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)
config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=optimizer_options))
with session.Session(config=config) as sess:
logits = constant_op.constant(logits_np, shape=shape)
native_op = control_flow_ops.group(native_sampler(logits, num_samples))
composed_op = control_flow_ops.group(composed_sampler(logits, num_samples))
native_dt = timeit.timeit(lambda: sess.run(native_op), number=num_iters)
composed_dt = timeit.timeit(lambda: sess.run(composed_op), number=num_iters)
return native_dt, composed_dt
class MultinomialBenchmark(test.Benchmark):
def benchmarkNativeOpVsComposedOps(self):
num_iters = 50
print("Composition of existing ops vs. Native Multinomial op [%d iters]" %
num_iters)
print("BatchSize\tNumClasses\tNumSamples\tsec(composed)\tsec(native)\t"
"speedup")
for batch_size in [32, 128]:
for num_classes in [10000, 100000]:
for num_samples in [1, 4, 32]:
n_dt, c_dt = native_op_vs_composed_ops(batch_size, num_classes,
num_samples, num_iters)
print("%d\t%d\t%d\t%.3f\t%.3f\t%.2f" % (batch_size, num_classes,
num_samples, c_dt, n_dt,
c_dt / n_dt))
self.report_benchmark(
name="native_batch%d_classes%d_s%d" %
(batch_size, num_classes, num_samples),
iters=num_iters,
wall_time=n_dt)
self.report_benchmark(
name="composed_batch%d_classes%d_s%d" %
(batch_size, num_classes, num_samples),
iters=num_iters,
wall_time=c_dt)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/random/multinomial_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_ops.random_poisson."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.kernel_tests.random import util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# All supported dtypes for random_poisson().
_SUPPORTED_DTYPES = (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.int32, dtypes.int64)
class RandomPoissonTest(test.TestCase):
"""This is a large test due to the moments computation taking some time."""
def _Sampler(self, num, lam, dtype, use_gpu, seed=None):
def func():
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.random_poisson(lam, [num], dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = self.evaluate(rng)
return ret
return func
def testMoments(self):
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
tf_logging.warn("Cannot test moments: %s", e)
return
# The moments test is a z-value test. This is the largest z-value
# we want to tolerate. Since the z-test approximates a unit normal
# distribution, it should almost definitely never exceed 6.
z_limit = 6.0
for dt in _SUPPORTED_DTYPES:
# Test when lam < 10 and when lam >= 10
for stride in 0, 4, 10:
for lam in (3., 20):
max_moment = 5
sampler = self._Sampler(10000, lam, dt, use_gpu=False, seed=12345)
z_scores = util.test_moment_matching(
sampler(),
max_moment,
stats.poisson(lam),
stride=stride,
)
self.assertAllLess(z_scores, z_limit)
  # Checks that the CPU and GPU implementations return the same results,
  # given the same random seed.
@test_util.run_deprecated_v1
def testCPUGPUMatch(self):
for dt in _SUPPORTED_DTYPES:
results = {}
for use_gpu in [False, True]:
sampler = self._Sampler(1000, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
@test_util.run_deprecated_v1
def testSeed(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 1.0, dt, use_gpu=True, seed=345)
sy = self._Sampler(1000, 1.0, dt, use_gpu=True, seed=345)
self.assertAllEqual(sx(), sy())
@test_util.run_deprecated_v1
def testNoCSE(self):
"""CSE = constant subexpression eliminator.
SetIsStateful() should prevent two identical random ops from getting
merged.
"""
for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
with self.cached_session(use_gpu=True):
rnd1 = random_ops.random_poisson(2.0, [24], dtype=dtype)
rnd2 = random_ops.random_poisson(2.0, [24], dtype=dtype)
diff = rnd2 - rnd1
# Since these are all positive integers, the norm will
# be at least 1 if they are different.
self.assertGreaterEqual(np.linalg.norm(diff.eval()), 1)
def testZeroShape(self):
with self.cached_session():
rnd = random_ops.random_poisson([], [], seed=12345)
self.assertEqual([0], rnd.get_shape().as_list())
self.assertAllClose(np.array([], dtype=np.float32), self.evaluate(rnd))
@test_util.run_deprecated_v1
def testShape(self):
# Fully known shape
rnd = random_ops.random_poisson(2.0, [150], seed=12345)
self.assertEqual([150], rnd.get_shape().as_list())
rnd = random_ops.random_poisson(
lam=array_ops.ones([1, 2, 3]),
shape=[150],
seed=12345)
self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_poisson(
lam=array_ops.ones([1, 2, 3]),
shape=[20, 30],
seed=12345)
self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_poisson(
lam=array_ops.placeholder(dtypes.float32, shape=(2,)),
shape=[12],
seed=12345)
self.assertEqual([12, 2], rnd.get_shape().as_list())
# Partially known shape.
rnd = random_ops.random_poisson(
lam=array_ops.ones([7, 3]),
shape=array_ops.placeholder(dtypes.int32, shape=(1,)),
seed=12345)
self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
rnd = random_ops.random_poisson(
lam=array_ops.ones([9, 6]),
shape=array_ops.placeholder(dtypes.int32, shape=(3,)),
seed=12345)
self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
# Unknown shape.
rnd = random_ops.random_poisson(
lam=array_ops.placeholder(dtypes.float32),
shape=array_ops.placeholder(dtypes.int32),
seed=12345)
self.assertIs(None, rnd.get_shape().ndims)
rnd = random_ops.random_poisson(
lam=array_ops.placeholder(dtypes.float32),
shape=[50],
seed=12345)
self.assertIs(None, rnd.get_shape().ndims)
@test_util.run_deprecated_v1
def testDTypeCombinationsV2(self):
"""Tests random_poisson_v2() for all supported dtype combinations."""
with self.cached_session():
for lam_dt in _SUPPORTED_DTYPES:
for out_dt in _SUPPORTED_DTYPES:
random_ops.random_poisson(
constant_op.constant([1], dtype=lam_dt), [10],
dtype=out_dt).eval()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/random/random_poisson_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_ops.random_gamma."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.kernel_tests.random import util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
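# A small sketch of the bound used by _testZeroDensity below: the mass a
# Gamma(alpha) distribution puts below the smallest positive normal float of
# a dtype is stats.gamma(alpha).cdf(np.finfo(np_dtype).tiny), so out of
# `size` samples roughly that fraction may underflow to zero. Illustrative
# only; requires scipy and is not called by the tests.
def _demo_expected_zero_count(alpha, np_dtype, size):
  from scipy import stats  # pylint: disable=g-import-not-at-top
  return stats.gamma(alpha).cdf(np.finfo(np_dtype).tiny) * size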
class RandomGammaTest(test.TestCase):
"""This is a medium test due to the moments computation taking some time."""
def setUp(self):
np.random.seed(137)
random_seed.set_random_seed(137)
def _Sampler(self, num, alpha, beta, dtype, use_gpu, seed=None):
def func():
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.random_gamma(
[num], alpha, beta=beta, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = self.evaluate(rng)
return ret
return func
def testEmptySamplingNoError(self):
self.evaluate(random_ops.random_gamma(
[5], alpha=np.ones([2, 0, 3]), beta=np.ones([3]), dtype=dtypes.float32))
@test_util.run_deprecated_v1
def testMomentsFloat32(self):
self._testMoments(dtypes.float32)
@test_util.run_deprecated_v1
def testMomentsFloat64(self):
self._testMoments(dtypes.float64)
def _testMoments(self, dt):
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
tf_logging.warn("Cannot test moments: %s" % e)
return
# The moments test is a z-value test. This is the largest z-value
# we want to tolerate. Since the z-test approximates a unit normal
# distribution, it should almost definitely never exceed 6.
z_limit = 6.0
for stride in 0, 1, 4, 17:
alphas = [0.2, 1.0, 3.0]
if dt == dtypes.float64:
alphas = [0.01] + alphas
for alpha in alphas:
for scale in 9, 17:
# Gamma moments only defined for values less than the scale param.
max_moment = min(6, scale // 2)
sampler = self._Sampler(
20000, alpha, 1 / scale, dt, use_gpu=False, seed=12345)
z_scores = util.test_moment_matching(
sampler(),
max_moment,
stats.gamma(alpha, scale=scale),
stride=stride,
)
self.assertAllLess(z_scores, z_limit)
def _testZeroDensity(self, alpha):
"""Zero isn't in the support of the gamma distribution.
But quantized floating point math has its limits.
TODO(bjp): Implement log-gamma sampler for small-shape distributions.
Args:
alpha: float shape value to test
"""
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
tf_logging.warn("Cannot test zero density proportions: %s" % e)
return
allowable_zeros = {
dtypes.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny),
dtypes.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny),
dtypes.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)
}
failures = []
for use_gpu in [False, True]:
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(
10000, alpha, 1.0, dt, use_gpu=use_gpu, seed=12345)
x = sampler()
allowable = allowable_zeros[dt] * x.size
allowable = allowable * 2 if allowable < 10 else allowable * 1.05
if np.sum(x <= 0) > allowable:
failures += [(use_gpu, dt)]
self.assertEqual([], failures)
def testNonZeroSmallShape(self):
self._testZeroDensity(0.01)
def testNonZeroSmallishShape(self):
self._testZeroDensity(0.35)
  # Asserts that different trials (1000 samples per trial) are unlikely to
  # produce the same sequence of values. This catches buggy implementations
  # that reuse the same random number seed.
def testDistinct(self):
for use_gpu in [False, True]:
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(1000, 2.0, 1.0, dt, use_gpu=use_gpu)
x = sampler()
y = sampler()
# Number of different samples.
count = (x == y).sum()
count_limit = 20 if dt == dtypes.float16 else 10
if count >= count_limit:
print(use_gpu, dt)
print("x = ", x)
print("y = ", y)
print("count = ", count)
self.assertLess(count, count_limit)
  # Checks that the CPU and GPU implementations return the same results,
  # given the same random seed.
def testCPUGPUMatch(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
results = {}
for use_gpu in [False, True]:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
def testSeed(self):
for use_gpu in [False, True]:
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
self.assertAllEqual(sx(), sy())
@test_util.run_deprecated_v1
def testNoCSE(self):
"""CSE = constant subexpression eliminator.
SetIsStateful() should prevent two identical random ops from getting
merged.
"""
for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
rnd1 = random_ops.random_gamma([24], 2.0, dtype=dtype)
rnd2 = random_ops.random_gamma([24], 2.0, dtype=dtype)
diff = rnd2 - rnd1
self.assertGreater(np.linalg.norm(diff.eval()), 0.1)
@test_util.run_deprecated_v1
def testShape(self):
# Fully known shape.
rnd = random_ops.random_gamma([150], 2.0)
self.assertEqual([150], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([150], 2.0, beta=[3.0, 4.0])
self.assertEqual([150, 2], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([150], array_ops.ones([1, 2, 3]))
self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([20, 30], array_ops.ones([1, 2, 3]))
self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma(
[123], array_ops.placeholder(
dtypes.float32, shape=(2,)))
self.assertEqual([123, 2], rnd.get_shape().as_list())
# Partially known shape.
rnd = random_ops.random_gamma(
array_ops.placeholder(
dtypes.int32, shape=(1,)), array_ops.ones([7, 3]))
self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma(
array_ops.placeholder(
dtypes.int32, shape=(3,)), array_ops.ones([9, 6]))
self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
# Unknown shape.
rnd = random_ops.random_gamma(
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.float32))
self.assertIs(None, rnd.get_shape().ndims)
rnd = random_ops.random_gamma([50], array_ops.placeholder(dtypes.float32))
self.assertIs(None, rnd.get_shape().ndims)
@test_util.run_deprecated_v1
def testPositive(self):
n = int(10e3)
for dt in [dtypes.float16, dtypes.float32, dtypes.float64]:
with self.cached_session():
x = random_ops.random_gamma(shape=[n], alpha=0.001, dtype=dt, seed=0)
self.assertEqual(0, math_ops.reduce_sum(math_ops.cast(
math_ops.less_equal(x, 0.), dtype=dtypes.int64)).eval())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/random/random_gamma_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fft operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.platform import test
VALID_FFT_RANKS = (1, 2, 3)
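# The NumPy reference implementations below pass a single axis to
# np.fft.fft2/np.fft.ifft2 for rank-1 transforms. Since np.fft.fft2 is a
# thin wrapper around np.fft.fftn, restricting it to one axis reduces to a
# plain 1-D FFT. A quick sketch of that equivalence (not used by the tests):
def _demo_fft2_single_axis_is_fft(x):
  return np.allclose(np.fft.fft2(x, axes=(-1,)), np.fft.fft(x, axis=-1))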
class BaseFFTOpsTest(test.TestCase):
def _compare(self, x, rank, fft_length=None, use_placeholder=False,
rtol=1e-4, atol=1e-4):
self._compareForward(x, rank, fft_length, use_placeholder, rtol, atol)
self._compareBackward(x, rank, fft_length, use_placeholder, rtol, atol)
def _compareForward(self, x, rank, fft_length=None, use_placeholder=False,
rtol=1e-4, atol=1e-4):
x_np = self._npFFT(x, rank, fft_length)
if use_placeholder:
x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
x_tf = self._tfFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
else:
x_tf = self._tfFFT(x, rank, fft_length)
self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)
def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False,
rtol=1e-4, atol=1e-4):
x_np = self._npIFFT(x, rank, fft_length)
if use_placeholder:
x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
x_tf = self._tfIFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
else:
x_tf = self._tfIFFT(x, rank, fft_length)
self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)
def _checkMemoryFail(self, x, rank):
config = config_pb2.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 1e-2
with self.cached_session(config=config, force_gpu=True):
self._tfFFT(x, rank, fft_length=None)
def _checkGradComplex(self, func, x, y, result_is_complex=True,
rtol=1e-2, atol=1e-2):
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
# func is a forward or inverse, real or complex, batched or unbatched FFT
# function with a complex input.
z = func(math_ops.complex(inx, iny))
# loss = sum(|z|^2)
loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
((x_jacob_t, x_jacob_n),
(y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
[inx, iny], [list(x.shape), list(y.shape)],
loss, [1],
x_init_value=[x, y],
delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
self.assertAllClose(y_jacob_t, y_jacob_n, rtol=rtol, atol=atol)
def _checkGradReal(self, func, x, rtol=1e-2, atol=1e-2):
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
# func is a forward RFFT function (batched or unbatched).
z = func(inx)
# loss = sum(|z|^2)
loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
x_jacob_t, x_jacob_n = test.compute_gradient(
inx, list(x.shape), loss, [1], x_init_value=x, delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
class FFTOpsTest(BaseFFTOpsTest):
def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):
# fft_length unused for complex FFTs.
with self.cached_session(use_gpu=True) as sess:
return sess.run(self._tfFFTForRank(rank)(x), feed_dict=feed_dict)
def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):
# fft_length unused for complex FFTs.
with self.cached_session(use_gpu=True) as sess:
return sess.run(self._tfIFFTForRank(rank)(x), feed_dict=feed_dict)
def _npFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.fft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.fft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.fft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _npIFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.ifft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.ifft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.ifft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _tfFFTForRank(self, rank):
if rank == 1:
return fft_ops.fft
elif rank == 2:
return fft_ops.fft2d
elif rank == 3:
return fft_ops.fft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return fft_ops.ifft
elif rank == 2:
return fft_ops.ifft2d
elif rank == 3:
return fft_ops.ifft3d
else:
raise ValueError("invalid rank")
@test_util.run_deprecated_v1
def testEmpty(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type in (np.complex64, np.complex128):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np_type)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
@test_util.run_deprecated_v1
def testBasic(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._compare(
np.mod(np.arange(np.power(4, dims)), 10).reshape(
(4,) * dims).astype(np_type), rank, rtol=tol, atol=tol)
def testLargeBatch(self):
if test.is_gpu_available(cuda_only=True):
rank = 1
for dims in xrange(rank, rank + 3):
for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-5)):
self._compare(
np.mod(np.arange(np.power(128, dims)), 10).reshape(
(128,) * dims).astype(np_type), rank, rtol=tol, atol=tol)
  # TODO(yangzihao): Disabled until we can figure out a way to properly test
  # memory failures for large batch FFTs.
# def testLargeBatchMemoryFail(self):
# if test.is_gpu_available(cuda_only=True):
# rank = 1
# for dims in xrange(rank, rank + 3):
# self._checkMemoryFail(
# np.mod(np.arange(np.power(128, dims)), 64).reshape(
# (128,) * dims).astype(np.complex64), rank)
@test_util.run_deprecated_v1
def testBasicPlaceholder(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._compare(
np.mod(np.arange(np.power(4, dims)), 10).reshape(
(4,) * dims).astype(np_type),
rank, use_placeholder=True, rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testRandom(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 5e-6)):
def gen(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
return (re + im * 1j).reshape(shape)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._compare(gen((4,) * dims).astype(np_type), rank,
rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testRandom1D(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type in (np.complex64, np.complex128):
has_gpu = test.is_gpu_available(cuda_only=True)
tol = {(np.complex64, True): 1e-4,
(np.complex64, False): 1e-2,
(np.complex128, True): 1e-4,
(np.complex128, False): 1e-2}[(np_type, has_gpu)]
def gen(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
return (re + im * 1j).reshape(shape)
# Check a variety of power-of-2 FFT sizes.
for dim in (128, 256, 512, 1024):
self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)
# Check a variety of non-power-of-2 FFT sizes.
for dim in (127, 255, 511, 1023):
self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testError(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfIFFT(x, rank)
@test_util.run_deprecated_v1
def testGrad_Simple(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type, tol in ((np.float32, 1e-4), (np.float64, 1e-10)):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.ones(shape=(4,) * dims, dtype=np_type) / 10.0
im = np.zeros(shape=(4,) * dims, dtype=np_type)
self._checkGradComplex(self._tfFFTForRank(rank), re, im,
rtol=tol, atol=tol)
self._checkGradComplex(self._tfIFFTForRank(rank), re, im,
rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testGrad_Random(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type, tol in ((np.float32, 1e-2), (np.float64, 1e-10)):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1
im = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1
self._checkGradComplex(self._tfFFTForRank(rank), re, im,
rtol=tol, atol=tol)
self._checkGradComplex(self._tfIFFTForRank(rank), re, im,
rtol=tol, atol=tol)
class RFFTOpsTest(BaseFFTOpsTest):
def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False):
super(RFFTOpsTest, self)._compareBackward(x, rank, fft_length,
use_placeholder)
def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):
with self.cached_session(use_gpu=True) as sess:
return sess.run(
self._tfFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)
def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):
with self.cached_session(use_gpu=True) as sess:
return sess.run(
self._tfIFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)
def _npFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.rfft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.rfft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.rfft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _npIFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.irfft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.irfft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.irfft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _tfFFTForRank(self, rank):
if rank == 1:
return fft_ops.rfft
elif rank == 2:
return fft_ops.rfft2d
elif rank == 3:
return fft_ops.rfft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return fft_ops.irfft
elif rank == 2:
return fft_ops.irfft2d
elif rank == 3:
return fft_ops.irfft3d
else:
raise ValueError("invalid rank")
@test_util.run_deprecated_v1
def testEmpty(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np.float32)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
x = np.zeros((0,) * dims).astype(np.complex64)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
@test_util.run_deprecated_v1
def testBasic(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
self._compareBackward(
c2r.astype(np.complex64), rank, (size,) * rank)
def testLargeBatch(self):
if test.is_gpu_available(cuda_only=True):
rank = 1
for dims in xrange(rank, rank + 3):
for size in (64, 128):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
self._compareBackward(c2r.astype(np.complex64), rank, (size,) * rank)
@test_util.run_deprecated_v1
def testBasicPlaceholder(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
self._compareForward(
r2c.astype(np.float32),
rank, (size,) * rank,
use_placeholder=True)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
self._compareBackward(
c2r.astype(np.complex64),
rank, (size,) * rank,
use_placeholder=True)
@test_util.run_deprecated_v1
def testFftLength(self):
if test.is_gpu_available(cuda_only=True):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
# Test truncation (FFT size < dimensions).
fft_length = (size - 2,) * rank
self._compareForward(r2c.astype(np.float32), rank, fft_length)
self._compareBackward(c2r.astype(np.complex64), rank, fft_length)
# Confirm it works with unknown shapes as well.
self._compareForward(
r2c.astype(np.float32),
rank,
fft_length,
use_placeholder=True)
self._compareBackward(
c2r.astype(np.complex64),
rank,
fft_length,
use_placeholder=True)
# Test padding (FFT size > dimensions).
fft_length = (size + 2,) * rank
self._compareForward(r2c.astype(np.float32), rank, fft_length)
self._compareBackward(c2r.astype(np.complex64), rank, fft_length)
# Confirm it works with unknown shapes as well.
self._compareForward(
r2c.astype(np.float32),
rank,
fft_length,
use_placeholder=True)
self._compareBackward(
c2r.astype(np.complex64),
rank,
fft_length,
use_placeholder=True)
@test_util.run_deprecated_v1
def testRandom(self):
with spectral_ops_test_util.fft_kernel_label_map():
def gen_real(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
ret = re.reshape(shape)
return ret
def gen_complex(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
ret = (re + im * 1j).reshape(shape)
return ret
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
self._compareForward(gen_real((size,) * dims), rank, (size,) * rank)
complex_dims = (size,) * (dims - 1) + (inner_dim,)
self._compareBackward(
gen_complex(complex_dims), rank, (size,) * rank)
@test_util.run_deprecated_v1
def testError(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape .* must have rank at least {}".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape .* must have rank at least {}".format(rank)):
self._tfIFFT(x, rank)
for dims in xrange(rank, rank + 2):
x = np.zeros((1,) * rank)
# Test non-rank-1 fft_length produces an error.
fft_length = np.zeros((1, 1)).astype(np.int32)
with self.assertRaisesWithPredicateMatch(ValueError,
"Shape .* must have rank 1"):
self._tfFFT(x, rank, fft_length)
with self.assertRaisesWithPredicateMatch(ValueError,
"Shape .* must have rank 1"):
self._tfIFFT(x, rank, fft_length)
# Test wrong fft_length length.
fft_length = np.zeros((rank + 1,)).astype(np.int32)
with self.assertRaisesWithPredicateMatch(
ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
self._tfFFT(x, rank, fft_length)
with self.assertRaisesWithPredicateMatch(
ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
self._tfIFFT(x, rank, fft_length)
# Test that calling the kernel directly without padding to fft_length
# produces an error.
rffts_for_rank = {
1: [gen_spectral_ops.rfft, gen_spectral_ops.irfft],
2: [gen_spectral_ops.rfft2d, gen_spectral_ops.irfft2d],
3: [gen_spectral_ops.rfft3d, gen_spectral_ops.irfft3d]
}
rfft_fn, irfft_fn = rffts_for_rank[rank]
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"Input dimension .* must have length of at least 6 but got: 5"):
x = np.zeros((5,) * rank).astype(np.float32)
fft_length = [6] * rank
with self.cached_session():
self.evaluate(rfft_fn(x, fft_length))
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"Input dimension .* must have length of at least .* but got: 3"):
x = np.zeros((3,) * rank).astype(np.complex64)
fft_length = [6] * rank
with self.cached_session():
self.evaluate(irfft_fn(x, fft_length))
@test_util.run_deprecated_v1
def testGrad_Simple(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
# rfft3d/irfft3d do not have gradients yet.
if rank == 3:
continue
for dims in xrange(rank, rank + 2):
for size in (5, 6):
re = np.ones(shape=(size,) * dims, dtype=np.float32)
im = -np.ones(shape=(size,) * dims, dtype=np.float32)
self._checkGradReal(self._tfFFTForRank(rank), re)
self._checkGradComplex(
self._tfIFFTForRank(rank), re, im, result_is_complex=False)
@test_util.run_deprecated_v1
def testGrad_Random(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
# rfft3d/irfft3d do not have gradients yet.
if rank == 3:
continue
for dims in xrange(rank, rank + 2):
for size in (5, 6):
re = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
im = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
self._checkGradReal(self._tfFFTForRank(rank), re)
self._checkGradComplex(
self._tfIFFTForRank(rank), re, im, result_is_complex=False)
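# fftshift on a 1-D vector of length n is a circular roll by n // 2 (and
# ifftshift undoes it with a roll by -(n // 2)). A NumPy-only sketch of the
# definition exercised by the tests below; not called anywhere:
def _demo_fftshift_is_roll(x):
  x = np.asarray(x)
  return np.allclose(np.fft.fftshift(x), np.roll(x, x.shape[-1] // 2))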
class FFTShiftTest(test.TestCase):
@test_util.run_deprecated_v1
def testDefinition(self):
with self.session():
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
self.assertAllEqual(fft_ops.fftshift(x).eval(), y)
self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
self.assertAllEqual(fft_ops.fftshift(x).eval(), y)
self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)
@test_util.run_deprecated_v1
def testAxesKeyword(self):
with self.session():
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
self.assertAllEqual(fft_ops.fftshift(freqs, axes=(0, 1)).eval(), shifted)
self.assertAllEqual(
fft_ops.fftshift(freqs, axes=0).eval(),
fft_ops.fftshift(freqs, axes=(0,)).eval())
self.assertAllEqual(fft_ops.ifftshift(shifted, axes=(0, 1)).eval(), freqs)
self.assertAllEqual(
fft_ops.ifftshift(shifted, axes=0).eval(),
fft_ops.ifftshift(shifted, axes=(0,)).eval())
self.assertAllEqual(fft_ops.fftshift(freqs).eval(), shifted)
self.assertAllEqual(fft_ops.ifftshift(shifted).eval(), freqs)
@test_util.run_deprecated_v1
def testNumpyCompatibility(self):
with self.session():
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))
self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))
self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
self.assertAllEqual(
fft_ops.fftshift(freqs, axes=(0, 1)).eval(),
np.fft.fftshift(freqs, axes=(0, 1)))
self.assertAllEqual(
fft_ops.ifftshift(shifted, axes=(0, 1)).eval(),
np.fft.ifftshift(shifted, axes=(0, 1)))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/signal/fft_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for shape_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.kernel_tests.signal import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import shape_ops
from tensorflow.python.platform import test
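# A minimal NumPy sketch of the framing semantics exercised below for the
# pad_end=False case: frame i covers samples
# [i * frame_step, i * frame_step + frame_length). Illustrative only (it
# assumes at least one full frame fits); shape_ops.frame is the real
# implementation and also supports pad_end, pad_value and axis.
def _np_frame_no_pad(signal, frame_length, frame_step):
  signal = np.asarray(signal)
  num_frames = (signal.shape[-1] - frame_length) // frame_step + 1
  frames = [signal[..., i * frame_step:i * frame_step + frame_length]
            for i in range(num_frames)]
  return np.stack(frames, axis=-2)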
class FrameTest(test.TestCase):
@tf_test_util.run_deprecated_v1
def test_mapping_of_indices_without_padding(self):
with self.session(use_gpu=True):
tensor = constant_op.constant(np.arange(9152), dtypes.int32)
tensor = array_ops.expand_dims(tensor, 0)
result = shape_ops.frame(tensor, 512, 180, pad_end=False).eval()
expected = np.tile(np.arange(512), (49, 1))
expected += np.tile(np.arange(49) * 180, (512, 1)).T
expected = np.expand_dims(expected, axis=0)
expected = np.array(expected, dtype=np.int32)
self.assertAllEqual(expected, result)
@tf_test_util.run_deprecated_v1
def test_mapping_of_indices_with_padding(self):
with self.session(use_gpu=True):
tensor = constant_op.constant(np.arange(10000), dtypes.int32)
tensor = array_ops.expand_dims(tensor, 0)
result = shape_ops.frame(tensor, 512, 192, pad_end=True).eval()
expected = np.tile(np.arange(512), (53, 1))
expected += np.tile(np.arange(53) * 192, (512, 1)).T
expected[expected >= 10000] = 0
expected = np.expand_dims(expected, axis=0)
expected = np.array(expected, dtype=np.int32)
self.assertAllEqual(expected, result)
@tf_test_util.run_deprecated_v1
def test_invalid_inputs(self):
# Rank 0 input signal.
with self.assertRaises(ValueError):
shape_ops.frame(1, 1, 1)
# If the rank is unknown, do not raise an exception.
shape_ops.frame(array_ops.placeholder(dtypes.float32), 1, 1)
# Non-scalar frame_length.
with self.assertRaises(ValueError):
shape_ops.frame([1], [1], 1)
# Non-scalar frame_step.
with self.assertRaises(ValueError):
shape_ops.frame([1], 1, [1])
# Non-scalar pad_value.
with self.assertRaises(ValueError):
shape_ops.frame([1], 1, 1, pad_end=True, pad_value=[1])
@tf_test_util.run_deprecated_v1
def test_length_zero(self):
signal = constant_op.constant([], dtype=dtypes.float32)
frame_length = 2
frame_step = 1
with self.session(use_gpu=True):
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99).eval()
self.assertEqual((0, 2), result.shape)
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False).eval()
self.assertEqual((0, 2), result.shape)
@tf_test_util.run_deprecated_v1
def test_shape_inference(self):
signal = array_ops.placeholder(dtypes.int32, shape=[1, 1])
frame_length = 2
frame_step = 1
# Shape inference is able to detect the rank and inner-most dimension
# if frame_length is known at graph definition time.
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99)
self.assertEqual([1, 1, 2], result.shape.as_list())
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False)
self.assertEqual([1, 0, 2], result.shape.as_list())
# If frame_length is not known, rank and (known) outer and inner dimensions
# are inferred.
signal = array_ops.placeholder(dtypes.int32, shape=[1, 2, 3, 4])
frame_length = array_ops.placeholder(dtypes.int32, shape=[])
frame_step = 1
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99, axis=1)
self.assertEqual([1, 2, None, 3, 4], result.shape.as_list())
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False, axis=1)
self.assertEqual([1, None, None, 3, 4], result.shape.as_list())
    # If frame_length and the inner-most dimension are known, the rank, inner
    # dimensions, and known outer dimensions are inferred.
signal = array_ops.placeholder(dtypes.int32,
shape=[None, 5, None, 20, 5, 3])
frame_length = 4
frame_step = 3
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99, axis=3)
self.assertEqual([None, 5, None, 7, 4, 5, 3], result.shape.as_list())
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False, axis=3)
self.assertEqual([None, 5, None, 6, 4, 5, 3], result.shape.as_list())
# Test that shape inference is consistent with actual returned shapes for
# small values of signal_length, frame_length, frame_step, and pad_end in
# [True, False].
frame_step = 1
for signal_length in range(2):
signal = [0] * signal_length
for frame_length in range(2):
for pad_end in [False, True]:
op = shape_ops.frame(signal, frame_length, frame_step,
pad_end=pad_end, pad_value=99)
with self.cached_session(use_gpu=True):
result = self.evaluate(op)
self.assertEqual(op.shape.as_list(), list(result.shape))
@tf_test_util.run_deprecated_v1
def test_basic_mono(self):
signal = np.arange(6)
frame_length = 3
frame_step = 2
with self.session(use_gpu=True):
for rank in range(5):
nd_signal = np.reshape(signal, (1,) * rank + signal.shape)
# With padding, we pad the last frame with pad_value.
result = shape_ops.frame(nd_signal, frame_length, frame_step,
pad_end=True, pad_value=99).eval()
expected_inner_frames = np.array([[0, 1, 2], [2, 3, 4], [4, 5, 99]])
expected = np.reshape(
expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
self.assertAllEqual(expected, result)
# Without padding, we drop the last frame.
expected_inner_frames = np.array([[0, 1, 2], [2, 3, 4]])
expected = np.reshape(
expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
result = shape_ops.frame(nd_signal, frame_length, frame_step,
pad_end=False).eval()
self.assertAllEqual(expected, result)
@tf_test_util.run_deprecated_v1
def test_basic_stereo(self):
signal = np.vstack([np.arange(6),
np.arange(6) + 10])
frame_length = 3
frame_step = 2
with self.session(use_gpu=True):
for rank in range(5):
nd_signal = np.reshape(signal, (1,) * rank + signal.shape)
# With padding, we pad the last frame with pad_value.
result = shape_ops.frame(nd_signal, frame_length, frame_step,
pad_end=True, pad_value=99).eval()
expected_inner_frames = np.array([
[[0, 1, 2], [2, 3, 4], [4, 5, 99]],
[[10, 11, 12], [12, 13, 14], [14, 15, 99]]])
expected = np.reshape(
expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
self.assertAllEqual(expected, result)
# Without padding, we drop the last frame.
expected_inner_frames = np.array([[[0, 1, 2], [2, 3, 4]],
[[10, 11, 12], [12, 13, 14]]])
expected = np.reshape(
expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
result = shape_ops.frame(nd_signal, frame_length, frame_step,
pad_end=False).eval()
self.assertAllEqual(expected, result)
@tf_test_util.run_deprecated_v1
def test_complex_shape(self):
signal = np.vstack([np.arange(6),
np.arange(6) + 10,
np.arange(6) + 20,
np.arange(6) + 30,
np.arange(6) + 40,
np.arange(6) + 50])
signal = np.reshape(signal, (2, 1, 3, 1, 6))
frame_length = 3
frame_step = 2
with self.session(use_gpu=True):
# With padding, we pad the last frame with pad_value.
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99).eval()
# Resulting shape is (2, 1, 3, 1, 3, 3).
expected = [[[[[[0, 1, 2], [2, 3, 4], [4, 5, 99]]],
[[[10, 11, 12], [12, 13, 14], [14, 15, 99]]],
[[[20, 21, 22], [22, 23, 24], [24, 25, 99]]]]],
[[[[[30, 31, 32], [32, 33, 34], [34, 35, 99]]],
[[[40, 41, 42], [42, 43, 44], [44, 45, 99]]],
[[[50, 51, 52], [52, 53, 54], [54, 55, 99]]]]]]
self.assertAllEqual(expected, result)
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False).eval()
# Resulting shape is (2, 1, 3, 1, 3, 2).
expected = [[[[[[0, 1, 2], [2, 3, 4]]],
[[[10, 11, 12], [12, 13, 14]]],
[[[20, 21, 22], [22, 23, 24]]]]],
[[[[[30, 31, 32], [32, 33, 34]]],
[[[40, 41, 42], [42, 43, 44]]],
[[[50, 51, 52], [52, 53, 54]]]]]]
self.assertAllEqual(expected, result)
def test_axis(self):
signal = np.reshape(np.arange(16), (2, 4, 2))
with self.session(use_gpu=True):
result = shape_ops.frame(signal, frame_length=2, frame_step=2,
pad_end=True, axis=1)
expected = np.reshape(np.arange(16), (2, 2, 2, 2))
self.assertAllEqual(expected, self.evaluate(result))
result = shape_ops.frame(signal, frame_length=2, frame_step=1,
pad_end=True, axis=1)
expected = [[[[0, 1], [2, 3]],
[[2, 3], [4, 5]],
[[4, 5], [6, 7]],
[[6, 7], [0, 0]]],
[[[8, 9], [10, 11]],
[[10, 11], [12, 13]],
[[12, 13], [14, 15]],
[[14, 15], [0, 0]]]]
self.assertAllEqual(expected, self.evaluate(result))
result = shape_ops.frame(signal, frame_length=3, frame_step=1,
pad_end=True, axis=1)
expected = [[[[0, 1], [2, 3], [4, 5]],
[[2, 3], [4, 5], [6, 7]],
[[4, 5], [6, 7], [0, 0]],
[[6, 7], [0, 0], [0, 0]]],
[[[8, 9], [10, 11], [12, 13]],
[[10, 11], [12, 13], [14, 15]],
[[12, 13], [14, 15], [0, 0]],
[[14, 15], [0, 0], [0, 0]]]]
self.assertAllEqual(expected, self.evaluate(result))
@tf_test_util.run_deprecated_v1
def test_window_larger_than_signal(self):
signal = constant_op.constant([[1, 2], [11, 12]], dtype=dtypes.float32)
frame_length = 4
frame_step = 1
with self.session(use_gpu=True):
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99).eval()
self.assertAllClose([[[1, 2, 99, 99], [2, 99, 99, 99]],
[[11, 12, 99, 99], [12, 99, 99, 99]]], result)
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False).eval()
self.assertEqual((2, 0, 4), result.shape)
frame_step = 2
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99).eval()
self.assertAllClose([[[1, 2, 99, 99]], [[11, 12, 99, 99]]], result)
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False).eval()
self.assertEqual((2, 0, 4), result.shape)
def test_preserves_type(self):
signal = math_ops.range(10, dtype=dtypes.float64)
frame_length = 2
frame_step = 3
with self.session(use_gpu=True):
result = shape_ops.frame(signal, frame_length, frame_step)
self.assertEqual(result.dtype, signal.dtype)
@tf_test_util.run_deprecated_v1
def test_dynamic_tensor(self):
# Show that frame works even when the dimensions of its input are
# not known at graph creation time.
input_signal = np.vstack([np.arange(4), np.arange(4) + 10,
np.arange(4) + 20])
frame_length = 2
frame_step = 2
with self.session(use_gpu=True) as sess:
signal_placeholder = array_ops.placeholder(shape=(None, None),
dtype=dtypes.float32)
result = sess.run(shape_ops.frame(
signal_placeholder, frame_length, frame_step),
feed_dict={signal_placeholder: input_signal})
self.assertAllEqual([[[0, 1], [2, 3]],
[[10, 11], [12, 13]],
[[20, 21], [22, 23]]], result)
@tf_test_util.run_deprecated_v1
def test_gradient_numerical(self):
with self.session(use_gpu=True):
signal_shape = (2, 128)
signal = array_ops.ones(signal_shape)
frame_length = 33
frame_step = 9
frames = shape_ops.frame(signal, frame_length, frame_step)
error = test.compute_gradient_error(
signal, signal_shape, frames, frames.shape.as_list())
self.assertLess(error, 2e-5)
def test_constant_folding(self):
"""frame should be constant foldable for constant inputs."""
for pad_end in [True, False]:
g = ops.Graph()
with g.as_default():
frame_length, frame_step = 32, 16
signal_shape = (2, 128)
signal = array_ops.ones(signal_shape)
frames = shape_ops.frame(signal, frame_length, frame_step,
pad_end=pad_end)
rewritten_graph = test_util.grappler_optimize(g, [frames])
self.assertEqual(1, len(rewritten_graph.node))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/signal/shape_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DCT operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import dct_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
fftpack = try_import("scipy.fftpack")
def _modify_input_for_dct(signals, n=None):
""" This is a supporting function for the numpy implementation
of DCT operations. If n < signal size, it returns the first n elements,
else it pads the signal with zeros.
"""
signal = np.array(signals)
if n is None or n == signal.shape[-1]:
signal_mod = signal
elif n >= 1:
signal_len = signal.shape[-1]
if n <= signal_len:
signal_mod = signal[..., 0:n]
else:
output_shape = list(signal.shape)
output_shape[-1] = n
signal_mod = np.zeros(output_shape)
signal_mod[..., 0:signal.shape[-1]] = signal
if n:
assert signal_mod.shape[-1] == n
return signal_mod
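# A quick illustration of _modify_input_for_dct (defined here for reference,
# never executed at import time): n smaller than the last dimension
# truncates, n larger pads with zeros, and n=None is the identity.
def _demo_modify_input_for_dct():
  signal = np.array([1., 2., 3.])
  assert np.array_equal(_modify_input_for_dct(signal, n=2), [1., 2.])
  assert np.array_equal(_modify_input_for_dct(signal, n=4), [1., 2., 3., 0.])
  assert np.array_equal(_modify_input_for_dct(signal), signal)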
def _np_dct1(signals, n=None, norm=None):
"""Computes the DCT-I manually with NumPy."""
  # X_k = x_0 + (-1)**k * x_{N-1} +
  #       2 * sum_{n=1}^{N-2} x_n * cos(\frac{pi}{N-1} * n * k),  k=0,...,N-1
del norm
signals_mod = _modify_input_for_dct(signals, n=n)
dct_size = signals_mod.shape[-1]
dct = np.zeros_like(signals_mod)
for k in range(dct_size):
phi = np.cos(np.pi * np.arange(1, dct_size - 1) * k / (dct_size - 1))
dct[..., k] = 2 * np.sum(
signals_mod[..., 1:-1] * phi, axis=-1) + (
signals_mod[..., 0] + (-1)**k * signals_mod[..., -1])
return dct
def _np_dct2(signals, n=None, norm=None):
"""Computes the DCT-II manually with NumPy."""
# X_k = sum_{n=0}^{N-1} x_n * cos(\frac{pi}{N} * (n + 0.5) * k) k=0,...,N-1
signals_mod = _modify_input_for_dct(signals, n=n)
dct_size = signals_mod.shape[-1]
dct = np.zeros_like(signals_mod)
for k in range(dct_size):
phi = np.cos(np.pi * (np.arange(dct_size) + 0.5) * k / dct_size)
dct[..., k] = np.sum(signals_mod * phi, axis=-1)
# SciPy's `dct` has a scaling factor of 2.0 which we follow.
# https://github.com/scipy/scipy/blob/v0.15.1/scipy/fftpack/src/dct.c.src
if norm == "ortho":
# The orthonormal scaling includes a factor of 0.5 which we combine with
# the overall scaling of 2.0 to cancel.
dct[..., 0] *= np.sqrt(1.0 / dct_size)
dct[..., 1:] *= np.sqrt(2.0 / dct_size)
else:
dct *= 2.0
return dct
def _np_dct3(signals, n=None, norm=None):
"""Computes the DCT-III manually with NumPy."""
# SciPy's `dct` has a scaling factor of 2.0 which we follow.
# https://github.com/scipy/scipy/blob/v0.15.1/scipy/fftpack/src/dct.c.src
signals_mod = _modify_input_for_dct(signals, n=n)
dct_size = signals_mod.shape[-1]
signals_mod = np.array(signals_mod) # make a copy so we can modify
if norm == "ortho":
signals_mod[..., 0] *= np.sqrt(4.0 / dct_size)
signals_mod[..., 1:] *= np.sqrt(2.0 / dct_size)
else:
signals_mod *= 2.0
dct = np.zeros_like(signals_mod)
# X_k = 0.5 * x_0 +
# sum_{n=1}^{N-1} x_n * cos(\frac{pi}{N} * n * (k + 0.5)) k=0,...,N-1
half_x0 = 0.5 * signals_mod[..., 0]
for k in range(dct_size):
phi = np.cos(np.pi * np.arange(1, dct_size) * (k + 0.5) / dct_size)
dct[..., k] = half_x0 + np.sum(signals_mod[..., 1:] * phi, axis=-1)
return dct
NP_DCT = {1: _np_dct1, 2: _np_dct2, 3: _np_dct3}
NP_IDCT = {1: _np_dct1, 2: _np_dct3, 3: _np_dct2}
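# The loops in _np_dct2 can equivalently be written as a single matrix
# multiply, which is also how a DCT can be fused into downstream matmuls. A
# sketch of the unnormalized DCT-II matrix (illustrative, not used by the
# tests): signals @ _np_dct2_matrix(N).T matches _np_dct2(signals, norm=None).
def _np_dct2_matrix(dct_size):
  n = np.arange(dct_size)
  k = n.reshape(-1, 1)
  # Row k holds 2 * cos(pi * (n + 0.5) * k / N).
  return 2.0 * np.cos(np.pi * (n + 0.5) * k / dct_size)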
class DCTOpsTest(parameterized.TestCase, test.TestCase):
def _compare(self, signals, n, norm, dct_type, atol=5e-4, rtol=5e-4):
"""Compares (I)DCT to SciPy (if available) and a NumPy implementation."""
np_dct = NP_DCT[dct_type](signals, n=n, norm=norm)
tf_dct = dct_ops.dct(signals, n=n, type=dct_type, norm=norm).eval()
self.assertAllClose(np_dct, tf_dct, atol=atol, rtol=rtol)
np_idct = NP_IDCT[dct_type](signals, n=None, norm=norm)
tf_idct = dct_ops.idct(signals, type=dct_type, norm=norm).eval()
self.assertAllClose(np_idct, tf_idct, atol=atol, rtol=rtol)
if fftpack:
scipy_dct = fftpack.dct(signals, n=n, type=dct_type, norm=norm)
self.assertAllClose(scipy_dct, tf_dct, atol=atol, rtol=rtol)
scipy_idct = fftpack.idct(signals, type=dct_type, norm=norm)
self.assertAllClose(scipy_idct, tf_idct, atol=atol, rtol=rtol)
# Verify inverse(forward(s)) == s, up to a normalization factor.
    # Since `n` is not implemented for the IDCT operation, recompute tf_dct
    # without n.
tf_dct = dct_ops.dct(signals, type=dct_type, norm=norm).eval()
tf_idct_dct = dct_ops.idct(
tf_dct, type=dct_type, norm=norm).eval()
tf_dct_idct = dct_ops.dct(
tf_idct, type=dct_type, norm=norm).eval()
if norm is None:
if dct_type == 1:
tf_idct_dct *= 0.5 / (signals.shape[-1] - 1)
tf_dct_idct *= 0.5 / (signals.shape[-1] - 1)
else:
tf_idct_dct *= 0.5 / signals.shape[-1]
tf_dct_idct *= 0.5 / signals.shape[-1]
self.assertAllClose(signals, tf_idct_dct, atol=atol, rtol=rtol)
self.assertAllClose(signals, tf_dct_idct, atol=atol, rtol=rtol)
@parameterized.parameters([
[[2]], [[3]], [[10]], [[2, 20]], [[2, 3, 25]]])
@test_util.run_deprecated_v1
def test_random(self, shape):
"""Test randomly generated batches of data."""
with spectral_ops_test_util.fft_kernel_label_map():
with self.session(use_gpu=True):
signals = np.random.rand(*shape).astype(np.float32)
n = np.random.randint(1, 2 * signals.shape[-1])
n = np.random.choice([None, n])
        # Orthonormal normalization is not implemented for DCT-I.
self._compare(signals, n, norm=None, dct_type=1)
for norm in (None, "ortho"):
self._compare(signals, n=n, norm=norm, dct_type=2)
self._compare(signals, n=n, norm=norm, dct_type=3)
def test_error(self):
signals = np.random.rand(10)
# Unsupported type.
with self.assertRaises(ValueError):
dct_ops.dct(signals, type=5)
# Invalid n.
with self.assertRaises(ValueError):
dct_ops.dct(signals, n=-2)
# DCT-I normalization not implemented.
with self.assertRaises(ValueError):
dct_ops.dct(signals, type=1, norm="ortho")
# DCT-I requires at least two inputs.
with self.assertRaises(ValueError):
dct_ops.dct(np.random.rand(1), type=1)
# Unknown normalization.
with self.assertRaises(ValueError):
dct_ops.dct(signals, norm="bad")
with self.assertRaises(NotImplementedError):
dct_ops.dct(signals, axis=0)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/signal/dct_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mfcc_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import mfcc_ops
from tensorflow.python.platform import test
# TODO(rjryan): We have no open source tests for MFCCs at the moment. Internally
# at Google, this code is tested against a reference implementation that follows
# HTK conventions.
class MFCCTest(test.TestCase):
@test_util.run_deprecated_v1
def test_error(self):
# num_mel_bins must be positive.
with self.assertRaises(ValueError):
signal = array_ops.zeros((2, 3, 0))
mfcc_ops.mfccs_from_log_mel_spectrograms(signal)
# signal must be float32
with self.assertRaises(ValueError):
signal = array_ops.zeros((2, 3, 5), dtype=dtypes.float64)
mfcc_ops.mfccs_from_log_mel_spectrograms(signal)
@test_util.run_deprecated_v1
def test_basic(self):
"""A basic test that the op runs on random input."""
with spectral_ops_test_util.fft_kernel_label_map():
with self.session(use_gpu=True):
signal = random_ops.random_normal((2, 3, 5))
mfcc_ops.mfccs_from_log_mel_spectrograms(signal).eval()
@test_util.run_deprecated_v1
def test_unknown_shape(self):
"""A test that the op runs when shape and rank are unknown."""
with spectral_ops_test_util.fft_kernel_label_map():
with self.session(use_gpu=True):
signal = array_ops.placeholder_with_default(
random_ops.random_normal((2, 3, 5)), tensor_shape.TensorShape(None))
self.assertIsNone(signal.shape.ndims)
mfcc_ops.mfccs_from_log_mel_spectrograms(signal).eval()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/signal/mfcc_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for window_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.kernel_tests.signal import test_util
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.platform import test
def _scipy_raised_cosine(length, symmetric=True, a=0.5, b=0.5):
"""A simple implementation of a raised cosine window that matches SciPy.
https://en.wikipedia.org/wiki/Window_function#Hann_window
https://github.com/scipy/scipy/blob/v0.14.0/scipy/signal/windows.py#L615
Args:
length: The window length.
symmetric: Whether to create a symmetric window.
a: The alpha parameter of the raised cosine window.
b: The beta parameter of the raised cosine window.
Returns:
A raised cosine window of length `length`.
"""
if length == 1:
return np.ones(1)
odd = length % 2
if not symmetric and not odd:
length += 1
window = a - b * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
if not symmetric and not odd:
window = window[:-1]
return window
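# For the symmetric Hann case (a=0.5, b=0.5) the helper above reduces to
# NumPy's built-in np.hanning. A small sanity sketch, defined but not run:
def _demo_raised_cosine_matches_hanning(length=16):
  return np.allclose(_scipy_raised_cosine(length), np.hanning(length))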
class WindowOpsTest(test.TestCase):
def setUp(self):
self._window_lengths = [1, 2, 3, 4, 5, 31, 64, 128]
self._dtypes = [(dtypes.float16, 1e-2),
(dtypes.float32, 1e-6),
(dtypes.float64, 1e-9)]
def _compare_window_fns(self, np_window_fn, tf_window_fn):
with self.session(use_gpu=True):
for window_length in self._window_lengths:
for periodic in [False, True]:
for tf_dtype, tol in self._dtypes:
np_dtype = tf_dtype.as_numpy_dtype
expected = np_window_fn(window_length,
symmetric=not periodic).astype(np_dtype)
actual = tf_window_fn(window_length, periodic=periodic,
dtype=tf_dtype).eval()
self.assertAllClose(expected, actual, tol, tol)
@tf_test_util.run_deprecated_v1
def test_hann_window(self):
"""Check that hann_window matches scipy.signal.hann behavior."""
# The Hann window is a raised cosine window with parameters alpha=0.5 and
# beta=0.5.
# https://en.wikipedia.org/wiki/Window_function#Hann_window
self._compare_window_fns(
functools.partial(_scipy_raised_cosine, a=0.5, b=0.5),
window_ops.hann_window)
@tf_test_util.run_deprecated_v1
def test_hamming_window(self):
"""Check that hamming_window matches scipy.signal.hamming's behavior."""
# The Hamming window is a raised cosine window with parameters alpha=0.54
# and beta=0.46.
# https://en.wikipedia.org/wiki/Window_function#Hamming_window
self._compare_window_fns(
functools.partial(_scipy_raised_cosine, a=0.54, b=0.46),
window_ops.hamming_window)
def test_constant_folding(self):
"""Window functions should be constant foldable for constant inputs."""
for window_fn in (window_ops.hann_window, window_ops.hamming_window):
for dtype, _ in self._dtypes:
for periodic in [False, True]:
g = ops.Graph()
with g.as_default():
window = window_fn(100, periodic=periodic, dtype=dtype)
rewritten_graph = test_util.grappler_optimize(g, [window])
self.assertEqual(1, len(rewritten_graph.node))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/signal/window_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities for tf.signal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.training import saver
def grappler_optimize(graph, fetches=None, config_proto=None):
"""Tries to optimize the provided graph using grappler.
Args:
graph: A `tf.Graph` instance containing the graph to optimize.
fetches: An optional list of `Tensor`s to fetch (i.e. not optimize away).
Grappler uses the 'train_op' collection to look for fetches, so if not
provided this collection should be non-empty.
config_proto: An optional `tf.compat.v1.ConfigProto` to use when rewriting
the graph.
Returns:
A `tf.compat.v1.GraphDef` containing the rewritten graph.
"""
if config_proto is None:
config_proto = config_pb2.ConfigProto()
config_proto.graph_options.rewrite_options.min_graph_nodes = -1
if fetches is not None:
for fetch in fetches:
graph.add_to_collection('train_op', fetch)
metagraph = saver.export_meta_graph(graph_def=graph.as_graph_def())
return tf_optimizer.OptimizeGraph(config_proto, metagraph)
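def _example_grappler_optimize():
  """Minimal usage sketch, added for illustration (not part of this module).
  A graph consisting only of constants should constant-fold down to a single
  node, which is what the signal tests assert via len(rewritten.node).
  """
  from tensorflow.python.framework import constant_op
  from tensorflow.python.framework import ops
  g = ops.Graph()
  with g.as_default():
    product = constant_op.constant(2.0) * constant_op.constant(3.0)
  rewritten = grappler_optimize(g, [product])
  return len(rewritten.node)  # Expected: 1, the folded constant.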
|
tensorflow-master
|
tensorflow/python/kernel_tests/signal/test_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mel_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.kernel_tests.signal import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.signal import mel_ops
from tensorflow.python.platform import test
# mel spectrum constants and functions.
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
def hertz_to_mel(frequencies_hertz):
"""Convert frequencies to mel scale using HTK formula.
Copied from
https://github.com/tensorflow/models/blob/master/research/audioset/mel_features.py.
Args:
frequencies_hertz: Scalar or np.array of frequencies in hertz.
Returns:
Object of same size as frequencies_hertz containing corresponding values
on the mel scale.
"""
return _MEL_HIGH_FREQUENCY_Q * np.log(
1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ))
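def _example_hertz_to_mel():
  """Illustrative sketch (added): at the 700 Hz break frequency the HTK
  formula gives 1127 * ln(2), roughly 781.2 mel."""
  assert np.isclose(hertz_to_mel(700.0), _MEL_HIGH_FREQUENCY_Q * np.log(2.0))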
def spectrogram_to_mel_matrix(num_mel_bins=20,
num_spectrogram_bins=129,
audio_sample_rate=8000,
lower_edge_hertz=125.0,
upper_edge_hertz=3800.0,
unused_dtype=None):
"""Return a matrix that can post-multiply spectrogram rows to make mel.
Copied from
https://github.com/tensorflow/models/blob/master/research/audioset/mel_features.py.
Returns a np.array matrix A that can be used to post-multiply a matrix S of
spectrogram values (STFT magnitudes) arranged as frames x bins to generate a
"mel spectrogram" M of frames x num_mel_bins. M = S A.
The classic HTK algorithm exploits the complementarity of adjacent mel bands
to multiply each FFT bin by only one mel weight, then add it, with positive
and negative signs, to the two adjacent mel bands to which that bin
contributes. Here, by expressing this operation as a matrix multiply, we go
  from num_fft multiplies per frame (plus around 2*num_fft adds) to around
  num_fft * num_mel_bins multiplies and adds per frame. However, because these
  are all presumably accomplished in a single call to np.dot(), it's not clear
  which approach is
faster in Python. The matrix multiplication has the attraction of being more
general and flexible, and much easier to read.
Args:
num_mel_bins: How many bands in the resulting mel spectrum. This is
the number of columns in the output matrix.
num_spectrogram_bins: How many bins there are in the source spectrogram
data, which is understood to be fft_size/2 + 1, i.e. the spectrogram
only contains the nonredundant FFT bins.
audio_sample_rate: Samples per second of the audio at the input to the
spectrogram. We need this to figure out the actual frequencies for
each spectrogram bin, which dictates how they are mapped into mel.
lower_edge_hertz: Lower bound on the frequencies to be included in the mel
spectrum. This corresponds to the lower edge of the lowest triangular
band.
upper_edge_hertz: The desired top edge of the highest frequency band.
Returns:
An np.array with shape (num_spectrogram_bins, num_mel_bins).
Raises:
ValueError: if frequency edges are incorrectly ordered.
"""
nyquist_hertz = audio_sample_rate / 2.
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError("lower_edge_hertz %.1f >= upper_edge_hertz %.1f" %
(lower_edge_hertz, upper_edge_hertz))
spectrogram_bins_hertz = np.linspace(0.0, nyquist_hertz, num_spectrogram_bins)
spectrogram_bins_mel = hertz_to_mel(spectrogram_bins_hertz)
# The i'th mel band (starting from i=1) has center frequency
# band_edges_mel[i], lower edge band_edges_mel[i-1], and higher edge
# band_edges_mel[i+1]. Thus, we need num_mel_bins + 2 values in
# the band_edges_mel arrays.
band_edges_mel = np.linspace(hertz_to_mel(lower_edge_hertz),
hertz_to_mel(upper_edge_hertz), num_mel_bins + 2)
# Matrix to post-multiply feature arrays whose rows are num_spectrogram_bins
# of spectrogram values.
mel_weights_matrix = np.empty((num_spectrogram_bins, num_mel_bins))
for i in range(num_mel_bins):
lower_edge_mel, center_mel, upper_edge_mel = band_edges_mel[i:i + 3]
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the *mel* domain, not hertz.
lower_slope = ((spectrogram_bins_mel - lower_edge_mel) /
(center_mel - lower_edge_mel))
upper_slope = ((upper_edge_mel - spectrogram_bins_mel) /
(upper_edge_mel - center_mel))
    # ... then intersect them with each other and with zero.
mel_weights_matrix[:, i] = np.maximum(0.0, np.minimum(lower_slope,
upper_slope))
# HTK excludes the spectrogram DC bin; make sure it always gets a zero
# coefficient.
mel_weights_matrix[0, :] = 0.0
return mel_weights_matrix
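def _example_apply_mel_matrix():
  """Illustrative usage sketch (added, not part of the original test).
  A (frames, num_spectrogram_bins) magnitude spectrogram S maps to a
  (frames, num_mel_bins) mel spectrogram by right-multiplication: M = S . A.
  """
  spectrogram = np.random.uniform(size=(7, 129))  # 7 frames, 129 FFT bins.
  mel_matrix = spectrogram_to_mel_matrix(num_mel_bins=20,
                                         num_spectrogram_bins=129)
  mel_spectrogram = np.dot(spectrogram, mel_matrix)
  assert mel_spectrogram.shape == (7, 20)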
class LinearToMelTest(test.TestCase):
def test_matches_reference_implementation(self):
# Tuples of (num_mel_bins, num_spectrogram_bins, sample_rate,
# lower_edge_hertz, upper_edge_hertz) to test.
configs = [
# Defaults.
(20, 129, 8000.0, 125.0, 3800.0, dtypes.float64),
# Settings used by Tacotron (https://arxiv.org/abs/1703.10135).
(80, 1025, 24000.0, 80.0, 12000.0, dtypes.float64)
]
with self.session(use_gpu=True):
for config in configs:
mel_matrix_np = spectrogram_to_mel_matrix(*config)
mel_matrix = mel_ops.linear_to_mel_weight_matrix(*config)
self.assertAllClose(mel_matrix_np, self.evaluate(mel_matrix), atol=3e-6)
@tf_test_util.run_deprecated_v1
def test_dtypes(self):
# LinSpace is not supported for tf.float16.
for dtype in (dtypes.bfloat16, dtypes.float32, dtypes.float64):
self.assertEqual(dtype,
mel_ops.linear_to_mel_weight_matrix(dtype=dtype).dtype)
@tf_test_util.run_deprecated_v1
def test_error(self):
with self.assertRaises(ValueError):
mel_ops.linear_to_mel_weight_matrix(num_mel_bins=0)
with self.assertRaises(ValueError):
mel_ops.linear_to_mel_weight_matrix(num_spectrogram_bins=0)
with self.assertRaises(ValueError):
mel_ops.linear_to_mel_weight_matrix(sample_rate=0.0)
with self.assertRaises(ValueError):
mel_ops.linear_to_mel_weight_matrix(lower_edge_hertz=-1)
with self.assertRaises(ValueError):
mel_ops.linear_to_mel_weight_matrix(lower_edge_hertz=100,
upper_edge_hertz=10)
with self.assertRaises(ValueError):
mel_ops.linear_to_mel_weight_matrix(upper_edge_hertz=1000,
sample_rate=800)
with self.assertRaises(ValueError):
mel_ops.linear_to_mel_weight_matrix(dtype=dtypes.int32)
def test_constant_folding(self):
"""Mel functions should be constant foldable."""
    # TODO(rjryan): tf.bfloat16 cannot be constant folded by Grappler.
for dtype in (dtypes.float32, dtypes.float64):
g = ops.Graph()
with g.as_default():
mel_matrix = mel_ops.linear_to_mel_weight_matrix(dtype=dtype)
rewritten_graph = test_util.grappler_optimize(g, [mel_matrix])
self.assertEqual(1, len(rewritten_graph.node))
@tf_test_util.run_deprecated_v1
def test_num_spectrogram_bins_dynamic(self):
with self.session(use_gpu=True):
num_spectrogram_bins = array_ops.placeholder(shape=(),
dtype=dtypes.int32)
mel_matrix_np = spectrogram_to_mel_matrix(
20, 129, 8000.0, 125.0, 3800.0)
mel_matrix = mel_ops.linear_to_mel_weight_matrix(
20, num_spectrogram_bins, 8000.0, 125.0, 3800.0)
self.assertAllClose(
mel_matrix_np,
mel_matrix.eval(feed_dict={num_spectrogram_bins: 129}), atol=3e-6)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/signal/mel_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for spectral_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import spectral_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.platform import test
class SpectralOpsTest(test.TestCase):
@staticmethod
def _np_hann_periodic_window(length):
if length == 1:
return np.ones(1)
odd = length % 2
if not odd:
length += 1
window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
if not odd:
window = window[:-1]
return window
@staticmethod
def _np_frame(data, window_length, hop_length):
    num_frames = 1 + (len(data) - window_length) // hop_length
shape = (num_frames, window_length)
strides = (data.strides[0] * hop_length, data.strides[0])
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
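  # Added note (illustrative): for data = np.arange(6), window_length = 3 and
  # hop_length = 2, the strided view above is [[0, 1, 2], [2, 3, 4]]:
  # num_frames = 1 + (6 - 3) // 2 = 2, and consecutive frames share
  # window_length - hop_length = 1 sample.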
@staticmethod
def _np_stft(data, fft_length, hop_length, window_length):
frames = SpectralOpsTest._np_frame(data, window_length, hop_length)
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return np.fft.rfft(frames * window, fft_length)
@staticmethod
def _np_inverse_stft(stft, fft_length, hop_length, window_length):
frames = np.fft.irfft(stft, fft_length)
    # Pad or truncate frames' inner dimension to window_length.
frames = frames[..., :window_length]
frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +
[[0, max(0, window_length - frames.shape[-1])]], "constant")
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return SpectralOpsTest._np_overlap_add(frames * window, hop_length)
@staticmethod
def _np_overlap_add(stft, hop_length):
num_frames, window_length = np.shape(stft)
# Output length will be one complete window, plus another hop_length's
# worth of points for each additional window.
output_length = window_length + (num_frames - 1) * hop_length
output = np.zeros(output_length)
for i in range(num_frames):
      output[i * hop_length:i * hop_length + window_length] += stft[i, :]
return output
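  # Added note (illustrative): overlap-adding 3 frames of length 5 with
  # hop_length = 2 yields 5 + (3 - 1) * 2 = 9 output samples, per the
  # output_length formula above.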
def _compare(self, signal, frame_length, frame_step, fft_length):
with spectral_ops_test_util.fft_kernel_label_map(), (
self.cached_session(use_gpu=True)) as sess:
actual_stft = spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
signal_ph = array_ops.placeholder(dtype=dtypes.as_dtype(signal.dtype))
actual_stft_from_ph = spectral_ops.stft(
signal_ph, frame_length, frame_step, fft_length, pad_end=False)
actual_inverse_stft = spectral_ops.inverse_stft(
actual_stft, frame_length, frame_step, fft_length)
actual_stft, actual_stft_from_ph, actual_inverse_stft = sess.run(
[actual_stft, actual_stft_from_ph, actual_inverse_stft],
feed_dict={signal_ph: signal})
actual_stft_ph = array_ops.placeholder(dtype=actual_stft.dtype)
actual_inverse_stft_from_ph = sess.run(
spectral_ops.inverse_stft(
actual_stft_ph, frame_length, frame_step, fft_length),
feed_dict={actual_stft_ph: actual_stft})
      # Confirm that the outputs are identical whether the input shape/rank
      # is fully known or unknown.
self.assertAllClose(actual_stft, actual_stft_from_ph)
self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)
expected_stft = SpectralOpsTest._np_stft(
signal, fft_length, frame_step, frame_length)
self.assertAllClose(expected_stft, actual_stft, 1e-4, 1e-4)
expected_inverse_stft = SpectralOpsTest._np_inverse_stft(
expected_stft, fft_length, frame_step, frame_length)
self.assertAllClose(
expected_inverse_stft, actual_inverse_stft, 1e-4, 1e-4)
def test_shapes(self):
with spectral_ops_test_util.fft_kernel_label_map(), (
self.session(use_gpu=True)):
signal = np.zeros((512,)).astype(np.float32)
# If fft_length is not provided, the smallest enclosing power of 2 of
# frame_length (8) is used.
stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
fft_length=16, pad_end=True)
self.assertAllEqual([64, 9], stft.shape.as_list())
self.assertAllEqual([64, 9], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,
fft_length=8, pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = np.zeros((32, 9)).astype(np.complex64)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,
fft_length=16, frame_step=8)
expected_length = (stft.shape[0] - 1) * 8 + 8
self.assertAllEqual([256], inverse_stft.shape.as_list())
self.assertAllEqual([expected_length], self.evaluate(inverse_stft).shape)
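  # Added note (illustrative): with pad_end=True the STFT emits
  # ceil(signal_length / frame_step) frames and fft_length // 2 + 1 unique
  # bins; e.g. 512 / 8 = 64 frames and 16 // 2 + 1 = 9 bins above.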
def test_stft_and_inverse_stft(self):
"""Test that spectral_ops.stft/inverse_stft match a NumPy implementation."""
# Tuples of (signal_length, frame_length, frame_step, fft_length).
test_configs = [
(512, 64, 32, 64),
(512, 64, 64, 64),
(512, 72, 64, 64),
(512, 64, 25, 64),
(512, 25, 15, 36),
(123, 23, 5, 42),
]
for signal_length, frame_length, frame_step, fft_length in test_configs:
signal = np.random.random(signal_length).astype(np.float32)
self._compare(signal, frame_length, frame_step, fft_length)
def test_stft_round_trip(self):
# Tuples of (signal_length, frame_length, frame_step, fft_length,
# threshold, corrected_threshold).
test_configs = [
# 87.5% overlap.
(4096, 256, 32, 256, 1e-5, 1e-6),
# 75% overlap.
(4096, 256, 64, 256, 1e-5, 1e-6),
# Odd frame hop.
(4096, 128, 25, 128, 1e-3, 1e-6),
# Odd frame length.
(4096, 127, 32, 128, 1e-3, 1e-6),
# 50% overlap.
(4096, 128, 64, 128, 0.40, 1e-6),
]
for (signal_length, frame_length, frame_step, fft_length, threshold,
corrected_threshold) in test_configs:
# Generate a random white Gaussian signal.
signal = random_ops.random_normal([signal_length])
with spectral_ops_test_util.fft_kernel_label_map(), (
self.cached_session(use_gpu=True)) as sess:
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
inverse_stft_corrected = spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length,
window_fn=spectral_ops.inverse_stft_window_fn(frame_step))
signal, inverse_stft, inverse_stft_corrected = sess.run(
[signal, inverse_stft, inverse_stft_corrected])
# Truncate signal to the size of inverse stft.
signal = signal[:inverse_stft.shape[0]]
# Ignore the frame_length samples at either edge.
signal = signal[frame_length:-frame_length]
inverse_stft = inverse_stft[frame_length:-frame_length]
inverse_stft_corrected = inverse_stft_corrected[
frame_length:-frame_length]
# Check that the inverse and original signal are close up to a scale
# factor.
inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))
signal_scaled = signal / np.mean(np.abs(signal))
self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)
# Check that the inverse with correction and original signal are close.
self.assertLess(np.std(inverse_stft_corrected - signal),
corrected_threshold)
def test_inverse_stft_window_fn(self):
"""Test that inverse_stft_window_fn has unit gain at each window phase."""
# Tuples of (frame_length, frame_step).
test_configs = [
(256, 32),
(256, 64),
(128, 25),
(127, 32),
(128, 64),
]
for (frame_length, frame_step) in test_configs:
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
      with self.cached_session(use_gpu=True):
hann_window, inverse_window = self.evaluate(
[hann_window, inverse_window])
# Expect unit gain at each phase of the window.
product_window = hann_window * inverse_window
for i in range(frame_step):
self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))
def test_inverse_stft_window_fn_special_case(self):
"""Test inverse_stft_window_fn in special overlap = 3/4 case."""
# Cases in which frame_length is an integer multiple of 4 * frame_step are
# special because they allow exact reproduction of the waveform with a
# squared Hann window (Hann window in both forward and reverse transforms).
# In the case where frame_length = 4 * frame_step, that combination
# produces a constant gain of 1.5, and so the corrected window will be the
# Hann window / 1.5.
# Tuples of (frame_length, frame_step).
test_configs = [
(256, 64),
(128, 32),
]
for (frame_length, frame_step) in test_configs:
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
      with self.cached_session(use_gpu=True):
hann_window, inverse_window = self.evaluate(
[hann_window, inverse_window])
self.assertAllClose(hann_window, inverse_window * 1.5)
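  # Added note (illustrative): the 1.5 constant follows from summing the
  # squared periodic Hann window w[n] = 0.5 - 0.5 * cos(2 * pi * n / N) over
  # the four hops covering each output sample when N = 4 * frame_step: the
  # cosine terms cancel across the four equally spaced phases, leaving
  # 4 * 0.25 + 2 * 0.25 = 1.5.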
@staticmethod
def _compute_stft_gradient(signal, frame_length=32, frame_step=16,
fft_length=32):
"""Computes the gradient of the STFT with respect to `signal`."""
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)
magnitude_stft = math_ops.abs(stft)
loss = math_ops.reduce_sum(magnitude_stft)
return gradients_impl.gradients([loss], [signal])[0]
def test_gradients(self):
"""Test that spectral_ops.stft has a working gradient."""
with spectral_ops_test_util.fft_kernel_label_map(), (
self.session(use_gpu=True)) as sess:
signal_length = 512
# An all-zero signal has all zero gradients with respect to the sum of the
# magnitude STFT.
empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
empty_signal_gradient = sess.run(
self._compute_stft_gradient(empty_signal))
self.assertTrue((empty_signal_gradient == 0.0).all())
# A sinusoid will have non-zero components of its gradient with respect to
# the sum of the magnitude STFT.
sinusoid = math_ops.sin(
2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid))
self.assertFalse((sinusoid_gradient == 0.0).all())
def test_gradients_numerical(self):
with spectral_ops_test_util.fft_kernel_label_map(), (
self.session(use_gpu=True)):
      # Tuples of (signal_length, frame_length, frame_step, fft_length).
# TODO(rjryan): Investigate why STFT gradient error is so high.
test_configs = [
(64, 16, 8, 16),
(64, 16, 16, 16),
(64, 16, 7, 16),
(64, 7, 4, 9),
(29, 5, 1, 10),
]
for (signal_length, frame_length, frame_step, fft_length) in test_configs:
signal_shape = [signal_length]
signal = random_ops.random_uniform(signal_shape)
stft_shape = [max(0, 1 + (signal_length - frame_length) // frame_step),
fft_length // 2 + 1]
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft_shape = [(stft_shape[0] - 1) * frame_step + frame_length]
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
stft_error = test.compute_gradient_error(signal, [signal_length],
stft, stft_shape)
inverse_stft_error = test.compute_gradient_error(
stft, stft_shape, inverse_stft, inverse_stft_shape)
self.assertLess(stft_error, 2e-3)
self.assertLess(inverse_stft_error, 5e-4)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/signal/spectral_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reconstruction_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import reconstruction_ops
from tensorflow.python.platform import test
class ReconstructionOpsTest(test.TestCase):
def __init__(self, *args, **kwargs):
super(ReconstructionOpsTest, self).__init__(*args, **kwargs)
self.batch_size = 3
self.frames = 3
self.samples = 5
self.bases = np.array(range(2, 5))
exponents = np.array(range(self.frames * self.samples))
powers = np.power(self.bases[:, np.newaxis], exponents[np.newaxis, :])
self.powers = np.reshape(powers, [self.batch_size, self.frames,
self.samples])
self.frame_hop = 2
    # Hand-computed example using powers of distinct bases: each output
    # sample's base-b digits record exactly which frame samples were summed
    # into it, so the expected digit strings are the same for every base.
self.expected_string = ["1", "10", "100100", "1001000", "10010010000",
"100100000000", "1001000000000", "10000000000000",
"100000000000000"]
def test_all_ones(self):
signal = array_ops.ones([3, 5])
reconstruction = reconstruction_ops.overlap_and_add(signal, 2)
self.assertEqual(reconstruction.shape.as_list(), [9])
with self.session(use_gpu=True):
output = self.evaluate(reconstruction)
expected_output = np.array([1, 1, 2, 2, 3, 2, 2, 1, 1])
self.assertAllClose(output, expected_output)
@test_util.run_deprecated_v1
def test_unknown_shapes(self):
# This test uses placeholders and does not work in eager mode.
if context.executing_eagerly():
return
signal = array_ops.placeholder(dtype=dtypes.int32, shape=[None, None, None])
frame_step = array_ops.placeholder(dtype=dtypes.int32, shape=[])
reconstruction = reconstruction_ops.overlap_and_add(signal, frame_step)
self.assertEqual(reconstruction.shape.as_list(), [None, None])
with self.session(use_gpu=True) as sess:
output = sess.run(reconstruction,
feed_dict={signal: np.ones([4, 3, 5]), frame_step: 2})
expected_output = np.array([[1, 1, 2, 2, 3, 2, 2, 1, 1]] * 4)
self.assertAllClose(output, expected_output)
@test_util.run_deprecated_v1
def test_unknown_rank(self):
# This test uses placeholders and does not work in eager mode.
if context.executing_eagerly():
return
signal = array_ops.placeholder(dtype=dtypes.int32, shape=None)
frame_step = array_ops.placeholder(dtype=dtypes.int32, shape=[])
reconstruction = reconstruction_ops.overlap_and_add(signal, frame_step)
self.assertEqual(reconstruction.shape, None)
with self.session(use_gpu=True) as sess:
output = sess.run(reconstruction,
feed_dict={signal: np.ones([4, 3, 5]), frame_step: 2})
expected_output = np.array([[1, 1, 2, 2, 3, 2, 2, 1, 1]] * 4)
self.assertAllClose(output, expected_output)
@test_util.run_deprecated_v1
def test_fast_path(self):
# This test uses tensor names and does not work in eager mode.
if context.executing_eagerly():
return
signal = array_ops.ones([3, 5])
frame_step = 5
reconstruction = reconstruction_ops.overlap_and_add(signal, frame_step)
self.assertEqual(reconstruction.name, "overlap_and_add/fast_path:0")
    with self.session(use_gpu=True):
output = self.evaluate(reconstruction)
expected_output = np.ones([15])
self.assertAllClose(output, expected_output)
@test_util.run_deprecated_v1
def test_simple(self):
def make_input(frame_length, num_frames=3):
"""Generate a tensor of num_frames frames of frame_length."""
return np.reshape(np.arange(1, num_frames * frame_length + 1),
(-1, frame_length))
# List of (signal, expected_result, frame_hop).
configurations = [
# All hop lengths on a frame length of 2.
(make_input(2), [1, 5, 9, 6], 1),
(make_input(2), [1, 2, 3, 4, 5, 6], 2),
# All hop lengths on a frame length of 3.
(make_input(3), [1, 6, 15, 14, 9], 1),
(make_input(3), [1, 2, 7, 5, 13, 8, 9], 2),
(make_input(3), [1, 2, 3, 4, 5, 6, 7, 8, 9], 3),
# All hop lengths on a frame length of 4.
(make_input(4), [1, 7, 18, 21, 19, 12], 1),
(make_input(4), [1, 2, 8, 10, 16, 18, 11, 12], 2),
(make_input(4), [1, 2, 3, 9, 6, 7, 17, 10, 11, 12], 3),
(make_input(4), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 4),
]
with self.session(use_gpu=True):
for signal, expected, frame_hop in configurations:
reconstruction = reconstruction_ops.overlap_and_add(
np.array(signal), frame_hop).eval()
expected_output = np.array(expected)
self.assertAllClose(reconstruction, expected_output)
def test_powers(self):
signal = constant_op.constant(np.squeeze(self.powers[0, :, :]),
dtype=dtypes.int64)
reconstruction = reconstruction_ops.overlap_and_add(signal, self.frame_hop)
with self.session(use_gpu=True):
output = self.evaluate(reconstruction)
string_output = [np.base_repr(x, self.bases[0]) for x in output]
self.assertEqual(string_output, self.expected_string)
def test_batch(self):
signal = constant_op.constant(self.powers, dtype=dtypes.int64)
reconstruction = reconstruction_ops.overlap_and_add(signal, self.frame_hop)
with self.session(use_gpu=True):
output = self.evaluate(reconstruction)
      for i in range(self.batch_size):
        string_output = [np.base_repr(x, self.bases[i]) for x in output[i, :]]
        self.assertEqual(string_output, self.expected_string)
def test_one_element_batch(self):
input_matrix = np.squeeze(self.powers[0, :, :])
input_matrix = input_matrix[np.newaxis, :, :].astype(float)
signal = constant_op.constant(input_matrix, dtype=dtypes.float32)
reconstruction = reconstruction_ops.overlap_and_add(signal, self.frame_hop)
with self.session(use_gpu=True):
output = self.evaluate(reconstruction)
string_output = [np.base_repr(int(x), self.bases[0]) for x in
np.squeeze(output)]
self.assertEqual(output.shape, (1, 9))
self.assertEqual(string_output, self.expected_string)
@test_util.run_deprecated_v1
def test_gradient(self):
configurations = [
((1, 128), 1),
((5, 35), 17),
((10, 128), 128),
((2, 10, 128), 127),
((2, 2, 10, 128), 126),
((2, 2, 2, 10, 128), 125),
]
with self.session(use_gpu=True) as sess:
for shape, frame_hop in configurations:
signal = array_ops.zeros(shape)
reconstruction = reconstruction_ops.overlap_and_add(signal, frame_hop)
loss = math_ops.reduce_sum(reconstruction)
# Increasing any sample in the input frames by one will increase the sum
# of all the samples in the reconstruction by 1, so the gradient should
# be all ones, no matter the shape or hop.
gradient = sess.run(gradients_impl.gradients([loss], [signal])[0])
self.assertTrue((gradient == 1.0).all())
@test_util.run_deprecated_v1
def test_gradient_batch(self):
with self.session(use_gpu=True) as sess:
signal = array_ops.zeros((2, 10, 10))
frame_hop = 10
reconstruction = reconstruction_ops.overlap_and_add(signal, frame_hop)
# Multiply the first batch-item's reconstruction by zeros. This will block
# gradient from flowing into the first batch item from the loss. Multiply
# the second batch item by the integers from 0 to 99. Since there is zero
# overlap, the gradient for this batch item will be 0-99 shaped as (10,
# 10).
reconstruction *= array_ops.stack(
[array_ops.zeros((100,)),
math_ops.cast(math_ops.range(100), dtypes.float32)])
loss = math_ops.reduce_sum(reconstruction)
# Verify that only the second batch item receives gradient.
gradient = sess.run(gradients_impl.gradients([loss], [signal])[0])
expected_gradient = np.stack([
np.zeros((10, 10)),
np.reshape(np.arange(100).astype(np.float32), (10, 10))])
self.assertAllEqual(expected_gradient, gradient)
@test_util.run_deprecated_v1
def test_gradient_numerical(self):
with self.session(use_gpu=True):
shape = (2, 10, 10)
framed_signal = array_ops.zeros(shape)
frame_hop = 10
reconstruction = reconstruction_ops.overlap_and_add(
framed_signal, frame_hop)
error = test.compute_gradient_error(
framed_signal, shape, reconstruction, [2, 100])
self.assertLess(error, 2e-5)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/signal/reconstruction_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import beta as beta_lib
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
special = try_import("scipy.special")
stats = try_import("scipy.stats")
@test_util.run_all_in_graph_and_eager_modes
class BetaTest(test.TestCase):
def testSimpleShapes(self):
a = np.random.rand(3)
b = np.random.rand(3)
dist = beta_lib.Beta(a, b)
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([3], self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3]), dist.batch_shape)
def testComplexShapes(self):
a = np.random.rand(3, 2, 2)
b = np.random.rand(3, 2, 2)
dist = beta_lib.Beta(a, b)
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([3, 2, 2], self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2, 2]), dist.batch_shape)
def testComplexShapesBroadcast(self):
a = np.random.rand(3, 2, 2)
b = np.random.rand(2, 2)
dist = beta_lib.Beta(a, b)
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([3, 2, 2], self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2, 2]), dist.batch_shape)
def testAlphaProperty(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
dist = beta_lib.Beta(a, b)
self.assertEqual([1, 3], dist.concentration1.get_shape())
self.assertAllClose(a, self.evaluate(dist.concentration1))
def testBetaProperty(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
dist = beta_lib.Beta(a, b)
self.assertEqual([1, 3], dist.concentration0.get_shape())
self.assertAllClose(b, self.evaluate(dist.concentration0))
def testPdfXProper(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
dist = beta_lib.Beta(a, b, validate_args=True)
self.evaluate(dist.prob([.1, .3, .6]))
self.evaluate(dist.prob([.2, .3, .5]))
# Either condition can trigger.
with self.assertRaisesOpError("sample must be positive"):
self.evaluate(dist.prob([-1., 0.1, 0.5]))
with self.assertRaisesOpError("sample must be positive"):
self.evaluate(dist.prob([0., 0.1, 0.5]))
with self.assertRaisesOpError("sample must be less than `1`"):
self.evaluate(dist.prob([.1, .2, 1.2]))
with self.assertRaisesOpError("sample must be less than `1`"):
self.evaluate(dist.prob([.1, .2, 1.0]))
def testPdfTwoBatches(self):
a = [1., 2]
b = [1., 2]
x = [.5, .5]
dist = beta_lib.Beta(a, b)
pdf = dist.prob(x)
self.assertAllClose([1., 3. / 2], self.evaluate(pdf))
self.assertEqual((2,), pdf.get_shape())
def testPdfTwoBatchesNontrivialX(self):
a = [1., 2]
b = [1., 2]
x = [.3, .7]
dist = beta_lib.Beta(a, b)
pdf = dist.prob(x)
self.assertAllClose([1, 63. / 50], self.evaluate(pdf))
self.assertEqual((2,), pdf.get_shape())
def testPdfUniformZeroBatch(self):
    # This is equivalent to a uniform distribution.
a = 1.
b = 1.
x = np.array([.1, .2, .3, .5, .8], dtype=np.float32)
dist = beta_lib.Beta(a, b)
pdf = dist.prob(x)
self.assertAllClose([1.] * 5, self.evaluate(pdf))
self.assertEqual((5,), pdf.get_shape())
def testPdfAlphaStretchedInBroadcastWhenSameRank(self):
a = [[1., 2]]
b = [[1., 2]]
x = [[.5, .5], [.3, .7]]
dist = beta_lib.Beta(a, b)
pdf = dist.prob(x)
self.assertAllClose([[1., 3. / 2], [1., 63. / 50]], self.evaluate(pdf))
self.assertEqual((2, 2), pdf.get_shape())
def testPdfAlphaStretchedInBroadcastWhenLowerRank(self):
a = [1., 2]
b = [1., 2]
x = [[.5, .5], [.2, .8]]
pdf = beta_lib.Beta(a, b).prob(x)
self.assertAllClose([[1., 3. / 2], [1., 24. / 25]], self.evaluate(pdf))
self.assertEqual((2, 2), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenSameRank(self):
a = [[1., 2], [2., 3]]
b = [[1., 2], [2., 3]]
x = [[.5, .5]]
pdf = beta_lib.Beta(a, b).prob(x)
self.assertAllClose([[1., 3. / 2], [3. / 2, 15. / 8]], self.evaluate(pdf))
self.assertEqual((2, 2), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenLowerRank(self):
a = [[1., 2], [2., 3]]
b = [[1., 2], [2., 3]]
x = [.5, .5]
pdf = beta_lib.Beta(a, b).prob(x)
self.assertAllClose([[1., 3. / 2], [3. / 2, 15. / 8]], self.evaluate(pdf))
self.assertEqual((2, 2), pdf.get_shape())
def testLogPdfOnBoundaryIsFiniteWhenAlphaIsOne(self):
b = [[0.01, 0.1, 1., 2], [5., 10., 2., 3]]
pdf = self.evaluate(beta_lib.Beta(1., b).prob(0.))
self.assertAllEqual(np.ones_like(pdf, dtype=np.bool), np.isfinite(pdf))
def testBetaMean(self):
a = [1., 2, 3]
b = [2., 4, 1.2]
dist = beta_lib.Beta(a, b)
self.assertEqual(dist.mean().get_shape(), (3,))
if not stats:
return
expected_mean = stats.beta.mean(a, b)
self.assertAllClose(expected_mean, self.evaluate(dist.mean()))
def testBetaVariance(self):
a = [1., 2, 3]
b = [2., 4, 1.2]
dist = beta_lib.Beta(a, b)
self.assertEqual(dist.variance().get_shape(), (3,))
if not stats:
return
expected_variance = stats.beta.var(a, b)
self.assertAllClose(expected_variance, self.evaluate(dist.variance()))
def testBetaMode(self):
a = np.array([1.1, 2, 3])
b = np.array([2., 4, 1.2])
expected_mode = (a - 1) / (a + b - 2)
dist = beta_lib.Beta(a, b)
self.assertEqual(dist.mode().get_shape(), (3,))
self.assertAllClose(expected_mode, self.evaluate(dist.mode()))
def testBetaModeInvalid(self):
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = beta_lib.Beta(a, b, allow_nan_stats=False)
with self.assertRaisesOpError("Condition x < y.*"):
self.evaluate(dist.mode())
a = np.array([2., 2, 3])
b = np.array([1., 4, 1.2])
dist = beta_lib.Beta(a, b, allow_nan_stats=False)
with self.assertRaisesOpError("Condition x < y.*"):
self.evaluate(dist.mode())
def testBetaModeEnableAllowNanStats(self):
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = beta_lib.Beta(a, b, allow_nan_stats=True)
expected_mode = (a - 1) / (a + b - 2)
expected_mode[0] = np.nan
self.assertEqual((3,), dist.mode().get_shape())
self.assertAllClose(expected_mode, self.evaluate(dist.mode()))
a = np.array([2., 2, 3])
b = np.array([1., 4, 1.2])
dist = beta_lib.Beta(a, b, allow_nan_stats=True)
expected_mode = (a - 1) / (a + b - 2)
expected_mode[0] = np.nan
self.assertEqual((3,), dist.mode().get_shape())
self.assertAllClose(expected_mode, self.evaluate(dist.mode()))
def testBetaEntropy(self):
a = [1., 2, 3]
b = [2., 4, 1.2]
dist = beta_lib.Beta(a, b)
self.assertEqual(dist.entropy().get_shape(), (3,))
if not stats:
return
expected_entropy = stats.beta.entropy(a, b)
self.assertAllClose(expected_entropy, self.evaluate(dist.entropy()))
def testBetaSample(self):
a = 1.
b = 2.
beta = beta_lib.Beta(a, b)
n = constant_op.constant(100000)
samples = beta.sample(n)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000,))
self.assertFalse(np.any(sample_values < 0.0))
if not stats:
return
self.assertLess(
stats.kstest(
# Beta is a univariate distribution.
sample_values,
stats.beta(a=1., b=2.).cdf)[0],
0.01)
# The standard error of the sample mean is 1 / (sqrt(18 * n))
self.assertAllClose(
sample_values.mean(axis=0), stats.beta.mean(a, b), atol=1e-2)
self.assertAllClose(
np.cov(sample_values, rowvar=0), stats.beta.var(a, b), atol=1e-1)
def testBetaFullyReparameterized(self):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
with backprop.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
beta = beta_lib.Beta(a, b)
samples = beta.sample(100)
grad_a, grad_b = tape.gradient(samples, [a, b])
self.assertIsNotNone(grad_a)
self.assertIsNotNone(grad_b)
# Test that sampling with the same seed twice gives the same results.
def testBetaSampleMultipleTimes(self):
a_val = 1.
b_val = 2.
n_val = 100
random_seed.set_random_seed(654321)
beta1 = beta_lib.Beta(
concentration1=a_val, concentration0=b_val, name="beta1")
samples1 = self.evaluate(beta1.sample(n_val, seed=123456))
random_seed.set_random_seed(654321)
beta2 = beta_lib.Beta(
concentration1=a_val, concentration0=b_val, name="beta2")
samples2 = self.evaluate(beta2.sample(n_val, seed=123456))
self.assertAllClose(samples1, samples2)
def testBetaSampleMultidimensional(self):
a = np.random.rand(3, 2, 2).astype(np.float32)
b = np.random.rand(3, 2, 2).astype(np.float32)
beta = beta_lib.Beta(a, b)
n = constant_op.constant(100000)
samples = beta.sample(n)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000, 3, 2, 2))
self.assertFalse(np.any(sample_values < 0.0))
if not stats:
return
self.assertAllClose(
sample_values[:, 1, :].mean(axis=0),
stats.beta.mean(a, b)[1, :],
atol=1e-1)
def testBetaCdf(self):
shape = (30, 40, 50)
for dt in (np.float32, np.float64):
a = 10. * np.random.random(shape).astype(dt)
b = 10. * np.random.random(shape).astype(dt)
x = np.random.random(shape).astype(dt)
actual = self.evaluate(beta_lib.Beta(a, b).cdf(x))
self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)
self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. >= x)
if not stats:
return
self.assertAllClose(stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)
def testBetaLogCdf(self):
shape = (30, 40, 50)
for dt in (np.float32, np.float64):
a = 10. * np.random.random(shape).astype(dt)
b = 10. * np.random.random(shape).astype(dt)
x = np.random.random(shape).astype(dt)
actual = self.evaluate(math_ops.exp(beta_lib.Beta(a, b).log_cdf(x)))
self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)
self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. >= x)
if not stats:
return
self.assertAllClose(stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)
def testBetaWithSoftplusConcentration(self):
a, b = -4.2, -9.1
dist = beta_lib.BetaWithSoftplusConcentration(a, b)
self.assertAllClose(
self.evaluate(nn_ops.softplus(a)), self.evaluate(dist.concentration1))
self.assertAllClose(
self.evaluate(nn_ops.softplus(b)), self.evaluate(dist.concentration0))
def testBetaBetaKL(self):
for shape in [(10,), (4, 5)]:
a1 = 6.0 * np.random.random(size=shape) + 1e-4
b1 = 6.0 * np.random.random(size=shape) + 1e-4
a2 = 6.0 * np.random.random(size=shape) + 1e-4
b2 = 6.0 * np.random.random(size=shape) + 1e-4
# Take inverse softplus of values to test BetaWithSoftplusConcentration
a1_sp = np.log(np.exp(a1) - 1.0)
b1_sp = np.log(np.exp(b1) - 1.0)
a2_sp = np.log(np.exp(a2) - 1.0)
b2_sp = np.log(np.exp(b2) - 1.0)
d1 = beta_lib.Beta(concentration1=a1, concentration0=b1)
d2 = beta_lib.Beta(concentration1=a2, concentration0=b2)
d1_sp = beta_lib.BetaWithSoftplusConcentration(concentration1=a1_sp,
concentration0=b1_sp)
d2_sp = beta_lib.BetaWithSoftplusConcentration(concentration1=a2_sp,
concentration0=b2_sp)
if not special:
return
kl_expected = (special.betaln(a2, b2) - special.betaln(a1, b1) +
(a1 - a2) * special.digamma(a1) +
(b1 - b2) * special.digamma(b1) +
(a2 - a1 + b2 - b1) * special.digamma(a1 + b1))
for dist1 in [d1, d1_sp]:
for dist2 in [d2, d2_sp]:
kl = kullback_leibler.kl_divergence(dist1, dist2)
kl_val = self.evaluate(kl)
self.assertEqual(kl.get_shape(), shape)
self.assertAllClose(kl_val, kl_expected)
# Make sure KL(d1||d1) is 0
kl_same = self.evaluate(kullback_leibler.kl_divergence(d1, d1))
self.assertAllClose(kl_same, np.zeros_like(kl_expected))
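  # Added note (illustrative): a1_sp, b1_sp, a2_sp and b2_sp above are
  # inverse-softplus values, chosen so that softplus(log(exp(x) - 1))
  # recovers x and both parameterizations describe identical distributions.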
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/beta_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distributions KL mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal
from tensorflow.python.platform import test
# pylint: disable=protected-access
_DIVERGENCES = kullback_leibler._DIVERGENCES
_registered_kl = kullback_leibler._registered_kl
# pylint: enable=protected-access
class KLTest(test.TestCase):
def testRegistration(self):
class MyDist(normal.Normal):
pass
    # Register a KL function that simply returns the name parameter.
@kullback_leibler.RegisterKL(MyDist, MyDist)
def _kl(a, b, name=None): # pylint: disable=unused-argument,unused-variable
return name
a = MyDist(loc=0.0, scale=1.0)
self.assertEqual("OK", kullback_leibler.kl_divergence(a, a, name="OK"))
@test_util.run_deprecated_v1
def testDomainErrorExceptions(self):
class MyDistException(normal.Normal):
pass
    # Register a KL function that simply returns NaN.
@kullback_leibler.RegisterKL(MyDistException, MyDistException)
# pylint: disable=unused-argument,unused-variable
def _kl(a, b, name=None):
return array_ops.identity([float("nan")])
    # pylint: enable=unused-argument,unused-variable
with self.cached_session():
a = MyDistException(loc=0.0, scale=1.0, allow_nan_stats=False)
kl = kullback_leibler.kl_divergence(a, a, allow_nan_stats=False)
with self.assertRaisesOpError(
"KL calculation between .* and .* returned NaN values"):
self.evaluate(kl)
with self.assertRaisesOpError(
"KL calculation between .* and .* returned NaN values"):
a.kl_divergence(a).eval()
a = MyDistException(loc=0.0, scale=1.0, allow_nan_stats=True)
kl_ok = kullback_leibler.kl_divergence(a, a)
self.assertAllEqual([float("nan")], self.evaluate(kl_ok))
self_kl_ok = a.kl_divergence(a)
self.assertAllEqual([float("nan")], self.evaluate(self_kl_ok))
cross_ok = a.cross_entropy(a)
self.assertAllEqual([float("nan")], self.evaluate(cross_ok))
def testRegistrationFailures(self):
class MyDist(normal.Normal):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
kullback_leibler.RegisterKL(MyDist, MyDist)("blah")
# First registration is OK
kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)
def testExactRegistrationsAllMatch(self):
for (k, v) in _DIVERGENCES.items():
self.assertEqual(v, _registered_kl(*k))
def _testIndirectRegistration(self, fn):
class Sub1(normal.Normal):
def entropy(self):
return ""
class Sub2(normal.Normal):
def entropy(self):
return ""
class Sub11(Sub1):
def entropy(self):
return ""
# pylint: disable=unused-argument,unused-variable
@kullback_leibler.RegisterKL(Sub1, Sub1)
def _kl11(a, b, name=None):
return "sub1-1"
@kullback_leibler.RegisterKL(Sub1, Sub2)
def _kl12(a, b, name=None):
return "sub1-2"
@kullback_leibler.RegisterKL(Sub2, Sub1)
def _kl21(a, b, name=None):
return "sub2-1"
    # pylint: enable=unused-argument,unused-variable
sub1 = Sub1(loc=0.0, scale=1.0)
sub2 = Sub2(loc=0.0, scale=1.0)
sub11 = Sub11(loc=0.0, scale=1.0)
self.assertEqual("sub1-1", fn(sub1, sub1))
self.assertEqual("sub1-2", fn(sub1, sub2))
self.assertEqual("sub2-1", fn(sub2, sub1))
self.assertEqual("sub1-1", fn(sub11, sub11))
self.assertEqual("sub1-1", fn(sub11, sub1))
self.assertEqual("sub1-2", fn(sub11, sub2))
self.assertEqual("sub1-1", fn(sub11, sub1))
self.assertEqual("sub1-2", fn(sub11, sub2))
self.assertEqual("sub2-1", fn(sub2, sub11))
self.assertEqual("sub1-1", fn(sub1, sub11))
def testIndirectRegistrationKLFun(self):
self._testIndirectRegistration(kullback_leibler.kl_divergence)
def testIndirectRegistrationKLSelf(self):
self._testIndirectRegistration(
lambda p, q: p.kl_divergence(q))
def testIndirectRegistrationCrossEntropy(self):
self._testIndirectRegistration(
lambda p, q: p.cross_entropy(q))
def testFunctionCrossEntropy(self):
self._testIndirectRegistration(kullback_leibler.cross_entropy)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/kullback_leibler_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import multinomial
from tensorflow.python.platform import test
class MultinomialTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
@test_util.run_v1_only("b/120545219")
def testSimpleShapes(self):
with self.cached_session():
p = [.1, .3, .6]
dist = multinomial.Multinomial(total_count=1., probs=p)
self.assertEqual(3, dist.event_shape_tensor().eval())
self.assertAllEqual([], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
@test_util.run_v1_only("b/120545219")
def testComplexShapes(self):
with self.cached_session():
p = 0.5 * np.ones([3, 2, 2], dtype=np.float32)
n = [[3., 2], [4, 5], [6, 7]]
dist = multinomial.Multinomial(total_count=n, probs=p)
self.assertEqual(2, dist.event_shape_tensor().eval())
self.assertAllEqual([3, 2], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
@test_util.run_v1_only("b/120545219")
def testN(self):
p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]]
n = [[3.], [4]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=n, probs=p)
self.assertEqual((2, 1), dist.total_count.get_shape())
self.assertAllClose(n, dist.total_count.eval())
@test_util.run_v1_only("b/120545219")
def testP(self):
p = [[0.1, 0.2, 0.7]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=3., probs=p)
self.assertEqual((1, 3), dist.probs.get_shape())
self.assertEqual((1, 3), dist.logits.get_shape())
self.assertAllClose(p, dist.probs.eval())
@test_util.run_v1_only("b/120545219")
def testLogits(self):
p = np.array([[0.1, 0.2, 0.7]], dtype=np.float32)
logits = np.log(p) - 50.
with self.cached_session():
multinom = multinomial.Multinomial(total_count=3., logits=logits)
self.assertEqual((1, 3), multinom.probs.get_shape())
self.assertEqual((1, 3), multinom.logits.get_shape())
self.assertAllClose(p, multinom.probs.eval())
self.assertAllClose(logits, multinom.logits.eval())
@test_util.run_v1_only("b/120545219")
def testPmfUnderflow(self):
logits = np.array([[-200, 0]], dtype=np.float32)
with self.cached_session():
dist = multinomial.Multinomial(total_count=1., logits=logits)
lp = dist.log_prob([1., 0.]).eval()[0]
self.assertAllClose(-200, lp, atol=0, rtol=1e-6)
@test_util.run_v1_only("b/120545219")
def testPmfandCountsAgree(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=n, probs=p, validate_args=True)
dist.prob([2., 3, 0]).eval()
dist.prob([3., 0, 2]).eval()
with self.assertRaisesOpError("must be non-negative"):
dist.prob([-1., 4, 2]).eval()
with self.assertRaisesOpError("counts must sum to `self.total_count`"):
dist.prob([3., 3, 0]).eval()
@test_util.run_v1_only("b/120545219")
def testPmfNonIntegerCounts(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
# No errors with integer n.
multinom = multinomial.Multinomial(
total_count=n, probs=p, validate_args=True)
multinom.prob([2., 1, 2]).eval()
multinom.prob([3., 0, 2]).eval()
# Counts don't sum to n.
with self.assertRaisesOpError("counts must sum to `self.total_count`"):
multinom.prob([2., 3, 2]).eval()
# Counts are non-integers.
x = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(
"cannot contain fractional components."):
multinom.prob(x).eval(feed_dict={x: [1.0, 2.5, 1.5]})
multinom = multinomial.Multinomial(
total_count=n, probs=p, validate_args=False)
multinom.prob([1., 2., 2.]).eval()
# Non-integer arguments work.
multinom.prob([1.0, 2.5, 1.5]).eval()
def testPmfBothZeroBatches(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = [0.5, 0.5]
counts = [1., 0]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(0.5, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = [0.1, 0.9]
counts = [3., 2]
dist = multinomial.Multinomial(total_count=5., probs=p)
pmf = dist.prob(counts)
# 5 choose 3 = 5 choose 2 = 10. 10 * (.9)^2 * (.1)^3 = 81/10000.
self.assertAllClose(81. / 10000, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
p = [[0.1, 0.9]]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], self.evaluate(pmf))
      self.assertEqual((2,), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
p = [0.1, 0.9]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], self.evaluate(pmf))
      self.assertEqual((2,), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [[1., 0]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
      self.assertEqual((2,), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [1., 0]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
      self.assertEqual(pmf.get_shape(), (2,))
def testPmfShapeCountsStretchedN(self):
with self.cached_session():
# [2, 2, 2]
p = [[[0.1, 0.9], [0.1, 0.9]], [[0.7, 0.3], [0.7, 0.3]]]
# [2, 2]
n = [[3., 3], [3, 3]]
# [2]
counts = [2., 1]
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
self.evaluate(pmf)
self.assertEqual(pmf.get_shape(), (2, 2))
def testPmfShapeCountsPStretchedN(self):
with self.cached_session():
p = [0.1, 0.9]
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
self.evaluate(pmf)
self.assertEqual((4, 3), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testMultinomialMean(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = multinomial.Multinomial(total_count=n, probs=p)
expected_means = 5 * np.array(p, dtype=np.float32)
self.assertEqual((3,), dist.mean().get_shape())
self.assertAllClose(expected_means, dist.mean().eval())
@test_util.run_v1_only("b/120545219")
def testMultinomialCovariance(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = multinomial.Multinomial(total_count=n, probs=p)
expected_covariances = [[9. / 20, -1 / 10, -7 / 20],
[-1 / 10, 4 / 5, -7 / 10],
[-7 / 20, -7 / 10, 21 / 20]]
self.assertEqual((3, 3), dist.covariance().get_shape())
self.assertAllClose(expected_covariances, dist.covariance().eval())
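  # Added note (illustrative): the expected values above follow the closed
  # form Cov = n * (diag(p) - p p^T); e.g. the (0, 0) entry is
  # 5 * 0.1 * (1 - 0.1) = 9/20 and the (0, 1) entry is -5 * 0.1 * 0.2 = -1/10.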
@test_util.run_v1_only("b/120545219")
def testMultinomialCovarianceBatch(self):
with self.cached_session():
# Shape [2]
n = [5.] * 2
# Shape [4, 1, 2]
p = [[[0.1, 0.9]], [[0.1, 0.9]]] * 2
dist = multinomial.Multinomial(total_count=n, probs=p)
# Shape [2, 2]
inner_var = [[9. / 20, -9 / 20], [-9 / 20, 9 / 20]]
# Shape [4, 2, 2, 2]
expected_covariances = [[inner_var, inner_var]] * 4
self.assertEqual((4, 2, 2, 2), dist.covariance().get_shape())
self.assertAllClose(expected_covariances, dist.covariance().eval())
def testCovarianceMultidimensional(self):
# Shape [3, 5, 4]
p = np.random.dirichlet([.25, .25, .25, .25], [3, 5]).astype(np.float32)
# Shape [6, 3, 3]
p2 = np.random.dirichlet([.3, .3, .4], [6, 3]).astype(np.float32)
ns = np.random.randint(low=1, high=11, size=[3, 5]).astype(np.float32)
ns2 = np.random.randint(low=1, high=11, size=[6, 1]).astype(np.float32)
with self.cached_session():
dist = multinomial.Multinomial(ns, p)
dist2 = multinomial.Multinomial(ns2, p2)
covariance = dist.covariance()
covariance2 = dist2.covariance()
self.assertEqual((3, 5, 4, 4), covariance.get_shape())
self.assertEqual((6, 3, 3, 3), covariance2.get_shape())
@test_util.run_v1_only("b/120545219")
def testCovarianceFromSampling(self):
    # We will test mean, cov, var, stddev on a Multinomial constructed via
    # broadcast between theta, n.
theta = np.array([[1., 2, 3],
[2.5, 4, 0.01]], dtype=np.float32)
theta /= np.sum(theta, 1)[..., array_ops.newaxis]
n = np.array([[10., 9.], [8., 7.], [6., 5.]], dtype=np.float32)
with self.cached_session() as sess:
# batch_shape=[3, 2], event_shape=[3]
dist = multinomial.Multinomial(n, theta)
x = dist.sample(int(1000e3), seed=1)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean[array_ops.newaxis, ...]
sample_cov = math_ops.reduce_mean(math_ops.matmul(
x_centered[..., array_ops.newaxis],
x_centered[..., array_ops.newaxis, :]), 0)
sample_var = array_ops.matrix_diag_part(sample_cov)
sample_stddev = math_ops.sqrt(sample_var)
[
sample_mean_,
sample_cov_,
sample_var_,
sample_stddev_,
analytic_mean,
analytic_cov,
analytic_var,
analytic_stddev,
] = sess.run([
sample_mean,
sample_cov,
sample_var,
sample_stddev,
dist.mean(),
dist.covariance(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(sample_mean_, analytic_mean, atol=0.01, rtol=0.01)
self.assertAllClose(sample_cov_, analytic_cov, atol=0.01, rtol=0.01)
self.assertAllClose(sample_var_, analytic_var, atol=0.01, rtol=0.01)
self.assertAllClose(sample_stddev_, analytic_stddev, atol=0.01, rtol=0.01)
@test_util.run_v1_only("b/120545219")
def testSampleUnbiasedNonScalarBatch(self):
with self.cached_session() as sess:
dist = multinomial.Multinomial(
total_count=[7., 6., 5.],
logits=math_ops.log(2. * self._rng.rand(4, 3, 2).astype(np.float32)))
n = int(3e4)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
# Cyclically rotate event dims left.
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.10)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
@test_util.run_v1_only("b/120545219")
def testSampleUnbiasedScalarBatch(self):
with self.cached_session() as sess:
dist = multinomial.Multinomial(
total_count=5.,
logits=math_ops.log(2. * self._rng.rand(4).astype(np.float32)))
n = int(5e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
      x_centered = x - sample_mean  # Already transposed to [n, 4].
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.10)
self.assertAllEqual([4, 4], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
def testNotReparameterized(self):
total_count = constant_op.constant(5.0)
p = constant_op.constant([0.2, 0.6])
with backprop.GradientTape() as tape:
tape.watch(total_count)
tape.watch(p)
dist = multinomial.Multinomial(
total_count=total_count,
probs=p)
samples = dist.sample(100)
grad_total_count, grad_p = tape.gradient(samples, [total_count, p])
self.assertIsNone(grad_total_count)
self.assertIsNone(grad_p)
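# The covariance assertions above follow from the closed form
# Cov[X] = total_count * (diag(p) - p p^T) for X ~ Multinomial(total_count, p).
# A minimal NumPy sketch of that identity (illustrative helper, not part of
# the test suite; with total_count=5. and probs=[0.1, 0.2, 0.7] it reproduces
# the expected_covariances used in testMultinomialCovariance above):
def _multinomial_covariance_sketch(total_count, probs):
  probs = np.asarray(probs, dtype=np.float64)
  return total_count * (np.diag(probs) - np.outer(probs, probs))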
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/multinomial_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import importlib
import numpy as np
from tensorflow.python.eager import backprop as tfe_backprop
from tensorflow.python.eager import context as tfe_context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
special = try_import("scipy.special")
stats = try_import("scipy.stats")
sm = special_math
def _check_strictly_increasing(array_1d):
diff = np.diff(array_1d)
np.testing.assert_array_less(0, diff)
def _make_grid(dtype, grid_spec):
"""Returns a uniform grid + noise, reshaped to shape argument."""
rng = np.random.RandomState(0)
num_points = np.prod(grid_spec.shape)
grid = np.linspace(grid_spec.min, grid_spec.max, num=num_points).astype(dtype)
grid_spacing = (grid_spec.max - grid_spec.min) / num_points
grid += 0.1 * grid_spacing * rng.randn(*grid.shape)
# More useful if it's sorted (e.g. for testing monotonicity, or debugging).
grid = np.sort(grid)
return np.reshape(grid, grid_spec.shape)
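# A small usage sketch of _make_grid (illustrative helper, never called by the
# tests): five sorted, lightly jittered float32 points spanning [-1, 1],
# reshaped to the GridSpec's shape.
def _make_grid_example():
  return _make_grid(np.float32, GridSpec(min=-1., max=1., shape=[5]))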
def _value_and_gradient(fn, *args):
"""Calls `fn` and computes the gradient of the result wrt `arg`."""
if tfe_context.executing_eagerly():
v, g = tfe_backprop.val_and_grad_function(fn)(args)
else:
v = fn(*args)
g = gradients_impl.gradients(v, args)
return v, g
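# A small usage sketch of _value_and_gradient (illustrative helper, never
# called by the tests): at 0., sm.ndtr is 0.5 and its derivative is the
# standard normal pdf, 1/sqrt(2*pi) ~ 0.3989. The lambda wrapper matters in
# eager mode; see the "lambda sanitizer" comments further below.
def _value_and_gradient_example():
  x = constant_op.constant(0.)
  return _value_and_gradient(lambda t: sm.ndtr(t), x)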
GridSpec = collections.namedtuple("GridSpec", ["min", "max", "shape"])
ErrorSpec = collections.namedtuple("ErrorSpec", ["rtol", "atol"])
class NdtriTest(test.TestCase):
def assertAllFinite(self, x):
is_finite = np.isfinite(x)
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
@test_util.run_in_graph_and_eager_modes
def testNdtri(self):
"""Verifies that ndtri computation is correct."""
if not special:
return
p = np.linspace(0., 1.0, 50).astype(np.float64)
    # Quantile performs a piecewise rational approximation, so we add some
    # special input values to make sure we hit all the pieces.
p = np.hstack((p, np.exp(-32), 1. - np.exp(-32), np.exp(-2),
1. - np.exp(-2)))
expected_x = special.ndtri(p)
x = special_math.ndtri(p)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
@test_util.run_deprecated_v1
def testNdtriDynamicShape(self):
"""Verifies that ndtri computation is correct."""
with self.cached_session() as sess:
if not special:
return
p = array_ops.placeholder(np.float32)
p_ = np.linspace(0., 1.0, 50).astype(np.float32)
x = special_math.ndtri(p)
x_ = sess.run(x, feed_dict={p: p_})
expected_x_ = special.ndtri(p_)
self.assertAllClose(expected_x_, x_, atol=0.)
def _baseNdtriFiniteGradientTest(self, dtype):
"""Verifies that ndtri has finite gradients at interesting points."""
# Tests gradients at 0, 1, and piece-wise boundaries.
p = constant_op.constant(
np.array([
0.,
np.exp(-32.),
np.exp(-2.),
1. - np.exp(-2.),
1. - np.exp(-32.),
1.,
]).astype(dtype))
    # Not having the lambda sanitizer means we'd get an `IndexError` whenever
    # the user-supplied function has default args.
_, grads = _value_and_gradient(
lambda x: special_math.ndtri(x), p) # pylint: disable=unnecessary-lambda
self.assertAllFinite(self.evaluate(grads[0]))
@test_util.run_in_graph_and_eager_modes
def testNdtriFiniteGradientFloat32(self):
self._baseNdtriFiniteGradientTest(np.float32)
@test_util.run_in_graph_and_eager_modes
def testNdtriFiniteGradientFloat64(self):
self._baseNdtriFiniteGradientTest(np.float64)
@test_util.run_all_in_graph_and_eager_modes
class NdtrTest(test.TestCase):
_use_log = False
# Grid min/max chosen to ensure 0 < cdf(x) < 1.
_grid32 = GridSpec(min=-12.9, max=5., shape=[100])
_grid64 = GridSpec(min=-37.5, max=8., shape=[100])
_error32 = ErrorSpec(rtol=1e-4, atol=0.)
_error64 = ErrorSpec(rtol=1e-6, atol=0.)
def _test_grid(self, dtype, grid_spec, error_spec):
if self._use_log:
self._test_grid_log(dtype, grid_spec, error_spec)
else:
self._test_grid_no_log(dtype, grid_spec, error_spec)
def _test_grid_log(self, dtype, grid_spec, error_spec):
if not special:
return
grid = _make_grid(dtype, grid_spec)
actual = self.evaluate(sm.log_ndtr(grid))
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertTrue(np.isfinite(actual).all())
# On the grid, -inf < log_cdf(x) < 0. In this case, we should be able
# to use a huge grid because we have used tricks to escape numerical
# difficulties.
self.assertTrue((actual < 0).all())
_check_strictly_increasing(actual)
# Versus scipy.
expected = special.log_ndtr(grid)
    # Scipy prematurely goes to zero at some places where we don't, so don't
    # include those points in the comparison.
self.assertAllClose(
expected.astype(np.float64)[expected < 0],
actual.astype(np.float64)[expected < 0],
rtol=error_spec.rtol,
atol=error_spec.atol)
def _test_grid_no_log(self, dtype, grid_spec, error_spec):
if not special:
return
grid = _make_grid(dtype, grid_spec)
actual = self.evaluate(sm.ndtr(grid))
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertTrue(np.isfinite(actual).all())
# On the grid, 0 < cdf(x) < 1. The grid cannot contain everything due
# to numerical limitations of cdf.
self.assertTrue((actual > 0).all())
self.assertTrue((actual < 1).all())
_check_strictly_increasing(actual)
# Versus scipy.
expected = special.ndtr(grid)
    # Scipy prematurely underflows to zero at some places where we don't, so
    # exclude those points from the comparison. (ndtr is strictly positive, so
    # masking on `expected < 0` would compare nothing at all.)
    self.assertAllClose(
        expected.astype(np.float64)[expected > 0],
        actual.astype(np.float64)[expected > 0],
        rtol=error_spec.rtol,
        atol=error_spec.atol)
@test_util.run_deprecated_v1
def test_float32(self):
self._test_grid(np.float32, self._grid32, self._error32)
@test_util.run_deprecated_v1
def test_float64(self):
self._test_grid(np.float64, self._grid64, self._error64)
class LogNdtrTestLower(NdtrTest):
_use_log = True
_grid32 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT32_LOWER, shape=[100])
_grid64 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT64_LOWER, shape=[100])
_error32 = ErrorSpec(rtol=1e-4, atol=0.)
_error64 = ErrorSpec(rtol=1e-4, atol=0.)
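# LogNdtrTestMid below leans on the first-order approximation
# log(1 + epsilon) ~ epsilon for tiny epsilon; a quick NumPy sketch of that
# behavior (illustrative helper): the two returned values agree to roughly
# epsilon**2 / 2.
def _log1p_linearization_example(epsilon=1e-12):
  return np.log1p(epsilon), epsilon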
# The errors are quite large when the input is > 6 or so. Also,
# scipy.special.log_ndtr becomes zero very early, before 10 (due to ndtr
# becoming 1). We approximate Log[1 + epsilon] as epsilon and avoid this issue.
class LogNdtrTestMid(NdtrTest):
_use_log = True
_grid32 = GridSpec(
min=sm.LOGNDTR_FLOAT32_LOWER, max=sm.LOGNDTR_FLOAT32_UPPER, shape=[100])
_grid64 = GridSpec(
min=sm.LOGNDTR_FLOAT64_LOWER, max=sm.LOGNDTR_FLOAT64_UPPER, shape=[100])
# Differences show up as soon as we're in the tail, so add some atol.
_error32 = ErrorSpec(rtol=0.1, atol=1e-7)
_error64 = ErrorSpec(rtol=0.1, atol=1e-7)
class LogNdtrTestUpper(NdtrTest):
_use_log = True
_grid32 = GridSpec(
min=sm.LOGNDTR_FLOAT32_UPPER,
max=12., # Beyond this, log_cdf(x) may be zero.
shape=[100])
_grid64 = GridSpec(
min=sm.LOGNDTR_FLOAT64_UPPER,
max=35., # Beyond this, log_cdf(x) may be zero.
shape=[100])
_error32 = ErrorSpec(rtol=1e-6, atol=1e-14)
_error64 = ErrorSpec(rtol=1e-6, atol=1e-14)
class NdtrGradientTest(test.TestCase):
_use_log = False
_grid = GridSpec(min=-100., max=100., shape=[1, 2, 3, 8])
_error32 = ErrorSpec(rtol=1e-4, atol=0)
_error64 = ErrorSpec(rtol=1e-7, atol=0)
def assert_all_true(self, v):
self.assertAllEqual(np.ones_like(v, dtype=np.bool), v)
def assert_all_false(self, v):
self.assertAllEqual(np.zeros_like(v, dtype=np.bool), v)
def _test_grad_finite(self, dtype):
x = constant_op.constant([-100., 0., 100.], dtype=dtype)
fn = sm.log_ndtr if self._use_log else sm.ndtr
    # Not having the lambda sanitizer means we'd get an `IndexError` whenever
    # the user-supplied function has default args.
output, grad_output = _value_and_gradient(
lambda x_: fn(x_), x) # pylint: disable=unnecessary-lambda
# isfinite checks for NaN and Inf.
output_, grad_output_ = self.evaluate([output, grad_output])
self.assert_all_true(np.isfinite(output_))
self.assert_all_true(np.isfinite(grad_output_[0]))
def _test_grad_accuracy(self, dtype, grid_spec, error_spec):
raw_grid = _make_grid(dtype, grid_spec)
grid = ops.convert_to_tensor(raw_grid)
with self.cached_session():
fn = sm.log_ndtr if self._use_log else sm.ndtr
# If there are N points in the grid,
# grad_eval.shape = (N, N), with grad_eval[i, j] the partial derivative of
# the ith output point w.r.t. the jth grid point. We only expect the
# diagonal to be nonzero.
# TODO(b/31131137): Replace tf.compat.v1.test.compute_gradient with our
# own custom gradient evaluation to ensure we correctly handle small
# function delta.
grad_eval, _ = gradient_checker.compute_gradient(grid, grid_spec.shape,
fn(grid),
grid_spec.shape)
grad_eval = np.diag(grad_eval)
# Check for NaN separately in order to get informative failures.
self.assert_all_false(np.isnan(grad_eval))
self.assert_all_true(grad_eval > 0.)
# isfinite checks for NaN and Inf.
self.assert_all_true(np.isfinite(grad_eval))
# Do the same checks but explicitly compute the gradient.
# (We did this because we're not sure if we trust
# tf.test.compute_gradient.)
grad_eval = gradients_impl.gradients(fn(grid), grid)[0].eval()
self.assert_all_false(np.isnan(grad_eval))
if self._use_log:
g = np.reshape(grad_eval, [-1])
half = np.ceil(len(g) / 2)
self.assert_all_true(g[:int(half)] > 0.)
self.assert_all_true(g[int(half):] >= 0.)
else:
# The ndtr gradient will only be non-zero in the range [-14, 14] for
# float32 and [-38, 38] for float64.
self.assert_all_true(grad_eval >= 0.)
# isfinite checks for NaN and Inf.
self.assert_all_true(np.isfinite(grad_eval))
# Versus scipy.
if not (special and stats):
return
expected = stats.norm.pdf(raw_grid)
if self._use_log:
expected /= special.ndtr(raw_grid)
expected[np.isnan(expected)] = 0.
      # Scipy prematurely underflows to zero at some places where we don't,
      # and our gradient can underflow to zero in the far tail before scipy's
      # pdf does. Exclude both regimes from the comparison; the pdf is never
      # negative, so masking on `expected < 0` would compare nothing at all.
      keep = (expected > 0) & (grad_eval > 0)
      self.assertAllClose(
          expected.astype(np.float64)[keep],
          grad_eval.astype(np.float64)[keep],
          rtol=error_spec.rtol,
          atol=error_spec.atol)
@test_util.run_deprecated_v1
def test_float32(self):
self._test_grad_accuracy(np.float32, self._grid, self._error32)
self._test_grad_finite(np.float32)
@test_util.run_deprecated_v1
def test_float64(self):
self._test_grad_accuracy(np.float64, self._grid, self._error64)
self._test_grad_finite(np.float64)
class LogNdtrGradientTest(NdtrGradientTest):
_use_log = True
class ErfInvTest(test.TestCase):
def testErfInvValues(self):
with self.cached_session():
if not special:
return
x = np.linspace(0., 1.0, 50).astype(np.float64)
expected_x = special.erfinv(x)
x = special_math.erfinv(x)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
def testErfInvIntegerInput(self):
with self.cached_session():
with self.assertRaises(TypeError):
x = np.array([1, 2, 3]).astype(np.int32)
special_math.erfinv(x)
with self.assertRaises(TypeError):
x = np.array([1, 2, 3]).astype(np.int64)
special_math.erfinv(x)
class LogCDFLaplaceTest(test.TestCase):
# Note that scipy.stats.laplace does not have a stable Log CDF, so we cannot
# rely on scipy to cross check the extreme values.
# Test will be done differently over different ranges. These are the values
# such that when exceeded by x, produce output that causes the naive (scipy)
# implementation to have numerical issues.
#
# If x = log(1 / (2 * eps)), then 0.5 * exp{-x} = eps.
# With inserting eps = np.finfo(dtype).eps, we see that log(1 / (2 * eps)) is
# the value of x such that any larger value will result in
# 1 - 0.5 * exp{-x} = 0, which will cause the log_cdf_laplace code to take a
  # log of zero. We therefore choose these as our cutoffs for testing.
CUTOFF_FLOAT64_UPPER = np.log(1. / (2. * np.finfo(np.float64).eps)) - 1.
CUTOFF_FLOAT32_UPPER = np.log(1. / (2. * np.finfo(np.float32).eps)) - 1.
def assertAllTrue(self, x):
self.assertAllEqual(np.ones_like(x, dtype=np.bool), x)
def _test_grid_log(self, dtype, scipy_dtype, grid_spec, error_spec):
with self.cached_session():
grid = _make_grid(dtype, grid_spec)
actual = sm.log_cdf_laplace(grid).eval()
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual))
self.assertAllTrue((actual < 0))
_check_strictly_increasing(actual)
# Versus scipy.
if not stats:
return
scipy_dist = stats.laplace(loc=0., scale=1.)
expected = scipy_dist.logcdf(grid.astype(scipy_dtype))
self.assertAllClose(
expected.astype(np.float64),
actual.astype(np.float64),
rtol=error_spec.rtol,
atol=error_spec.atol)
@test_util.run_deprecated_v1
def test_float32_lower_and_mid_segment_scipy_float32_ok(self):
# Choose values mild enough that we can use scipy in float32, which will
# allow for a high accuracy match to scipy (since we both use float32).
self._test_grid_log(
np.float32, # dtype
np.float32, # scipy_dtype
GridSpec(min=-10, max=self.CUTOFF_FLOAT32_UPPER - 5, shape=[100]),
ErrorSpec(rtol=5e-4, atol=0))
@test_util.run_deprecated_v1
def test_float32_all_segments_with_scipy_float64_ok(self):
# Choose values outside the range where scipy float32 works.
# Let scipy use float64. This means we
# won't be exactly the same since we are in float32.
self._test_grid_log(
np.float32, # dtype
np.float64, # scipy_dtype
GridSpec(min=-50, max=self.CUTOFF_FLOAT32_UPPER + 5, shape=[100]),
ErrorSpec(rtol=0.05, atol=0))
@test_util.run_deprecated_v1
def test_float32_extreme_values_result_and_gradient_finite_and_nonzero(self):
with self.cached_session() as sess:
# On the lower branch, log_cdf_laplace(x) = x, so we know this will be
# fine, but test to -200 anyways.
grid = _make_grid(
np.float32, GridSpec(min=-200, max=80, shape=[20, 100]))
grid = ops.convert_to_tensor(grid)
actual = sm.log_cdf_laplace(grid)
grad = gradients_impl.gradients(actual, grid)[0]
actual_, grad_ = self.evaluate([actual, grad])
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual_))
self.assertAllTrue(np.isfinite(grad_))
self.assertFalse(np.any(actual_ == 0))
self.assertFalse(np.any(grad_ == 0))
@test_util.run_deprecated_v1
def test_float64_extreme_values_result_and_gradient_finite_and_nonzero(self):
with self.cached_session() as sess:
# On the lower branch, log_cdf_laplace(x) = x, so we know this will be
# fine, but test to -200 anyways.
grid = _make_grid(
np.float64, GridSpec(min=-200, max=700, shape=[20, 100]))
grid = ops.convert_to_tensor(grid)
actual = sm.log_cdf_laplace(grid)
grad = gradients_impl.gradients(actual, grid)[0]
actual_, grad_ = self.evaluate([actual, grad])
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual_))
self.assertAllTrue(np.isfinite(grad_))
self.assertFalse(np.any(actual_ == 0))
self.assertFalse(np.any(grad_ == 0))
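# A minimal numerical sketch of the CUTOFF_* reasoning in LogCDFLaplaceTest
# (illustrative helper, never called by the tests): a few units past
# x = log(1 / (2 * eps)), the survival term 0.5 * exp(-x) falls far below
# machine epsilon, so the naive 1 - 0.5 * exp(-x) rounds to exactly 1 and its
# log to exactly 0.
def _naive_laplace_tail_saturates(dtype=np.float64):
  eps = np.finfo(dtype).eps
  x = dtype(np.log(1. / (2. * eps)) + 5.)
  return dtype(1.) - dtype(0.5) * np.exp(-x) == dtype(1.)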
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/special_math_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import math
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class NormalTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(self.evaluate(tensor))
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
param_shapes = normal_lib.Normal.param_shapes(sample_shape)
mu_shape, sigma_shape = param_shapes["loc"], param_shapes["scale"]
self.assertAllEqual(expected, self.evaluate(mu_shape))
self.assertAllEqual(expected, self.evaluate(sigma_shape))
mu = array_ops.zeros(mu_shape)
sigma = array_ops.ones(sigma_shape)
self.assertAllEqual(
expected,
self.evaluate(array_ops.shape(normal_lib.Normal(mu, sigma).sample())))
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = normal_lib.Normal.param_static_shapes(sample_shape)
mu_shape, sigma_shape = param_shapes["loc"], param_shapes["scale"]
self.assertEqual(expected, mu_shape)
self.assertEqual(expected, sigma_shape)
@test_util.run_in_graph_and_eager_modes
def testSampleLikeArgsGetDistDType(self):
dist = normal_lib.Normal(0., 1.)
self.assertEqual(dtypes.float32, dist.dtype)
for method in ("log_prob", "prob", "log_cdf", "cdf",
"log_survival_function", "survival_function", "quantile"):
self.assertEqual(dtypes.float32, getattr(dist, method)(1).dtype)
@test_util.run_in_graph_and_eager_modes
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
@test_util.run_in_graph_and_eager_modes
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNormalWithSoftplusScale(self):
mu = array_ops.zeros((10, 3))
rho = array_ops.ones((10, 3)) * -2.
normal = normal_lib.NormalWithSoftplusScale(loc=mu, scale=rho)
self.assertAllEqual(self.evaluate(mu), self.evaluate(normal.loc))
self.assertAllEqual(
self.evaluate(nn_ops.softplus(rho)), self.evaluate(normal.scale))
@test_util.run_in_graph_and_eager_modes
def testNormalLogPDF(self):
batch_size = 6
mu = constant_op.constant([3.0] * batch_size)
sigma = constant_op.constant([math.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
normal = normal_lib.Normal(loc=mu, scale=sigma)
log_pdf = normal.log_prob(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), log_pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(log_pdf).shape)
self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)
pdf = normal.prob(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(pdf).shape)
self.assertAllEqual(normal.batch_shape, pdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(pdf).shape)
if not stats:
return
expected_log_pdf = stats.norm(self.evaluate(mu),
self.evaluate(sigma)).logpdf(x)
self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testNormalLogPDFMultidimensional(self):
batch_size = 6
mu = constant_op.constant([[3.0, -3.0]] * batch_size)
sigma = constant_op.constant(
[[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
normal = normal_lib.Normal(loc=mu, scale=sigma)
log_pdf = normal.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), log_pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(log_pdf).shape)
self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)
pdf = normal.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), pdf_values.shape)
self.assertAllEqual(normal.batch_shape, pdf.get_shape())
self.assertAllEqual(normal.batch_shape, pdf_values.shape)
if not stats:
return
expected_log_pdf = stats.norm(self.evaluate(mu),
self.evaluate(sigma)).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
@test_util.run_in_graph_and_eager_modes
def testNormalCDF(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
cdf = normal.cdf(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), cdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(cdf).shape)
self.assertAllEqual(normal.batch_shape, cdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(cdf).shape)
if not stats:
return
expected_cdf = stats.norm(mu, sigma).cdf(x)
self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0)
@test_util.run_in_graph_and_eager_modes
def testNormalSurvivalFunction(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
sf = normal.survival_function(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), sf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(sf).shape)
self.assertAllEqual(normal.batch_shape, sf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(sf).shape)
if not stats:
return
expected_sf = stats.norm(mu, sigma).sf(x)
self.assertAllClose(expected_sf, self.evaluate(sf), atol=0)
@test_util.run_in_graph_and_eager_modes
def testNormalLogCDF(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
cdf = normal.log_cdf(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), cdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(cdf).shape)
self.assertAllEqual(normal.batch_shape, cdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(cdf).shape)
if not stats:
return
expected_cdf = stats.norm(mu, sigma).logcdf(x)
self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0, rtol=1e-3)
def testFiniteGradientAtDifficultPoints(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
mu = variables.Variable(dtype(0.0))
sigma = variables.Variable(dtype(1.0))
dist = normal_lib.Normal(loc=mu, scale=sigma)
x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_survival_function, dist.log_prob, dist.prob
]:
value = func(x)
grads = gradients_impl.gradients(value, [mu, sigma])
with self.session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
@test_util.run_in_graph_and_eager_modes
def testNormalLogSurvivalFunction(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
sf = normal.log_survival_function(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), sf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(sf).shape)
self.assertAllEqual(normal.batch_shape, sf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(sf).shape)
if not stats:
return
expected_sf = stats.norm(mu, sigma).logsf(x)
self.assertAllClose(expected_sf, self.evaluate(sf), atol=0, rtol=1e-5)
@test_util.run_in_graph_and_eager_modes
def testNormalEntropyWithScalarInputs(self):
# Scipy.stats.norm cannot deal with the shapes in the other test.
mu_v = 2.34
sigma_v = 4.56
normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
entropy = normal.entropy()
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), entropy.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(entropy).shape)
self.assertAllEqual(normal.batch_shape, entropy.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(entropy).shape)
if not stats:
return
expected_entropy = stats.norm(mu_v, sigma_v).entropy()
self.assertAllClose(expected_entropy, self.evaluate(entropy))
@test_util.run_in_graph_and_eager_modes
def testNormalEntropy(self):
mu_v = np.array([1.0, 1.0, 1.0])
sigma_v = np.array([[1.0, 2.0, 3.0]]).T
normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
# scipy.stats.norm cannot deal with these shapes.
sigma_broadcast = mu_v * sigma_v
expected_entropy = 0.5 * np.log(2 * np.pi * np.exp(1) * sigma_broadcast**2)
entropy = normal.entropy()
np.testing.assert_allclose(expected_entropy, self.evaluate(entropy))
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), entropy.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(entropy).shape)
self.assertAllEqual(normal.batch_shape, entropy.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(entropy).shape)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNormalMeanAndMode(self):
# Mu will be broadcast to [7, 7, 7].
mu = [7.]
sigma = [11., 12., 13.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.mean().get_shape())
self.assertAllEqual([7., 7, 7], self.evaluate(normal.mean()))
self.assertAllEqual((3,), normal.mode().get_shape())
self.assertAllEqual([7., 7, 7], self.evaluate(normal.mode()))
@test_util.run_in_graph_and_eager_modes
def testNormalQuantile(self):
batch_size = 52
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
p = np.linspace(0., 1.0, batch_size - 2).astype(np.float64)
    # Quantile performs a piecewise rational approximation, so we add some
    # special input values to make sure we hit all the pieces.
p = np.hstack((p, np.exp(-33), 1. - np.exp(-33)))
normal = normal_lib.Normal(loc=mu, scale=sigma)
x = normal.quantile(p)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), x.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(x).shape)
self.assertAllEqual(normal.batch_shape, x.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(x).shape)
if not stats:
return
expected_x = stats.norm(mu, sigma).ppf(p)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
def _baseQuantileFiniteGradientAtDifficultPoints(self, dtype):
g = ops.Graph()
with g.as_default():
mu = variables.Variable(dtype(0.0))
sigma = variables.Variable(dtype(1.0))
dist = normal_lib.Normal(loc=mu, scale=sigma)
p = variables.Variable(
np.array([0.,
np.exp(-32.), np.exp(-2.),
1. - np.exp(-2.), 1. - np.exp(-32.),
1.]).astype(dtype))
value = dist.quantile(p)
grads = gradients_impl.gradients(value, [mu, p])
with self.cached_session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testQuantileFiniteGradientAtDifficultPointsFloat32(self):
self._baseQuantileFiniteGradientAtDifficultPoints(np.float32)
def testQuantileFiniteGradientAtDifficultPointsFloat64(self):
self._baseQuantileFiniteGradientAtDifficultPoints(np.float64)
@test_util.run_in_graph_and_eager_modes
def testNormalVariance(self):
# sigma will be broadcast to [7, 7, 7]
mu = [1., 2., 3.]
sigma = [7.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.variance().get_shape())
self.assertAllEqual([49., 49, 49], self.evaluate(normal.variance()))
@test_util.run_in_graph_and_eager_modes
def testNormalStandardDeviation(self):
# sigma will be broadcast to [7, 7, 7]
mu = [1., 2., 3.]
sigma = [7.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.stddev().get_shape())
self.assertAllEqual([7., 7, 7], self.evaluate(normal.stddev()))
@test_util.run_in_graph_and_eager_modes
def testNormalSample(self):
mu = constant_op.constant(3.0)
sigma = constant_op.constant(math.sqrt(3.0))
mu_v = 3.0
sigma_v = np.sqrt(3.0)
n = constant_op.constant(100000)
normal = normal_lib.Normal(loc=mu, scale=sigma)
samples = normal.sample(n)
sample_values = self.evaluate(samples)
# Note that the standard error for the sample mean is ~ sigma / sqrt(n).
# The sample variance similarly is dependent on sigma and n.
# Thus, the tolerances below are very sensitive to number of samples
# as well as the variances chosen.
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(sample_values.mean(), mu_v, atol=1e-1)
self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)
expected_samples_shape = tensor_shape.TensorShape(
[self.evaluate(n)]).concatenate(
tensor_shape.TensorShape(
self.evaluate(normal.batch_shape_tensor())))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
expected_samples_shape = (
tensor_shape.TensorShape([self.evaluate(n)]).concatenate(
normal.batch_shape))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
def testNormalFullyReparameterized(self):
mu = constant_op.constant(4.0)
sigma = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(mu)
tape.watch(sigma)
normal = normal_lib.Normal(loc=mu, scale=sigma)
samples = normal.sample(100)
grad_mu, grad_sigma = tape.gradient(samples, [mu, sigma])
self.assertIsNotNone(grad_mu)
self.assertIsNotNone(grad_sigma)
@test_util.run_in_graph_and_eager_modes
def testNormalSampleMultiDimensional(self):
batch_size = 2
mu = constant_op.constant([[3.0, -3.0]] * batch_size)
sigma = constant_op.constant(
[[math.sqrt(2.0), math.sqrt(3.0)]] * batch_size)
mu_v = [3.0, -3.0]
sigma_v = [np.sqrt(2.0), np.sqrt(3.0)]
n = constant_op.constant(100000)
normal = normal_lib.Normal(loc=mu, scale=sigma)
samples = normal.sample(n)
sample_values = self.evaluate(samples)
# Note that the standard error for the sample mean is ~ sigma / sqrt(n).
# The sample variance similarly is dependent on sigma and n.
# Thus, the tolerances below are very sensitive to number of samples
# as well as the variances chosen.
self.assertEqual(samples.get_shape(), (100000, batch_size, 2))
self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)
expected_samples_shape = tensor_shape.TensorShape(
[self.evaluate(n)]).concatenate(
tensor_shape.TensorShape(
self.evaluate(normal.batch_shape_tensor())))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
expected_samples_shape = (
tensor_shape.TensorShape([self.evaluate(n)]).concatenate(
normal.batch_shape))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
@test_util.run_in_graph_and_eager_modes
def testNegativeSigmaFails(self):
with self.assertRaisesOpError("Condition x > 0 did not hold"):
normal = normal_lib.Normal(
loc=[1.], scale=[-5.], validate_args=True, name="G")
self.evaluate(normal.mean())
@test_util.run_in_graph_and_eager_modes
def testNormalShape(self):
mu = constant_op.constant([-3.0] * 5)
sigma = constant_op.constant(11.0)
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertEqual(self.evaluate(normal.batch_shape_tensor()), [5])
self.assertEqual(normal.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(self.evaluate(normal.event_shape_tensor()), [])
self.assertEqual(normal.event_shape, tensor_shape.TensorShape([]))
@test_util.run_deprecated_v1
def testNormalShapeWithPlaceholders(self):
mu = array_ops.placeholder(dtype=dtypes.float32)
sigma = array_ops.placeholder(dtype=dtypes.float32)
normal = normal_lib.Normal(loc=mu, scale=sigma)
with self.cached_session() as sess:
      # batch_shape should be an unknown TensorShape.
self.assertEqual(normal.batch_shape, tensor_shape.TensorShape(None))
self.assertEqual(normal.event_shape, ())
self.assertAllEqual(self.evaluate(normal.event_shape_tensor()), [])
self.assertAllEqual(
sess.run(normal.batch_shape_tensor(),
feed_dict={mu: 5.0,
sigma: [1.0, 2.0]}), [2])
@test_util.run_in_graph_and_eager_modes
def testNormalNormalKL(self):
batch_size = 6
mu_a = np.array([3.0] * batch_size)
sigma_a = np.array([1.0, 2.0, 3.0, 1.5, 2.5, 3.5])
mu_b = np.array([-3.0] * batch_size)
sigma_b = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
n_a = normal_lib.Normal(loc=mu_a, scale=sigma_a)
n_b = normal_lib.Normal(loc=mu_b, scale=sigma_b)
kl = kullback_leibler.kl_divergence(n_a, n_b)
kl_val = self.evaluate(kl)
kl_expected = ((mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 * (
(sigma_a**2 / sigma_b**2) - 1 - 2 * np.log(sigma_a / sigma_b)))
self.assertEqual(kl.get_shape(), (batch_size,))
self.assertAllClose(kl_val, kl_expected)
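# The cdf/survival checks above reduce analytically to the standard normal
# cdf of the standardized argument, P[X <= x] = Phi((x - mu) / sigma). A
# minimal SciPy-based sketch of that identity (illustrative helper; assumes
# the scipy import above succeeded):
def _normal_cdf_by_standardizing(x, mu, sigma):
  z = (np.asarray(x) - mu) / sigma
  return stats.norm(mu, sigma).cdf(x), stats.norm(0., 1.).cdf(z)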
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/normal_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import exponential as exponential_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
@test_util.run_all_in_graph_and_eager_modes
class ExponentialTest(test.TestCase):
def testExponentialLogPDF(self):
batch_size = 6
lam = constant_op.constant([2.0] * batch_size)
lam_v = 2.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
exponential = exponential_lib.Exponential(rate=lam)
log_pdf = exponential.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (6,))
pdf = exponential.prob(x)
self.assertEqual(pdf.get_shape(), (6,))
if not stats:
return
expected_log_pdf = stats.expon.logpdf(x, scale=1 / lam_v)
self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
def testExponentialLogPDFBoundary(self):
# Check that Log PDF is finite at 0.
rate = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)
exponential = exponential_lib.Exponential(rate=rate)
log_pdf = exponential.log_prob(0.)
self.assertAllClose(np.log(rate), self.evaluate(log_pdf))
def testExponentialCDF(self):
batch_size = 6
lam = constant_op.constant([2.0] * batch_size)
lam_v = 2.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
exponential = exponential_lib.Exponential(rate=lam)
cdf = exponential.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
if not stats:
return
expected_cdf = stats.expon.cdf(x, scale=1 / lam_v)
self.assertAllClose(self.evaluate(cdf), expected_cdf)
def testExponentialLogSurvival(self):
batch_size = 7
lam = constant_op.constant([2.0] * batch_size)
lam_v = 2.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0, 10.0], dtype=np.float32)
exponential = exponential_lib.Exponential(rate=lam)
log_survival = exponential.log_survival_function(x)
self.assertEqual(log_survival.get_shape(), (7,))
if not stats:
return
expected_log_survival = stats.expon.logsf(x, scale=1 / lam_v)
self.assertAllClose(self.evaluate(log_survival), expected_log_survival)
def testExponentialMean(self):
lam_v = np.array([1.0, 4.0, 2.5])
exponential = exponential_lib.Exponential(rate=lam_v)
self.assertEqual(exponential.mean().get_shape(), (3,))
if not stats:
return
expected_mean = stats.expon.mean(scale=1 / lam_v)
self.assertAllClose(self.evaluate(exponential.mean()), expected_mean)
def testExponentialVariance(self):
lam_v = np.array([1.0, 4.0, 2.5])
exponential = exponential_lib.Exponential(rate=lam_v)
self.assertEqual(exponential.variance().get_shape(), (3,))
if not stats:
return
expected_variance = stats.expon.var(scale=1 / lam_v)
self.assertAllClose(
self.evaluate(exponential.variance()), expected_variance)
def testExponentialEntropy(self):
lam_v = np.array([1.0, 4.0, 2.5])
exponential = exponential_lib.Exponential(rate=lam_v)
self.assertEqual(exponential.entropy().get_shape(), (3,))
if not stats:
return
expected_entropy = stats.expon.entropy(scale=1 / lam_v)
self.assertAllClose(self.evaluate(exponential.entropy()), expected_entropy)
def testExponentialSample(self):
lam = constant_op.constant([3.0, 4.0])
lam_v = [3.0, 4.0]
n = constant_op.constant(100000)
exponential = exponential_lib.Exponential(rate=lam)
samples = exponential.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000, 2))
self.assertFalse(np.any(sample_values < 0.0))
if not stats:
return
for i in range(2):
self.assertLess(
stats.kstest(sample_values[:, i],
stats.expon(scale=1.0 / lam_v[i]).cdf)[0], 0.01)
def testExponentialSampleMultiDimensional(self):
batch_size = 2
lam_v = [3.0, 22.0]
lam = constant_op.constant([lam_v] * batch_size)
exponential = exponential_lib.Exponential(rate=lam)
n = 100000
samples = exponential.sample(n, seed=138)
self.assertEqual(samples.get_shape(), (n, batch_size, 2))
sample_values = self.evaluate(samples)
self.assertFalse(np.any(sample_values < 0.0))
if not stats:
return
for i in range(2):
self.assertLess(
stats.kstest(sample_values[:, 0, i],
stats.expon(scale=1.0 / lam_v[i]).cdf)[0], 0.01)
self.assertLess(
stats.kstest(sample_values[:, 1, i],
stats.expon(scale=1.0 / lam_v[i]).cdf)[0], 0.01)
def testFullyReparameterized(self):
lam = constant_op.constant([0.1, 1.0])
with backprop.GradientTape() as tape:
tape.watch(lam)
exponential = exponential_lib.Exponential(rate=lam)
samples = exponential.sample(100)
grad_lam = tape.gradient(samples, lam)
self.assertIsNotNone(grad_lam)
def testExponentialWithSoftplusRate(self):
lam = [-2.2, -3.4]
exponential = exponential_lib.ExponentialWithSoftplusRate(rate=lam)
self.assertAllClose(
self.evaluate(nn_ops.softplus(lam)), self.evaluate(exponential.rate))
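# Every scipy cross-check above passes scale=1/rate: scipy.stats.expon is
# parameterized by its mean (the `scale`), while Exponential here takes a
# rate. A minimal NumPy sketch of the correspondence (illustrative helper):
def _rate_to_scipy_scale(rate):
  rate = np.asarray(rate, dtype=np.float64)
  return 1. / rate  # Mean of Exponential(rate), i.e. scipy's `scale`.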
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/exponential_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import gamma as gamma_lib
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
special = try_import("scipy.special")
stats = try_import("scipy.stats")
@test_util.run_all_in_graph_and_eager_modes
class GammaTest(test.TestCase):
def testGammaShape(self):
alpha = constant_op.constant([3.0] * 5)
beta = constant_op.constant(11.0)
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
self.assertEqual(self.evaluate(gamma.batch_shape_tensor()), (5,))
self.assertEqual(gamma.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(self.evaluate(gamma.event_shape_tensor()), [])
self.assertEqual(gamma.event_shape, tensor_shape.TensorShape([]))
def testGammaLogPDF(self):
batch_size = 6
alpha = constant_op.constant([2.0] * batch_size)
beta = constant_op.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
log_pdf = gamma.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (6,))
pdf = gamma.prob(x)
self.assertEqual(pdf.get_shape(), (6,))
if not stats:
return
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
def testGammaLogPDFBoundary(self):
# When concentration = 1, we have an exponential distribution. Check that at
# 0 we have finite log prob.
rate = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)
gamma = gamma_lib.Gamma(concentration=1., rate=rate)
log_pdf = gamma.log_prob(0.)
self.assertAllClose(np.log(rate), self.evaluate(log_pdf))
def testGammaLogPDFMultidimensional(self):
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant([[3.0, 4.0]] * batch_size)
alpha_v = np.array([2.0, 4.0])
beta_v = np.array([3.0, 4.0])
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
log_pdf = gamma.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.get_shape(), (6, 2))
pdf = gamma.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.get_shape(), (6, 2))
if not stats:
return
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
self.assertAllClose(log_pdf_values, expected_log_pdf)
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensionalBroadcasting(self):
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant(3.0)
alpha_v = np.array([2.0, 4.0])
beta_v = 3.0
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
log_pdf = gamma.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.get_shape(), (6, 2))
pdf = gamma.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.get_shape(), (6, 2))
if not stats:
return
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
self.assertAllClose(log_pdf_values, expected_log_pdf)
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaCDF(self):
batch_size = 6
alpha = constant_op.constant([2.0] * batch_size)
beta = constant_op.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
cdf = gamma.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
if not stats:
return
expected_cdf = stats.gamma.cdf(x, alpha_v, scale=1 / beta_v)
self.assertAllClose(self.evaluate(cdf), expected_cdf)
def testGammaMean(self):
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
self.assertEqual(gamma.mean().get_shape(), (3,))
if not stats:
return
expected_means = stats.gamma.mean(alpha_v, scale=1 / beta_v)
self.assertAllClose(self.evaluate(gamma.mean()), expected_means)
def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
alpha_v = np.array([5.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
expected_modes = (alpha_v - 1) / beta_v
self.assertEqual(gamma.mode().get_shape(), (3,))
self.assertAllClose(self.evaluate(gamma.mode()), expected_modes)
def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(
concentration=alpha_v, rate=beta_v, allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(gamma.mode())
def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(self):
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(
concentration=alpha_v, rate=beta_v, allow_nan_stats=True)
expected_modes = (alpha_v - 1) / beta_v
expected_modes[0] = np.nan
self.assertEqual(gamma.mode().get_shape(), (3,))
self.assertAllClose(self.evaluate(gamma.mode()), expected_modes)
def testGammaVariance(self):
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
self.assertEqual(gamma.variance().get_shape(), (3,))
if not stats:
return
expected_variances = stats.gamma.var(alpha_v, scale=1 / beta_v)
self.assertAllClose(self.evaluate(gamma.variance()), expected_variances)
def testGammaStd(self):
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
self.assertEqual(gamma.stddev().get_shape(), (3,))
if not stats:
return
expected_stddev = stats.gamma.std(alpha_v, scale=1. / beta_v)
self.assertAllClose(self.evaluate(gamma.stddev()), expected_stddev)
def testGammaEntropy(self):
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
self.assertEqual(gamma.entropy().get_shape(), (3,))
if not stats:
return
expected_entropy = stats.gamma.entropy(alpha_v, scale=1 / beta_v)
self.assertAllClose(self.evaluate(gamma.entropy()), expected_entropy)
def testGammaSampleSmallAlpha(self):
alpha_v = 0.05
beta_v = 1.0
alpha = constant_op.constant(alpha_v)
beta = constant_op.constant(beta_v)
n = 100000
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
samples = gamma.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
if not stats:
return
self.assertAllClose(
sample_values.mean(),
stats.gamma.mean(alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
def testGammaSample(self):
alpha_v = 4.0
beta_v = 3.0
alpha = constant_op.constant(alpha_v)
beta = constant_op.constant(beta_v)
n = 100000
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
samples = gamma.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
if not stats:
return
self.assertAllClose(
sample_values.mean(),
stats.gamma.mean(alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
def testGammaFullyReparameterized(self):
alpha = constant_op.constant(4.0)
beta = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(alpha)
tape.watch(beta)
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
samples = gamma.sample(100)
grad_alpha, grad_beta = tape.gradient(samples, [alpha, beta])
self.assertIsNotNone(grad_alpha)
self.assertIsNotNone(grad_beta)
def testGammaSampleMultiDimensional(self):
alpha_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100
beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
n = 10000
samples = gamma.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(samples.get_shape(), (n, 10, 100))
self.assertEqual(sample_values.shape, (n, 10, 100))
zeros = np.zeros_like(alpha_v + beta_v) # 10 x 100
alpha_bc = alpha_v + zeros
beta_bc = beta_v + zeros
if not stats:
return
self.assertAllClose(
sample_values.mean(axis=0),
stats.gamma.mean(alpha_bc, scale=1 / beta_bc),
atol=0.,
rtol=.05)
self.assertAllClose(
sample_values.var(axis=0),
stats.gamma.var(alpha_bc, scale=1 / beta_bc),
atol=10.0,
rtol=0.)
fails = 0
trials = 0
for ai, a in enumerate(np.reshape(alpha_v, [-1])):
for bi, b in enumerate(np.reshape(beta_v, [-1])):
s = sample_values[:, bi, ai]
trials += 1
fails += 0 if self._kstest(a, b, s) else 1
self.assertLess(fails, trials * 0.03)
def _kstest(self, alpha, beta, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
if not stats:
return True # If we can't test, return that the test passes.
ks, _ = stats.kstest(samples, stats.gamma(alpha, scale=1 / beta).cdf)
# Return True when the test passes.
return ks < 0.02
def testGammaPdfOfSampleMultiDims(self):
gamma = gamma_lib.Gamma(concentration=[7., 11.], rate=[[5.], [6.]])
num = 50000
samples = gamma.sample(num, seed=137)
pdfs = gamma.prob(samples)
sample_vals, pdf_vals = self.evaluate([samples, pdfs])
self.assertEqual(samples.get_shape(), (num, 2, 2))
self.assertEqual(pdfs.get_shape(), (num, 2, 2))
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
if not stats:
return
self.assertAllClose(
stats.gamma.mean([[7., 11.], [7., 11.]],
scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.mean(axis=0),
atol=.1)
self.assertAllClose(
stats.gamma.var([[7., 11.], [7., 11.]],
scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.var(axis=0),
atol=.1)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testGammaNonPositiveInitializationParamsRaises(self):
alpha_v = constant_op.constant(0.0, name="alpha")
beta_v = constant_op.constant(1.0, name="beta")
with self.assertRaisesOpError("x > 0"):
gamma = gamma_lib.Gamma(
concentration=alpha_v, rate=beta_v, validate_args=True)
self.evaluate(gamma.mean())
alpha_v = constant_op.constant(1.0, name="alpha")
beta_v = constant_op.constant(0.0, name="beta")
with self.assertRaisesOpError("x > 0"):
gamma = gamma_lib.Gamma(
concentration=alpha_v, rate=beta_v, validate_args=True)
self.evaluate(gamma.mean())
def testGammaWithSoftplusConcentrationRate(self):
alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
beta_v = constant_op.constant([1.0, -3.6], name="beta")
gamma = gamma_lib.GammaWithSoftplusConcentrationRate(
concentration=alpha_v, rate=beta_v)
self.assertAllEqual(
self.evaluate(nn_ops.softplus(alpha_v)),
self.evaluate(gamma.concentration))
self.assertAllEqual(
self.evaluate(nn_ops.softplus(beta_v)), self.evaluate(gamma.rate))
def testGammaGammaKL(self):
alpha0 = np.array([3.])
beta0 = np.array([1., 2., 3., 1.5, 2.5, 3.5])
alpha1 = np.array([0.4])
beta1 = np.array([0.5, 1., 1.5, 2., 2.5, 3.])
# Build graph.
g0 = gamma_lib.Gamma(concentration=alpha0, rate=beta0)
g1 = gamma_lib.Gamma(concentration=alpha1, rate=beta1)
x = g0.sample(int(1e4), seed=0)
kl_sample = math_ops.reduce_mean(g0.log_prob(x) - g1.log_prob(x), 0)
kl_actual = kullback_leibler.kl_divergence(g0, g1)
# Execute graph.
[kl_sample_, kl_actual_] = self.evaluate([kl_sample, kl_actual])
self.assertEqual(beta0.shape, kl_actual.get_shape())
if not special:
return
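    # Closed form: KL(Gamma(a0, b0) || Gamma(a1, b1)) =
    #   (a0 - a1) * digamma(a0) + lgamma(a1) - lgamma(a0)
    #   + a1 * (log(b0) - log(b1)) + a0 * (b1 / b0 - 1).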
kl_expected = ((alpha0 - alpha1) * special.digamma(alpha0)
+ special.gammaln(alpha1)
- special.gammaln(alpha0)
+ alpha1 * np.log(beta0)
- alpha1 * np.log(beta1)
+ alpha0 * (beta1 / beta0 - 1.))
self.assertAllClose(kl_expected, kl_actual_, atol=0., rtol=1e-6)
self.assertAllClose(kl_sample_, kl_actual_, atol=0., rtol=1e-1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/gamma_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kernel tests for tf.distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Uniform distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import uniform as uniform_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class UniformTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testUniformRange(self):
a = 3.0
b = 10.0
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertAllClose(a, self.evaluate(uniform.low))
self.assertAllClose(b, self.evaluate(uniform.high))
self.assertAllClose(b - a, self.evaluate(uniform.range()))
@test_util.run_in_graph_and_eager_modes
def testUniformPDF(self):
a = constant_op.constant([-3.0] * 5 + [15.0])
b = constant_op.constant([11.0] * 5 + [20.0])
uniform = uniform_lib.Uniform(low=a, high=b)
a_v = -3.0
b_v = 11.0
x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)
def _expected_pdf():
pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
pdf[x > b_v] = 0.0
pdf[x < a_v] = 0.0
pdf[5] = 1.0 / (20.0 - 15.0)
return pdf
expected_pdf = _expected_pdf()
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
log_pdf = uniform.log_prob(x)
self.assertAllClose(np.log(expected_pdf), self.evaluate(log_pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformShape(self):
a = constant_op.constant([-3.0] * 5)
b = constant_op.constant(11.0)
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertEqual(self.evaluate(uniform.batch_shape_tensor()), (5,))
self.assertEqual(uniform.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(self.evaluate(uniform.event_shape_tensor()), [])
self.assertEqual(uniform.event_shape, tensor_shape.TensorShape([]))
@test_util.run_in_graph_and_eager_modes
def testUniformPDFWithScalarEndpoint(self):
a = constant_op.constant([0.0, 5.0])
b = constant_op.constant(10.0)
uniform = uniform_lib.Uniform(low=a, high=b)
x = np.array([0.0, 8.0], dtype=np.float32)
expected_pdf = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformCDF(self):
batch_size = 6
a = constant_op.constant([1.0] * batch_size)
b = constant_op.constant([11.0] * batch_size)
a_v = 1.0
b_v = 11.0
x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)
uniform = uniform_lib.Uniform(low=a, high=b)
def _expected_cdf():
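      # Uniform CDF: (x - a) / (b - a), clipped to 0 below a and 1 above b.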
cdf = (x - a_v) / (b_v - a_v)
cdf[x >= b_v] = 1
cdf[x < a_v] = 0
return cdf
cdf = uniform.cdf(x)
self.assertAllClose(_expected_cdf(), self.evaluate(cdf))
log_cdf = uniform.log_cdf(x)
self.assertAllClose(np.log(_expected_cdf()), self.evaluate(log_cdf))
@test_util.run_in_graph_and_eager_modes
def testUniformEntropy(self):
a_v = np.array([1.0, 1.0, 1.0])
b_v = np.array([[1.5, 2.0, 3.0]])
uniform = uniform_lib.Uniform(low=a_v, high=b_v)
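    # Differential entropy of Uniform(a, b) is log(b - a).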
expected_entropy = np.log(b_v - a_v)
self.assertAllClose(expected_entropy, self.evaluate(uniform.entropy()))
@test_util.run_in_graph_and_eager_modes
def testUniformAssertMaxGtMin(self):
a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
b_v = np.array([1.0, 2.0, 3.0], dtype=np.float32)
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"x < y"):
uniform = uniform_lib.Uniform(low=a_v, high=b_v, validate_args=True)
self.evaluate(uniform.low)
@test_util.run_in_graph_and_eager_modes
def testUniformSample(self):
a = constant_op.constant([3.0, 4.0])
b = constant_op.constant(13.0)
a1_v = 3.0
a2_v = 4.0
b_v = 13.0
n = constant_op.constant(100000)
uniform = uniform_lib.Uniform(low=a, high=b)
samples = uniform.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000, 2))
self.assertAllClose(
sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-1, rtol=0.)
self.assertAllClose(
sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-1, rtol=0.)
self.assertFalse(
np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
self.assertFalse(
np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
@test_util.run_in_graph_and_eager_modes
def _testUniformSampleMultiDimensional(self):
# DISABLED: Please enable this test once b/issues/30149644 is resolved.
batch_size = 2
a_v = [3.0, 22.0]
b_v = [13.0, 35.0]
a = constant_op.constant([a_v] * batch_size)
b = constant_op.constant([b_v] * batch_size)
uniform = uniform_lib.Uniform(low=a, high=b)
n_v = 100000
n = constant_op.constant(n_v)
samples = uniform.sample(n)
self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))
sample_values = self.evaluate(samples)
self.assertFalse(
np.any(sample_values[:, 0, 0] < a_v[0]) or
np.any(sample_values[:, 0, 0] >= b_v[0]))
self.assertFalse(
np.any(sample_values[:, 0, 1] < a_v[1]) or
np.any(sample_values[:, 0, 1] >= b_v[1]))
self.assertAllClose(
sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
self.assertAllClose(
sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testUniformMean(self):
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.mean()), s_uniform.mean())
@test_util.run_in_graph_and_eager_modes
def testUniformVariance(self):
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.variance()), s_uniform.var())
@test_util.run_in_graph_and_eager_modes
def testUniformStd(self):
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.stddev()), s_uniform.std())
@test_util.run_in_graph_and_eager_modes
def testUniformNans(self):
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(low=a, high=b)
no_nans = constant_op.constant(1.0)
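    # 0.0 / 0.0 produces NaN under IEEE-754 floating-point semantics.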
nans = constant_op.constant(0.0) / constant_op.constant(0.0)
self.assertTrue(self.evaluate(math_ops.is_nan(nans)))
with_nans = array_ops.stack([no_nans, nans])
pdf = uniform.prob(with_nans)
is_nan = self.evaluate(math_ops.is_nan(pdf))
self.assertFalse(is_nan[0])
self.assertTrue(is_nan[1])
@test_util.run_in_graph_and_eager_modes
def testUniformSamplePdf(self):
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(a, b)
self.assertTrue(
self.evaluate(
math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0)))
@test_util.run_in_graph_and_eager_modes
def testUniformBroadcasting(self):
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformSampleWithShape(self):
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob(uniform.sample((2, 3)))
# pylint: disable=bad-continuation
expected_pdf = [
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
]
# pylint: enable=bad-continuation
self.assertAllClose(expected_pdf, self.evaluate(pdf))
pdf = uniform.prob(uniform.sample())
expected_pdf = [1.0, 0.1]
self.assertAllClose(expected_pdf, self.evaluate(pdf))
def testFullyReparameterized(self):
a = constant_op.constant(0.1)
b = constant_op.constant(0.8)
with backprop.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
uniform = uniform_lib.Uniform(a, b)
samples = uniform.sample(100)
grad_a, grad_b = tape.gradient(samples, [a, b])
self.assertIsNotNone(grad_a)
self.assertIsNotNone(grad_b)
# Eager doesn't pass due to a type mismatch in one of the ops.
def testUniformFloat64(self):
uniform = uniform_lib.Uniform(
low=np.float64(0.), high=np.float64(1.))
self.assertAllClose(
[1., 1.],
self.evaluate(uniform.prob(np.array([0.5, 0.6], dtype=np.float64))))
self.assertAllClose(
[0.5, 0.6],
self.evaluate(uniform.cdf(np.array([0.5, 0.6], dtype=np.float64))))
self.assertAllClose(0.5, self.evaluate(uniform.mean()))
self.assertAllClose(1 / 12., self.evaluate(uniform.variance()))
self.assertAllClose(0., self.evaluate(uniform.entropy()))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/uniform_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import laplace as laplace_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
@test_util.run_all_in_graph_and_eager_modes
class LaplaceTest(test.TestCase):
def testLaplaceShape(self):
loc = constant_op.constant([3.0] * 5)
scale = constant_op.constant(11.0)
laplace = laplace_lib.Laplace(loc=loc, scale=scale)
self.assertEqual(self.evaluate(laplace.batch_shape_tensor()), (5,))
self.assertEqual(laplace.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(self.evaluate(laplace.event_shape_tensor()), [])
self.assertEqual(laplace.event_shape, tensor_shape.TensorShape([]))
def testLaplaceLogPDF(self):
batch_size = 6
loc = constant_op.constant([2.0] * batch_size)
scale = constant_op.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = laplace_lib.Laplace(loc=loc, scale=scale)
log_pdf = laplace.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (6,))
if not stats:
return
expected_log_pdf = stats.laplace.logpdf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
pdf = laplace.prob(x)
self.assertEqual(pdf.get_shape(), (6,))
self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
def testLaplaceLogPDFMultidimensional(self):
batch_size = 6
loc = constant_op.constant([[2.0, 4.0]] * batch_size)
scale = constant_op.constant([[3.0, 4.0]] * batch_size)
loc_v = np.array([2.0, 4.0])
scale_v = np.array([3.0, 4.0])
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
laplace = laplace_lib.Laplace(loc=loc, scale=scale)
log_pdf = laplace.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.get_shape(), (6, 2))
pdf = laplace.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.get_shape(), (6, 2))
if not stats:
return
expected_log_pdf = stats.laplace.logpdf(x, loc_v, scale=scale_v)
self.assertAllClose(log_pdf_values, expected_log_pdf)
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testLaplaceLogPDFMultidimensionalBroadcasting(self):
batch_size = 6
loc = constant_op.constant([[2.0, 4.0]] * batch_size)
scale = constant_op.constant(3.0)
loc_v = np.array([2.0, 4.0])
scale_v = 3.0
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
laplace = laplace_lib.Laplace(loc=loc, scale=scale)
log_pdf = laplace.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.get_shape(), (6, 2))
pdf = laplace.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.get_shape(), (6, 2))
if not stats:
return
expected_log_pdf = stats.laplace.logpdf(x, loc_v, scale=scale_v)
self.assertAllClose(log_pdf_values, expected_log_pdf)
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testLaplaceCDF(self):
batch_size = 6
loc = constant_op.constant([2.0] * batch_size)
scale = constant_op.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = laplace_lib.Laplace(loc=loc, scale=scale)
cdf = laplace.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
if not stats:
return
expected_cdf = stats.laplace.cdf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(cdf), expected_cdf)
def testLaplaceLogCDF(self):
batch_size = 6
loc = constant_op.constant([2.0] * batch_size)
scale = constant_op.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = laplace_lib.Laplace(loc=loc, scale=scale)
cdf = laplace.log_cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
if not stats:
return
expected_cdf = stats.laplace.logcdf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(cdf), expected_cdf)
def testLaplaceLogSurvivalFunction(self):
batch_size = 6
loc = constant_op.constant([2.0] * batch_size)
scale = constant_op.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = laplace_lib.Laplace(loc=loc, scale=scale)
sf = laplace.log_survival_function(x)
self.assertEqual(sf.get_shape(), (6,))
if not stats:
return
expected_sf = stats.laplace.logsf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(sf), expected_sf)
def testLaplaceMean(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
self.assertEqual(laplace.mean().get_shape(), (3,))
if not stats:
return
expected_means = stats.laplace.mean(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.mean()), expected_means)
def testLaplaceMode(self):
loc_v = np.array([0.5, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
self.assertEqual(laplace.mode().get_shape(), (3,))
self.assertAllClose(self.evaluate(laplace.mode()), loc_v)
def testLaplaceVariance(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
self.assertEqual(laplace.variance().get_shape(), (3,))
if not stats:
return
expected_variances = stats.laplace.var(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.variance()), expected_variances)
def testLaplaceStd(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
self.assertEqual(laplace.stddev().get_shape(), (3,))
if not stats:
return
expected_stddev = stats.laplace.std(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.stddev()), expected_stddev)
def testLaplaceEntropy(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
self.assertEqual(laplace.entropy().get_shape(), (3,))
if not stats:
return
expected_entropy = stats.laplace.entropy(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.entropy()), expected_entropy)
def testLaplaceSample(self):
loc_v = 4.0
scale_v = 3.0
loc = constant_op.constant(loc_v)
scale = constant_op.constant(scale_v)
n = 100000
laplace = laplace_lib.Laplace(loc=loc, scale=scale)
samples = laplace.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
if not stats:
return
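    # Laplace(loc, scale) has mean `loc` and variance 2 * scale**2.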
self.assertAllClose(
sample_values.mean(),
stats.laplace.mean(loc_v, scale=scale_v),
rtol=0.05,
atol=0.)
self.assertAllClose(
sample_values.var(),
stats.laplace.var(loc_v, scale=scale_v),
rtol=0.05,
atol=0.)
self.assertTrue(self._kstest(loc_v, scale_v, sample_values))
def testLaplaceFullyReparameterized(self):
loc = constant_op.constant(4.0)
scale = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(loc)
tape.watch(scale)
laplace = laplace_lib.Laplace(loc=loc, scale=scale)
samples = laplace.sample(100)
grad_loc, grad_scale = tape.gradient(samples, [loc, scale])
self.assertIsNotNone(grad_loc)
self.assertIsNotNone(grad_scale)
def testLaplaceSampleMultiDimensional(self):
loc_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100
scale_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
n = 10000
samples = laplace.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(samples.get_shape(), (n, 10, 100))
self.assertEqual(sample_values.shape, (n, 10, 100))
zeros = np.zeros_like(loc_v + scale_v) # 10 x 100
loc_bc = loc_v + zeros
scale_bc = scale_v + zeros
if not stats:
return
self.assertAllClose(
sample_values.mean(axis=0),
stats.laplace.mean(loc_bc, scale=scale_bc),
rtol=0.35,
atol=0.)
self.assertAllClose(
sample_values.var(axis=0),
stats.laplace.var(loc_bc, scale=scale_bc),
rtol=0.105,
atol=0.0)
fails = 0
trials = 0
for ai, a in enumerate(np.reshape(loc_v, [-1])):
for bi, b in enumerate(np.reshape(scale_v, [-1])):
s = sample_values[:, bi, ai]
trials += 1
fails += 0 if self._kstest(a, b, s) else 1
self.assertLess(fails, trials * 0.03)
def _kstest(self, loc, scale, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
if not stats:
      return True  # If scipy isn't available, treat the test as passing.
ks, _ = stats.kstest(samples, stats.laplace(loc, scale=scale).cdf)
# Return True when the test passes.
return ks < 0.02
def testLaplacePdfOfSampleMultiDims(self):
laplace = laplace_lib.Laplace(loc=[7., 11.], scale=[[5.], [6.]])
num = 50000
samples = laplace.sample(num, seed=137)
pdfs = laplace.prob(samples)
sample_vals, pdf_vals = self.evaluate([samples, pdfs])
self.assertEqual(samples.get_shape(), (num, 2, 2))
self.assertEqual(pdfs.get_shape(), (num, 2, 2))
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
if not stats:
return
self.assertAllClose(
stats.laplace.mean(
[[7., 11.], [7., 11.]], scale=np.array([[5., 5.], [6., 6.]])),
sample_vals.mean(axis=0),
rtol=0.05,
atol=0.)
self.assertAllClose(
stats.laplace.var([[7., 11.], [7., 11.]],
scale=np.array([[5., 5.], [6., 6.]])),
sample_vals.var(axis=0),
rtol=0.05,
atol=0.)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testLaplaceNonPositiveInitializationParamsRaises(self):
loc_v = constant_op.constant(0.0, name="loc")
scale_v = constant_op.constant(-1.0, name="scale")
with self.assertRaisesOpError("Condition x > 0 did not hold element-wise"):
laplace = laplace_lib.Laplace(
loc=loc_v, scale=scale_v, validate_args=True)
self.evaluate(laplace.mean())
loc_v = constant_op.constant(1.0, name="loc")
scale_v = constant_op.constant(0.0, name="scale")
with self.assertRaisesOpError("Condition x > 0 did not hold element-wise"):
laplace = laplace_lib.Laplace(
loc=loc_v, scale=scale_v, validate_args=True)
self.evaluate(laplace.mean())
def testLaplaceWithSoftplusScale(self):
loc_v = constant_op.constant([0.0, 1.0], name="loc")
scale_v = constant_op.constant([-1.0, 2.0], name="scale")
laplace = laplace_lib.LaplaceWithSoftplusScale(loc=loc_v, scale=scale_v)
self.assertAllClose(
self.evaluate(nn_ops.softplus(scale_v)), self.evaluate(laplace.scale))
self.assertAllClose(self.evaluate(loc_v), self.evaluate(laplace.loc))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/laplace_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import util as distribution_util
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
du = distribution_util
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
special = try_import("scipy.special")
def _logit(x):
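  # logit(x) = log(x / (1 - x)); log1p keeps this accurate for x near 0.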
x = np.asarray(x)
return np.log(x) - np.log1p(-x)
class AssertCloseTest(test.TestCase):
@test_util.run_deprecated_v1
def testAssertIntegerForm(self):
    # All components are exact integers, so the integer-form check passes.
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
    # First component differs from an integer by 1e-4, more than float32 eps.
z = array_ops.placeholder(dtypes.float32)
    # This shouldn't be detected as an integer.
w = array_ops.placeholder(dtypes.float32)
feed_dict = {x: [1., 5, 10, 15, 20], y: [1.1, 5, 10, 15, 20],
z: [1.0001, 5, 10, 15, 20], w: [1e-8, 5, 10, 15, 20]}
with self.cached_session():
with ops.control_dependencies([du.assert_integer_form(x)]):
array_ops.identity(x).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("has non-integer components"):
with ops.control_dependencies(
[du.assert_integer_form(y)]):
array_ops.identity(y).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("has non-integer components"):
with ops.control_dependencies(
[du.assert_integer_form(z)]):
array_ops.identity(z).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("has non-integer components"):
with ops.control_dependencies(
[du.assert_integer_form(w)]):
array_ops.identity(w).eval(feed_dict=feed_dict)
class MaybeGetStaticTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testGetStaticInt(self):
x = 2
self.assertEqual(x, du.maybe_get_static_value(x))
self.assertAllClose(
np.array(2.), du.maybe_get_static_value(x, dtype=np.float64))
@test_util.run_in_graph_and_eager_modes
def testGetStaticNumpyArray(self):
x = np.array(2, dtype=np.int32)
self.assertEqual(x, du.maybe_get_static_value(x))
self.assertAllClose(
np.array(2.), du.maybe_get_static_value(x, dtype=np.float64))
@test_util.run_in_graph_and_eager_modes
def testGetStaticConstant(self):
x = constant_op.constant(2, dtype=dtypes.int32)
self.assertEqual(np.array(2, dtype=np.int32), du.maybe_get_static_value(x))
self.assertAllClose(
np.array(2.), du.maybe_get_static_value(x, dtype=np.float64))
@test_util.run_deprecated_v1
def testGetStaticPlaceholder(self):
x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
self.assertEqual(None, du.maybe_get_static_value(x))
self.assertEqual(None, du.maybe_get_static_value(x, dtype=np.float64))
class GetLogitsAndProbsTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testImproperArguments(self):
with self.assertRaises(ValueError):
du.get_logits_and_probs(logits=None, probs=None)
with self.assertRaises(ValueError):
du.get_logits_and_probs(logits=[0.1], probs=[0.1])
@test_util.run_in_graph_and_eager_modes
def testLogits(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
logits = _logit(p)
new_logits, new_p = du.get_logits_and_probs(
logits=logits, validate_args=True)
self.assertAllClose(p, self.evaluate(new_p), rtol=1e-5, atol=0.)
self.assertAllClose(logits, self.evaluate(new_logits), rtol=1e-5, atol=0.)
@test_util.run_in_graph_and_eager_modes
def testLogitsMultidimensional(self):
p = np.array([0.2, 0.3, 0.5], dtype=np.float32)
logits = np.log(p)
new_logits, new_p = du.get_logits_and_probs(
logits=logits, multidimensional=True, validate_args=True)
self.assertAllClose(self.evaluate(new_p), p)
self.assertAllClose(self.evaluate(new_logits), logits)
@test_util.run_in_graph_and_eager_modes
def testProbability(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
new_logits, new_p = du.get_logits_and_probs(probs=p, validate_args=True)
self.assertAllClose(_logit(p), self.evaluate(new_logits))
self.assertAllClose(p, self.evaluate(new_p))
@test_util.run_in_graph_and_eager_modes
def testProbabilityMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
new_logits, new_p = du.get_logits_and_probs(
probs=p, multidimensional=True, validate_args=True)
self.assertAllClose(np.log(p), self.evaluate(new_logits))
self.assertAllClose(p, self.evaluate(new_p))
@test_util.run_in_graph_and_eager_modes
def testProbabilityValidateArgs(self):
p = [0.01, 0.2, 0.5, 0.7, .99]
# Component less than 0.
p2 = [-1, 0.2, 0.5, 0.3, .2]
# Component greater than 1.
p3 = [2, 0.2, 0.5, 0.3, .2]
_, prob = du.get_logits_and_probs(probs=p, validate_args=True)
self.evaluate(prob)
with self.assertRaisesOpError("Condition x >= 0"):
_, prob = du.get_logits_and_probs(probs=p2, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(probs=p2, validate_args=False)
self.evaluate(prob)
with self.assertRaisesOpError("probs has components greater than 1"):
_, prob = du.get_logits_and_probs(probs=p3, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(probs=p3, validate_args=False)
self.evaluate(prob)
@test_util.run_in_graph_and_eager_modes
def testProbabilityValidateArgsMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
# Component less than 0. Still sums to 1.
p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)
# Component greater than 1. Does not sum to 1.
p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)
# Does not sum to 1.
p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)
_, prob = du.get_logits_and_probs(probs=p, multidimensional=True)
self.evaluate(prob)
with self.assertRaisesOpError("Condition x >= 0"):
_, prob = du.get_logits_and_probs(
probs=p2, multidimensional=True, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(
probs=p2, multidimensional=True, validate_args=False)
self.evaluate(prob)
with self.assertRaisesOpError(
"(probs has components greater than 1|probs does not sum to 1)"):
_, prob = du.get_logits_and_probs(
probs=p3, multidimensional=True, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(
probs=p3, multidimensional=True, validate_args=False)
self.evaluate(prob)
with self.assertRaisesOpError("probs does not sum to 1"):
_, prob = du.get_logits_and_probs(
probs=p4, multidimensional=True, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(
probs=p4, multidimensional=True, validate_args=False)
self.evaluate(prob)
@test_util.run_deprecated_v1
def testProbsMultidimShape(self):
with self.cached_session():
with self.assertRaises(ValueError):
p = array_ops.ones([int(2**11+1)], dtype=np.float16)
du.get_logits_and_probs(
probs=p, multidimensional=True, validate_args=True)
with self.assertRaisesOpError(
"Number of classes exceeds `dtype` precision"):
p = array_ops.placeholder(dtype=dtypes.float16)
_, prob = du.get_logits_and_probs(
probs=p, multidimensional=True, validate_args=True)
prob.eval(feed_dict={p: np.ones([int(2**11+1)])})
@test_util.run_deprecated_v1
def testLogitsMultidimShape(self):
with self.cached_session():
with self.assertRaises(ValueError):
l = array_ops.ones([int(2**11+1)], dtype=np.float16)
du.get_logits_and_probs(
logits=l, multidimensional=True, validate_args=True)
with self.assertRaisesOpError(
"Number of classes exceeds `dtype` precision"):
l = array_ops.placeholder(dtype=dtypes.float16)
logit, _ = du.get_logits_and_probs(
logits=l, multidimensional=True, validate_args=True)
logit.eval(feed_dict={l: np.ones([int(2**11+1)])})
class EmbedCheckCategoricalEventShapeTest(test.TestCase):
@test_util.run_deprecated_v1
def testTooSmall(self):
with self.cached_session():
with self.assertRaises(ValueError):
param = array_ops.ones([1], dtype=np.float16)
checked_param = du.embed_check_categorical_event_shape(
param)
with self.assertRaisesOpError(
"must have at least 2 events"):
param = array_ops.placeholder(dtype=dtypes.float16)
checked_param = du.embed_check_categorical_event_shape(
param)
checked_param.eval(feed_dict={param: np.ones([1])})
@test_util.run_deprecated_v1
def testTooLarge(self):
with self.cached_session():
with self.assertRaises(ValueError):
param = array_ops.ones([int(2**11+1)], dtype=dtypes.float16)
checked_param = du.embed_check_categorical_event_shape(
param)
with self.assertRaisesOpError(
"Number of classes exceeds `dtype` precision"):
param = array_ops.placeholder(dtype=dtypes.float16)
checked_param = du.embed_check_categorical_event_shape(
param)
checked_param.eval(feed_dict={param: np.ones([int(2**11+1)])})
@test_util.run_in_graph_and_eager_modes
def testUnsupportedDtype(self):
param = ops.convert_to_tensor(
np.ones([2**11 + 1]).astype(dtypes.qint16.as_numpy_dtype),
dtype=dtypes.qint16)
with self.assertRaises(TypeError):
du.embed_check_categorical_event_shape(param)
class EmbedCheckIntegerCastingClosedTest(test.TestCase):
@test_util.run_deprecated_v1
def testCorrectlyAssertsNonnegative(self):
with self.cached_session():
with self.assertRaisesOpError("Elements must be non-negative"):
x = array_ops.placeholder(dtype=dtypes.float16)
x_checked = du.embed_check_integer_casting_closed(
x, target_dtype=dtypes.int16)
x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.float16)})
@test_util.run_deprecated_v1
  def testCorrectlyAssertsIntegerForm(self):
with self.cached_session():
with self.assertRaisesOpError("Elements must be int16-equivalent."):
x = array_ops.placeholder(dtype=dtypes.float16)
x_checked = du.embed_check_integer_casting_closed(
x, target_dtype=dtypes.int16)
x_checked.eval(feed_dict={x: np.array([1, 1.5], dtype=np.float16)})
@test_util.run_deprecated_v1
def testCorrectlyAssertsLargestPossibleInteger(self):
with self.cached_session():
with self.assertRaisesOpError("Elements cannot exceed 32767."):
x = array_ops.placeholder(dtype=dtypes.int32)
x_checked = du.embed_check_integer_casting_closed(
x, target_dtype=dtypes.int16)
x_checked.eval(feed_dict={x: np.array([1, 2**15], dtype=np.int32)})
@test_util.run_deprecated_v1
def testCorrectlyAssertsSmallestPossibleInteger(self):
with self.cached_session():
with self.assertRaisesOpError("Elements cannot be smaller than 0."):
x = array_ops.placeholder(dtype=dtypes.int32)
x_checked = du.embed_check_integer_casting_closed(
x, target_dtype=dtypes.uint16, assert_nonnegative=False)
x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.int32)})
@test_util.run_all_in_graph_and_eager_modes
class LogCombinationsTest(test.TestCase):
def testLogCombinationsBinomial(self):
n = [2, 5, 12, 15]
k = [1, 2, 4, 11]
if not special:
return
log_combs = np.log(special.binom(n, k))
n = np.array(n, dtype=np.float32)
counts = [[1., 1], [2., 3], [4., 8], [11, 4]]
log_binom = du.log_combinations(n, counts)
self.assertEqual([4], log_binom.get_shape())
self.assertAllClose(log_combs, self.evaluate(log_binom))
def testLogCombinationsShape(self):
# Shape [2, 2]
n = [[2, 5], [12, 15]]
n = np.array(n, dtype=np.float32)
# Shape [2, 2, 4]
counts = [[[1., 1, 0, 0], [2., 2, 1, 0]], [[4., 4, 1, 3], [10, 1, 1, 4]]]
log_binom = du.log_combinations(n, counts)
self.assertEqual([2, 2], log_binom.get_shape())
class DynamicShapeTest(test.TestCase):
@test_util.run_deprecated_v1
def testSameDynamicShape(self):
with self.cached_session():
scalar = constant_op.constant(2.0)
scalar1 = array_ops.placeholder(dtype=dtypes.float32)
vector = [0.3, 0.4, 0.5]
vector1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
vector2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
multidimensional = [[0.3, 0.4], [0.2, 0.6]]
multidimensional1 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None])
multidimensional2 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None])
# Scalar
self.assertTrue(
du.same_dynamic_shape(scalar, scalar1).eval({
scalar1: 2.0
}))
# Vector
self.assertTrue(
du.same_dynamic_shape(vector, vector1).eval({
vector1: [2.0, 3.0, 4.0]
}))
self.assertTrue(
du.same_dynamic_shape(vector1, vector2).eval({
vector1: [2.0, 3.0, 4.0],
vector2: [2.0, 3.5, 6.0]
}))
# Multidimensional
self.assertTrue(
du.same_dynamic_shape(
multidimensional, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertTrue(
du.same_dynamic_shape(
multidimensional1, multidimensional2).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
multidimensional2: [[1.0, 3.5], [6.3, 2.3]]
}))
# Scalar, X
self.assertFalse(
du.same_dynamic_shape(scalar, vector1).eval({
vector1: [2.0, 3.0, 4.0]
}))
self.assertFalse(
du.same_dynamic_shape(scalar1, vector1).eval({
scalar1: 2.0,
vector1: [2.0, 3.0, 4.0]
}))
self.assertFalse(
du.same_dynamic_shape(scalar, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertFalse(
du.same_dynamic_shape(scalar1, multidimensional1).eval(
{
scalar1: 2.0,
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
# Vector, X
self.assertFalse(
du.same_dynamic_shape(vector, vector1).eval({
vector1: [2.0, 3.0]
}))
self.assertFalse(
du.same_dynamic_shape(vector1, vector2).eval({
vector1: [2.0, 3.0, 4.0],
vector2: [6.0]
}))
self.assertFalse(
du.same_dynamic_shape(vector, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertFalse(
du.same_dynamic_shape(vector1, multidimensional1).eval(
{
vector1: [2.0, 3.0, 4.0],
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
# Multidimensional, X
self.assertFalse(
du.same_dynamic_shape(
multidimensional, multidimensional1).eval({
multidimensional1: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
}))
self.assertFalse(
du.same_dynamic_shape(
multidimensional1, multidimensional2).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
multidimensional2: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
}))
class RotateTransposeTest(test.TestCase):
def _np_rotate_transpose(self, x, shift):
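    # Cyclically rotates the dimension order by `shift`; e.g. shift=1 maps a
    # shape [a, b, c] tensor to shape [c, a, b].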
if not isinstance(x, np.ndarray):
x = np.array(x)
return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))
@test_util.run_in_graph_and_eager_modes
def testRollStatic(self):
if context.executing_eagerly():
error_message = r"Attempt to convert a value \(None\)"
else:
error_message = "None values not supported."
with self.assertRaisesRegexp(ValueError, error_message):
du.rotate_transpose(None, 1)
for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):
for shift in np.arange(-5, 5):
y = du.rotate_transpose(x, shift)
self.assertAllEqual(
self._np_rotate_transpose(x, shift), self.evaluate(y))
self.assertAllEqual(np.roll(x.shape, shift), y.get_shape().as_list())
@test_util.run_deprecated_v1
def testRollDynamic(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
shift = array_ops.placeholder(dtypes.int32)
      for x_value in (np.ones(1, dtype=x.dtype.as_numpy_dtype()),
                      np.ones((2, 1), dtype=x.dtype.as_numpy_dtype()),
                      np.ones((3, 2, 1), dtype=x.dtype.as_numpy_dtype())):
for shift_value in np.arange(-5, 5):
self.assertAllEqual(
self._np_rotate_transpose(x_value, shift_value),
sess.run(du.rotate_transpose(x, shift),
feed_dict={x: x_value,
shift: shift_value}))
class PickVectorTest(test.TestCase):
@test_util.run_deprecated_v1
def testCorrectlyPicksVector(self):
with self.cached_session():
x = np.arange(10, 12)
y = np.arange(15, 18)
self.assertAllEqual(
x, self.evaluate(du.pick_vector(math_ops.less(0, 5), x, y)))
self.assertAllEqual(
y, self.evaluate(du.pick_vector(math_ops.less(5, 0), x, y)))
self.assertAllEqual(x,
du.pick_vector(
constant_op.constant(True), x, y)) # No eval.
self.assertAllEqual(y,
du.pick_vector(
constant_op.constant(False), x, y)) # No eval.
class PreferStaticRankTest(test.TestCase):
@test_util.run_deprecated_v1
def testNonEmptyConstantTensor(self):
x = array_ops.zeros((2, 3, 4))
rank = du.prefer_static_rank(x)
self.assertIsInstance(rank, np.ndarray)
self.assertEqual(3, rank)
@test_util.run_deprecated_v1
def testEmptyConstantTensor(self):
x = constant_op.constant([])
rank = du.prefer_static_rank(x)
self.assertIsInstance(rank, np.ndarray)
self.assertEqual(1, rank)
@test_util.run_deprecated_v1
def testScalarTensor(self):
x = constant_op.constant(1.)
rank = du.prefer_static_rank(x)
self.assertIsInstance(rank, np.ndarray)
self.assertEqual(0, rank)
@test_util.run_deprecated_v1
def testDynamicRankEndsUpBeingNonEmpty(self):
x = array_ops.placeholder(np.float64, shape=None)
rank = du.prefer_static_rank(x)
with self.cached_session():
self.assertAllEqual(2, rank.eval(feed_dict={x: np.zeros((2, 3))}))
@test_util.run_deprecated_v1
def testDynamicRankEndsUpBeingEmpty(self):
x = array_ops.placeholder(np.int32, shape=None)
rank = du.prefer_static_rank(x)
with self.cached_session():
self.assertAllEqual(1, rank.eval(feed_dict={x: []}))
@test_util.run_deprecated_v1
def testDynamicRankEndsUpBeingScalar(self):
x = array_ops.placeholder(np.int32, shape=None)
rank = du.prefer_static_rank(x)
with self.cached_session():
self.assertAllEqual(0, rank.eval(feed_dict={x: 1}))
class PreferStaticShapeTest(test.TestCase):
@test_util.run_deprecated_v1
def testNonEmptyConstantTensor(self):
x = array_ops.zeros((2, 3, 4))
shape = du.prefer_static_shape(x)
self.assertIsInstance(shape, np.ndarray)
self.assertAllEqual(np.array([2, 3, 4]), shape)
@test_util.run_deprecated_v1
def testEmptyConstantTensor(self):
x = constant_op.constant([])
shape = du.prefer_static_shape(x)
self.assertIsInstance(shape, np.ndarray)
self.assertAllEqual(np.array([0]), shape)
@test_util.run_deprecated_v1
def testScalarTensor(self):
x = constant_op.constant(1.)
shape = du.prefer_static_shape(x)
self.assertIsInstance(shape, np.ndarray)
self.assertAllEqual(np.array([]), shape)
@test_util.run_deprecated_v1
def testDynamicShapeEndsUpBeingNonEmpty(self):
x = array_ops.placeholder(np.float64, shape=None)
shape = du.prefer_static_shape(x)
with self.cached_session():
self.assertAllEqual((2, 3), shape.eval(feed_dict={x: np.zeros((2, 3))}))
@test_util.run_deprecated_v1
def testDynamicShapeEndsUpBeingEmpty(self):
x = array_ops.placeholder(np.int32, shape=None)
shape = du.prefer_static_shape(x)
with self.cached_session():
self.assertAllEqual(np.array([0]), shape.eval(feed_dict={x: []}))
@test_util.run_deprecated_v1
def testDynamicShapeEndsUpBeingScalar(self):
x = array_ops.placeholder(np.int32, shape=None)
shape = du.prefer_static_shape(x)
with self.cached_session():
self.assertAllEqual(np.array([]), shape.eval(feed_dict={x: 1}))
class PreferStaticValueTest(test.TestCase):
@test_util.run_deprecated_v1
def testNonEmptyConstantTensor(self):
x = array_ops.zeros((2, 3, 4))
value = du.prefer_static_value(x)
self.assertIsInstance(value, np.ndarray)
self.assertAllEqual(np.zeros((2, 3, 4)), value)
@test_util.run_deprecated_v1
def testEmptyConstantTensor(self):
x = constant_op.constant([])
value = du.prefer_static_value(x)
self.assertIsInstance(value, np.ndarray)
self.assertAllEqual(np.array([]), value)
@test_util.run_deprecated_v1
def testScalarTensor(self):
x = constant_op.constant(1.)
value = du.prefer_static_value(x)
self.assertIsInstance(value, np.ndarray)
self.assertAllEqual(np.array(1.), value)
@test_util.run_deprecated_v1
def testDynamicValueEndsUpBeingNonEmpty(self):
x = array_ops.placeholder(np.float64, shape=None)
value = du.prefer_static_value(x)
with self.cached_session():
self.assertAllEqual(np.zeros((2, 3)),
value.eval(feed_dict={x: np.zeros((2, 3))}))
@test_util.run_deprecated_v1
def testDynamicValueEndsUpBeingEmpty(self):
x = array_ops.placeholder(np.int32, shape=None)
value = du.prefer_static_value(x)
with self.cached_session():
self.assertAllEqual(np.array([]), value.eval(feed_dict={x: []}))
@test_util.run_deprecated_v1
def testDynamicValueEndsUpBeingScalar(self):
x = array_ops.placeholder(np.int32, shape=None)
value = du.prefer_static_value(x)
with self.cached_session():
self.assertAllEqual(np.array(1), value.eval(feed_dict={x: 1}))
class FillTriangularTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def _fill_triangular(self, x, upper=False):
"""Numpy implementation of `fill_triangular`."""
x = np.asarray(x)
# Formula derived by solving for n: m = n(n+1)/2.
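    # Solving n**2 + n - 2m = 0 gives n = sqrt(0.25 + 2m) - 0.5.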
m = np.int32(x.shape[-1])
n = np.sqrt(0.25 + 2. * m) - 0.5
if n != np.floor(n):
raise ValueError("Invalid shape.")
n = np.int32(n)
# We can't do: `x[..., -(n**2-m):]` because this doesn't correctly handle
# `m == n == 1`. Hence, we do absolute indexing.
x_tail = x[..., (m - (n * n - m)):]
y = np.concatenate(
[x, x_tail[..., ::-1]] if upper else [x_tail, x[..., ::-1]],
axis=-1)
y = y.reshape(np.concatenate([
np.int32(x.shape[:-1]),
np.int32([n, n]),
], axis=0))
return np.triu(y) if upper else np.tril(y)
def _run_test(self, x_, use_deferred_shape=False, **kwargs):
x_ = np.asarray(x_)
with self.cached_session() as sess:
static_shape = None if use_deferred_shape else x_.shape
x_pl = array_ops.placeholder_with_default(x_, shape=static_shape)
# Add `zeros_like(x)` such that x's value and gradient are identical. We
# do this so we can ensure each gradient value is mapped to the right
      # gradient location. (Not doing this means the gradient wrt `x` is simply
# `ones_like(x)`.)
# Note:
# zeros_like_x_pl == zeros_like(x_pl)
# gradient(zeros_like_x_pl, x_pl) == x_pl - 1
zeros_like_x_pl = (x_pl * array_ops.stop_gradient(x_pl - 1.)
- array_ops.stop_gradient(x_pl * (x_pl - 1.)))
x = x_pl + zeros_like_x_pl
actual = du.fill_triangular(x, **kwargs)
grad_actual = gradients_impl.gradients(actual, x_pl)[0]
[actual_, grad_actual_] = sess.run([actual, grad_actual],
feed_dict={x_pl: x_})
expected = self._fill_triangular(x_, **kwargs)
if use_deferred_shape:
self.assertEqual(None, actual.shape)
else:
self.assertAllEqual(expected.shape, actual.shape)
self.assertAllClose(expected, actual_, rtol=1e-8, atol=1e-9)
self.assertAllClose(x_, grad_actual_, rtol=1e-8, atol=1e-9)
@test_util.run_deprecated_v1
def testCorrectlyMakes1x1TriLower(self):
self._run_test(self._rng.randn(3, int(1*2/2)))
@test_util.run_deprecated_v1
def testCorrectlyMakesNoBatchTriLower(self):
self._run_test(self._rng.randn(int(4*5/2)))
@test_util.run_deprecated_v1
def testCorrectlyMakesBatchTriLower(self):
self._run_test(self._rng.randn(2, 3, int(3*4/2)))
@test_util.run_deprecated_v1
def testCorrectlyMakesBatchTriLowerUnknownShape(self):
self._run_test(self._rng.randn(2, 3, int(3*4/2)), use_deferred_shape=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatch7x7TriLowerUnknownShape(self):
self._run_test(self._rng.randn(2, 3, int(7*8/2)), use_deferred_shape=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatch7x7TriLower(self):
self._run_test(self._rng.randn(2, 3, int(7*8/2)))
@test_util.run_deprecated_v1
def testCorrectlyMakes1x1TriUpper(self):
self._run_test(self._rng.randn(3, int(1*2/2)), upper=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesNoBatchTriUpper(self):
self._run_test(self._rng.randn(int(4*5/2)), upper=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatchTriUpper(self):
self._run_test(self._rng.randn(2, 2, int(3*4/2)), upper=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatchTriUpperUnknownShape(self):
self._run_test(self._rng.randn(2, 2, int(3*4/2)),
use_deferred_shape=True,
upper=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatch7x7TriUpperUnknownShape(self):
self._run_test(self._rng.randn(2, 3, int(7*8/2)),
use_deferred_shape=True,
upper=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatch7x7TriUpper(self):
self._run_test(self._rng.randn(2, 3, int(7*8/2)), upper=True)
class FillTriangularInverseTest(FillTriangularTest):
def _run_test(self, x_, use_deferred_shape=False, **kwargs):
x_ = np.asarray(x_)
with self.cached_session() as sess:
static_shape = None if use_deferred_shape else x_.shape
x_pl = array_ops.placeholder_with_default(x_, shape=static_shape)
zeros_like_x_pl = (x_pl * array_ops.stop_gradient(x_pl - 1.)
- array_ops.stop_gradient(x_pl * (x_pl - 1.)))
x = x_pl + zeros_like_x_pl
actual = du.fill_triangular(x, **kwargs)
inverse_actual = du.fill_triangular_inverse(actual, **kwargs)
inverse_actual_ = sess.run(
inverse_actual,
feed_dict={x_pl: x_})
if use_deferred_shape:
self.assertEqual(None, inverse_actual.shape)
else:
self.assertAllEqual(x_.shape, inverse_actual.shape)
self.assertAllEqual(x_, inverse_actual_)
class ReduceWeightedLogSumExp(test.TestCase):
def _reduce_weighted_logsumexp(self, logx, w, axis, keep_dims=False):
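    # Signed logsumexp: shift by the max for stability, then return
    # log|sum_i w_i exp(logx_i - m)| + m along with the sign of the sum.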
m = np.max(logx, axis=axis, keepdims=True)
sum_ = np.sum(w * np.exp(logx - m), axis=axis, keepdims=keep_dims)
sgn = np.sign(sum_)
if not keep_dims:
m = np.squeeze(m, axis=axis)
return m + np.log(sgn * sum_), sgn
@test_util.run_deprecated_v1
def testNoWeights(self):
logx_ = np.array([[0., -1, 1000.],
[0, 1, -1000.],
[-5, 0, 5]])
with self.cached_session() as sess:
logx = constant_op.constant(logx_)
expected = math_ops.reduce_logsumexp(logx, axis=-1)
grad_expected = gradients_impl.gradients(expected, logx)[0]
actual, actual_sgn = du.reduce_weighted_logsumexp(
logx, axis=-1, return_sign=True)
grad_actual = gradients_impl.gradients(actual, logx)[0]
[actual_, actual_sgn_, grad_actual_,
expected_, grad_expected_] = sess.run([
actual, actual_sgn, grad_actual,
expected, grad_expected])
self.assertAllEqual(expected_, actual_)
self.assertAllEqual(grad_expected_, grad_actual_)
self.assertAllEqual([1., 1, 1], actual_sgn_)
def testNegativeWeights(self):
logx_ = np.array([[0., -1, 1000.],
[0, 1, -1000.],
[-5, 0, 5]])
w_ = np.array([[1., 1, -1],
[1, -2, 1],
[1, 0, 1]])
expected, _ = self._reduce_weighted_logsumexp(logx_, w_, axis=-1)
with self.cached_session() as sess:
logx = constant_op.constant(logx_)
w = constant_op.constant(w_)
actual, actual_sgn = du.reduce_weighted_logsumexp(
logx, w, axis=-1, return_sign=True)
[actual_, actual_sgn_] = self.evaluate([actual, actual_sgn])
self.assertAllEqual(expected, actual_)
self.assertAllEqual([-1., -1, 1], actual_sgn_)
def testKeepDims(self):
logx_ = np.array([[0., -1, 1000.],
[0, 1, -1000.],
[-5, 0, 5]])
w_ = np.array([[1., 1, -1],
[1, -2, 1],
[1, 0, 1]])
expected, _ = self._reduce_weighted_logsumexp(
logx_, w_, axis=-1, keep_dims=True)
with self.cached_session() as sess:
logx = constant_op.constant(logx_)
w = constant_op.constant(w_)
actual, actual_sgn = du.reduce_weighted_logsumexp(
logx, w, axis=-1, return_sign=True, keep_dims=True)
[actual_, actual_sgn_] = self.evaluate([actual, actual_sgn])
self.assertAllEqual(expected, actual_)
self.assertAllEqual([[-1.], [-1], [1]], actual_sgn_)
def testDocString(self):
"""This test verifies the correctness of the docstring examples."""
with self.cached_session():
x = constant_op.constant([[0., 0, 0],
[0, 0, 0]])
w = constant_op.constant([[-1., 1, 1],
[1, 1, 1]])
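      # With x == 0, exp(x) == 1 everywhere, so each weighted sum reduces to
      # a plain sum of the weights (e.g. -1 + 1 + 1 + 1 + 1 + 1 = 4 overall).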
self.assertAllClose(
np.log(4), self.evaluate(du.reduce_weighted_logsumexp(x, w)))
with np.errstate(divide="ignore"):
self.assertAllClose(
np.log([0, 2, 2]),
self.evaluate(du.reduce_weighted_logsumexp(x, w, axis=0)))
self.assertAllClose(
np.log([1, 3]),
self.evaluate(du.reduce_weighted_logsumexp(x, w, axis=1)))
self.assertAllClose(
np.log([[1], [3]]),
self.evaluate(
du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)))
self.assertAllClose(
np.log(4),
self.evaluate(du.reduce_weighted_logsumexp(x, w, axis=[0, 1])))
class GenNewSeedTest(test.TestCase):
def testOnlyNoneReturnsNone(self):
    self.assertIsNotNone(du.gen_new_seed(0, "salt"))
    self.assertIsNone(du.gen_new_seed(None, "salt"))
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/kernel_tests/softplus_op_test.py
# once TF core is accepting new ops.
class SoftplusTest(test.TestCase):
def _npSoftplus(self, np_features):
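    # softplus(x) = log(1 + exp(x)) = logaddexp(0, x), computed stably.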
np_features = np.asarray(np_features)
zero = np.asarray(0).astype(np_features.dtype)
return np.logaddexp(zero, np_features)
def _testSoftplus(self, np_features, use_gpu=False):
np_features = np.asarray(np_features)
np_softplus = self._npSoftplus(np_features)
with self.session(use_gpu=use_gpu) as sess:
softplus = nn_ops.softplus(np_features)
softplus_inverse = du.softplus_inverse(softplus)
[tf_softplus, tf_softplus_inverse] = sess.run([
softplus, softplus_inverse])
self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
rtol = {"float16": 0.07, "float32": 0.003, "float64": 0.002}.get(
str(np_features.dtype), 1e-6)
# This will test that we correctly computed the inverse by verifying we
# recovered the original input.
self.assertAllCloseAccordingToType(
np_features, tf_softplus_inverse,
atol=0., rtol=rtol)
self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
tf_softplus > 0)
self.assertShapeEqual(np_softplus, softplus)
self.assertShapeEqual(np_softplus, softplus_inverse)
self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
np.isfinite(tf_softplus))
self.assertAllEqual(np.ones_like(tf_softplus_inverse).astype(np.bool),
np.isfinite(tf_softplus_inverse))
@test_util.run_deprecated_v1
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
lower = {np.float16: -15, np.float32: -50, np.float64: -50}.get(t, -100)
upper = {np.float16: 50, np.float32: 50, np.float64: 50}.get(t, 100)
self._testSoftplus(
np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
[2, -1]),
use_gpu=False)
self._testSoftplus(
np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
[2, -1]),
use_gpu=True)
log_eps = np.log(np.finfo(t).eps)
one = t(1)
ten = t(10)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=False)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
            log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=True)
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
      tf_logging.vlog(2, "softplus (float) gradient err = %s", err)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testInverseSoftplusGradientNeverNan(self):
with self.cached_session():
# Note that this range contains both zero and inf.
x = constant_op.constant(np.logspace(-8, 6).astype(np.float16))
y = du.softplus_inverse(x)
grads = self.evaluate(gradients_impl.gradients(y, x)[0])
# Equivalent to `assertAllFalse` (if it existed).
self.assertAllEqual(np.zeros_like(grads).astype(np.bool), np.isnan(grads))
@test_util.run_deprecated_v1
def testInverseSoftplusGradientFinite(self):
with self.cached_session():
# This range of x is all finite, and so is 1 / x. So the
# gradient and its approximations should be finite as well.
x = constant_op.constant(np.logspace(-4.8, 4.5).astype(np.float16))
y = du.softplus_inverse(x)
grads = self.evaluate(gradients_impl.gradients(y, x)[0])
# Equivalent to `assertAllTrue` (if it existed).
self.assertAllEqual(
np.ones_like(grads).astype(np.bool), np.isfinite(grads))
@test_util.run_all_in_graph_and_eager_modes
class ArgumentsTest(test.TestCase):
def testNoArguments(self):
def foo():
return du.parent_frame_arguments()
self.assertEqual({}, foo())
def testPositionalArguments(self):
def foo(a, b, c, d): # pylint: disable=unused-argument
return du.parent_frame_arguments()
self.assertEqual({"a": 1, "b": 2, "c": 3, "d": 4}, foo(1, 2, 3, 4))
# Tests that it does not matter where this function is called, and
# no other local variables are returned back.
def bar(a, b, c):
unused_x = a * b
unused_y = c * 3
return du.parent_frame_arguments()
self.assertEqual({"a": 1, "b": 2, "c": 3}, bar(1, 2, 3))
def testOverloadedArgumentValues(self):
def foo(a, b, c): # pylint: disable=unused-argument
a = 42
b = 31
c = 42
return du.parent_frame_arguments()
self.assertEqual({"a": 42, "b": 31, "c": 42}, foo(1, 2, 3))
def testKeywordArguments(self):
def foo(**kwargs): # pylint: disable=unused-argument
return du.parent_frame_arguments()
self.assertEqual({"a": 1, "b": 2, "c": 3, "d": 4}, foo(a=1, b=2, c=3, d=4))
def testPositionalKeywordArgs(self):
def foo(a, b, c, **kwargs): # pylint: disable=unused-argument
return du.parent_frame_arguments()
self.assertEqual({"a": 1, "b": 2, "c": 3}, foo(a=1, b=2, c=3))
self.assertEqual({"a": 1, "b": 2, "c": 3, "unicorn": None},
foo(a=1, b=2, c=3, unicorn=None))
def testNoVarargs(self):
def foo(a, b, c, *varargs, **kwargs): # pylint: disable=unused-argument
return du.parent_frame_arguments()
self.assertEqual({"a": 1, "b": 2, "c": 3}, foo(a=1, b=2, c=3))
self.assertEqual({"a": 1, "b": 2, "c": 3}, foo(1, 2, 3, *[1, 2, 3]))
self.assertEqual({"a": 1, "b": 2, "c": 3, "unicorn": None},
foo(1, 2, 3, unicorn=None))
self.assertEqual({"a": 1, "b": 2, "c": 3, "unicorn": None},
foo(1, 2, 3, *[1, 2, 3], unicorn=None))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/util_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Student t distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import math
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import student_t
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
@test_util.run_all_in_graph_and_eager_modes
class StudentTTest(test.TestCase):
def testStudentPDFAndLogPDF(self):
batch_size = 6
df = constant_op.constant([3.] * batch_size)
mu = constant_op.constant([7.] * batch_size)
sigma = constant_op.constant([8.] * batch_size)
df_v = 3.
mu_v = 7.
sigma_v = 8.
t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
student = student_t.StudentT(df, loc=mu, scale=-sigma)
log_pdf = student.log_prob(t)
    self.assertEqual(log_pdf.get_shape(), (6,))
log_pdf_values = self.evaluate(log_pdf)
pdf = student.prob(t)
    self.assertEqual(pdf.get_shape(), (6,))
pdf_values = self.evaluate(pdf)
if not stats:
return
expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.log(expected_pdf), log_pdf_values)
self.assertAllClose(expected_pdf, pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testStudentLogPDFMultidimensional(self):
batch_size = 6
df = constant_op.constant([[1.5, 7.2]] * batch_size)
mu = constant_op.constant([[3., -3.]] * batch_size)
sigma = constant_op.constant(
[[-math.sqrt(10.), math.sqrt(15.)]] * batch_size)
df_v = np.array([1.5, 7.2])
mu_v = np.array([3., -3.])
sigma_v = np.array([np.sqrt(10.), np.sqrt(15.)])
t = np.array([[-2.5, 2.5, 4., 0., -1., 2.]], dtype=np.float32).T
student = student_t.StudentT(df, loc=mu, scale=sigma)
log_pdf = student.log_prob(t)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.get_shape(), (6, 2))
pdf = student.prob(t)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.get_shape(), (6, 2))
if not stats:
return
expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.log(expected_pdf), log_pdf_values)
self.assertAllClose(expected_pdf, pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testStudentCDFAndLogCDF(self):
batch_size = 6
df = constant_op.constant([3.] * batch_size)
mu = constant_op.constant([7.] * batch_size)
sigma = constant_op.constant([-8.] * batch_size)
df_v = 3.
mu_v = 7.
sigma_v = 8.
t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
student = student_t.StudentT(df, loc=mu, scale=sigma)
log_cdf = student.log_cdf(t)
    self.assertEqual(log_cdf.get_shape(), (6,))
log_cdf_values = self.evaluate(log_cdf)
cdf = student.cdf(t)
    self.assertEqual(cdf.get_shape(), (6,))
cdf_values = self.evaluate(cdf)
if not stats:
return
expected_log_cdf = stats.t.logcdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_cdf = stats.t.cdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_cdf, log_cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(
np.log(expected_cdf), log_cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(expected_cdf, cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(
np.exp(expected_log_cdf), cdf_values, atol=0., rtol=1e-5)
def testStudentEntropy(self):
df_v = np.array([[2., 3., 7.]]) # 1x3
mu_v = np.array([[1., -1, 0]]) # 1x3
sigma_v = np.array([[1., -2., 3.]]).T # transposed => 3x1
student = student_t.StudentT(df=df_v, loc=mu_v, scale=sigma_v)
ent = student.entropy()
ent_values = self.evaluate(ent)
# Help scipy broadcast to 3x3
ones = np.array([[1, 1, 1]])
sigma_bc = np.abs(sigma_v) * ones
mu_bc = ones.T * mu_v
df_bc = ones.T * df_v
if not stats:
return
expected_entropy = stats.t.entropy(
np.reshape(df_bc, [-1]),
loc=np.reshape(mu_bc, [-1]),
scale=np.reshape(sigma_bc, [-1]))
expected_entropy = np.reshape(expected_entropy, df_bc.shape)
self.assertAllClose(expected_entropy, ent_values)
def testStudentSample(self):
df = constant_op.constant(4.)
mu = constant_op.constant(3.)
sigma = constant_op.constant(-math.sqrt(10.))
df_v = 4.
mu_v = 3.
sigma_v = np.sqrt(10.)
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
n_val = 200000
self.assertEqual(sample_values.shape, (n_val,))
self.assertAllClose(sample_values.mean(), mu_v, rtol=0.1, atol=0)
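    # For df > 2 the Student t variance is sigma**2 * df / (df - 2); with
    # df = 4 and sigma**2 = 10 this is 20, which the sample variance below
    # should match to within 10%.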
self.assertAllClose(
sample_values.var(), sigma_v**2 * df_v / (df_v - 2), rtol=0.1, atol=0)
self._checkKLApprox(df_v, mu_v, sigma_v, sample_values)
# Test that sampling with the same seed twice gives the same results.
def testStudentSampleMultipleTimes(self):
df = constant_op.constant(4.)
mu = constant_op.constant(3.)
sigma = constant_op.constant(math.sqrt(10.))
n = constant_op.constant(100)
random_seed.set_random_seed(654321)
student = student_t.StudentT(df=df, loc=mu, scale=sigma, name="student_t1")
samples1 = self.evaluate(student.sample(n, seed=123456))
random_seed.set_random_seed(654321)
student2 = student_t.StudentT(df=df, loc=mu, scale=sigma, name="student_t2")
samples2 = self.evaluate(student2.sample(n, seed=123456))
self.assertAllClose(samples1, samples2)
def testStudentSampleSmallDfNoNan(self):
df_v = [1e-1, 1e-5, 1e-10, 1e-20]
df = constant_op.constant(df_v)
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=1., scale=1.)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
n_val = 200000
self.assertEqual(sample_values.shape, (n_val, 4))
self.assertTrue(np.all(np.logical_not(np.isnan(sample_values))))
def testStudentSampleMultiDimensional(self):
batch_size = 7
df = constant_op.constant([[5., 7.]] * batch_size)
mu = constant_op.constant([[3., -3.]] * batch_size)
sigma = constant_op.constant(
[[math.sqrt(10.), math.sqrt(15.)]] * batch_size)
df_v = [5., 7.]
mu_v = [3., -3.]
sigma_v = [np.sqrt(10.), np.sqrt(15.)]
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
self.assertEqual(samples.get_shape(), (200000, batch_size, 2))
self.assertAllClose(
sample_values[:, 0, 0].mean(), mu_v[0], rtol=0.1, atol=0)
self.assertAllClose(
sample_values[:, 0, 0].var(),
sigma_v[0]**2 * df_v[0] / (df_v[0] - 2),
rtol=0.2,
atol=0)
self._checkKLApprox(df_v[0], mu_v[0], sigma_v[0], sample_values[:, 0, 0])
self.assertAllClose(
sample_values[:, 0, 1].mean(), mu_v[1], rtol=0.1, atol=0)
self.assertAllClose(
sample_values[:, 0, 1].var(),
sigma_v[1]**2 * df_v[1] / (df_v[1] - 2),
rtol=0.2,
atol=0)
self._checkKLApprox(df_v[1], mu_v[1], sigma_v[1], sample_values[:, 0, 1])
def _checkKLApprox(self, df, mu, sigma, samples):
n = samples.size
np.random.seed(137)
if not stats:
return
sample_scipy = stats.t.rvs(df, loc=mu, scale=sigma, size=n)
covg = 0.99
r = stats.t.interval(covg, df, loc=mu, scale=sigma)
bins = 100
hist, _ = np.histogram(samples, bins=bins, range=r)
hist_scipy, _ = np.histogram(sample_scipy, bins=bins, range=r)
self.assertGreater(hist.sum(), n * (covg - .01))
self.assertGreater(hist_scipy.sum(), n * (covg - .01))
hist_min1 = hist + 1. # put at least one item in each bucket
hist_norm = hist_min1 / hist_min1.sum()
hist_scipy_min1 = hist_scipy + 1. # put at least one item in each bucket
hist_scipy_norm = hist_scipy_min1 / hist_scipy_min1.sum()
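    # kl_appx below is the discrete KL divergence KL(scipy || tf) over the
    # histogram bins, sum_i p_i * log(p_i / q_i); the add-one smoothing
    # above guarantees every bin has nonzero mass.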
kl_appx = np.sum(np.log(hist_scipy_norm / hist_norm) * hist_scipy_norm)
self.assertLess(kl_appx, 1)
def testBroadcastingParams(self):
def _check(student):
self.assertEqual(student.mean().get_shape(), (3,))
self.assertEqual(student.variance().get_shape(), (3,))
self.assertEqual(student.entropy().get_shape(), (3,))
self.assertEqual(student.log_prob(2.).get_shape(), (3,))
self.assertEqual(student.prob(2.).get_shape(), (3,))
self.assertEqual(student.sample(37).get_shape(), (37, 3,))
_check(student_t.StudentT(df=[2., 3., 4.,], loc=2., scale=1.))
_check(student_t.StudentT(df=7., loc=[2., 3., 4.,], scale=1.))
_check(student_t.StudentT(df=7., loc=3., scale=[2., 3., 4.,]))
def testBroadcastingPdfArgs(self):
def _assert_shape(student, arg, shape):
self.assertEqual(student.log_prob(arg).get_shape(), shape)
self.assertEqual(student.prob(arg).get_shape(), shape)
def _check(student):
_assert_shape(student, 2., (3,))
xs = np.array([2., 3., 4.], dtype=np.float32)
_assert_shape(student, xs, (3,))
xs = np.array([xs])
_assert_shape(student, xs, (1, 3))
xs = xs.T
_assert_shape(student, xs, (3, 3))
_check(student_t.StudentT(df=[2., 3., 4.,], loc=2., scale=1.))
_check(student_t.StudentT(df=7., loc=[2., 3., 4.,], scale=1.))
_check(student_t.StudentT(df=7., loc=3., scale=[2., 3., 4.,]))
def _check2d(student):
_assert_shape(student, 2., (1, 3))
xs = np.array([2., 3., 4.], dtype=np.float32)
_assert_shape(student, xs, (1, 3))
xs = np.array([xs])
_assert_shape(student, xs, (1, 3))
xs = xs.T
_assert_shape(student, xs, (3, 3))
_check2d(student_t.StudentT(df=[[2., 3., 4.,]], loc=2., scale=1.))
_check2d(student_t.StudentT(df=7., loc=[[2., 3., 4.,]], scale=1.))
_check2d(student_t.StudentT(df=7., loc=3., scale=[[2., 3., 4.,]]))
def _check2d_rows(student):
_assert_shape(student, 2., (3, 1))
xs = np.array([2., 3., 4.], dtype=np.float32) # (3,)
_assert_shape(student, xs, (3, 3))
xs = np.array([xs]) # (1,3)
_assert_shape(student, xs, (3, 3))
xs = xs.T # (3,1)
_assert_shape(student, xs, (3, 1))
_check2d_rows(student_t.StudentT(df=[[2.], [3.], [4.]], loc=2., scale=1.))
_check2d_rows(student_t.StudentT(df=7., loc=[[2.], [3.], [4.]], scale=1.))
_check2d_rows(student_t.StudentT(df=7., loc=3., scale=[[2.], [3.], [4.]]))
def testMeanAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
mu = [1., 3.3, 4.4]
student = student_t.StudentT(df=[3., 5., 7.], loc=mu, scale=[3., 2., 1.])
mean = self.evaluate(student.mean())
self.assertAllClose([1., 3.3, 4.4], mean)
def testMeanAllowNanStatsIsFalseRaisesWhenBatchMemberIsUndefined(self):
mu = [1., 3.3, 4.4]
student = student_t.StudentT(
df=[0.5, 5., 7.], loc=mu, scale=[3., 2., 1.], allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.mean())
def testMeanAllowNanStatsIsTrueReturnsNaNForUndefinedBatchMembers(self):
mu = [-2, 0., 1., 3.3, 4.4]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(
df=[0.5, 1., 3., 5., 7.], loc=mu, scale=sigma, allow_nan_stats=True)
mean = self.evaluate(student.mean())
self.assertAllClose([np.nan, np.nan, 1., 3.3, 4.4], mean)
def testVarianceAllowNanStatsTrueReturnsNaNforUndefinedBatchMembers(self):
# df = 0.5 ==> undefined mean ==> undefined variance.
# df = 1.5 ==> infinite variance.
df = [0.5, 1.5, 3., 5., 7.]
mu = [-2, 0., 1., 3.3, 4.4]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(
df=df, loc=mu, scale=sigma, allow_nan_stats=True)
var = self.evaluate(student.variance())
if not stats:
return
expected_var = [
stats.t.var(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
# Slicing off first element due to nan/inf mismatch in different SciPy
# versions.
self.assertAllClose(expected_var[1:], var[1:])
def testVarianceAllowNanStatsFalseGivesCorrectValueForDefinedBatchMembers(
self):
# df = 1.5 ==> infinite variance.
df = [1.5, 3., 5., 7.]
mu = [0., 1., 3.3, 4.4]
sigma = [4., 3., 2., 1.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
var = self.evaluate(student.variance())
if not stats:
return
expected_var = [
stats.t.var(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
self.assertAllClose(expected_var, var)
def testVarianceAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
# df <= 1 ==> variance not defined
student = student_t.StudentT(df=1., loc=0., scale=1., allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.variance())
# df <= 1 ==> variance not defined
student = student_t.StudentT(
df=0.5, loc=0., scale=1., allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.variance())
def testStd(self):
# Defined for all batch members.
df = [3.5, 5., 3., 5., 7.]
mu = [-2.2]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
# Test broadcast of mu across shape of df/sigma
stddev = self.evaluate(student.stddev())
mu *= len(df)
if not stats:
return
expected_stddev = [
stats.t.std(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
self.assertAllClose(expected_stddev, stddev)
def testMode(self):
df = [0.5, 1., 3]
mu = [-1, 0., 1]
sigma = [5., 4., 3.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
# Test broadcast of mu across shape of df/sigma
mode = self.evaluate(student.mode())
self.assertAllClose([-1., 0, 1], mode)
def testPdfOfSample(self):
student = student_t.StudentT(df=3., loc=np.pi, scale=1.)
num = 20000
samples = student.sample(num, seed=123456)
pdfs = student.prob(samples)
mean = student.mean()
mean_pdf = student.prob(student.mean())
sample_vals, pdf_vals, mean_val, mean_pdf_val = self.evaluate(
[samples, pdfs, student.mean(), mean_pdf])
self.assertEqual(samples.get_shape(), (num,))
self.assertEqual(pdfs.get_shape(), (num,))
self.assertEqual(mean.get_shape(), ())
self.assertNear(np.pi, np.mean(sample_vals), err=0.1)
self.assertNear(np.pi, mean_val, err=1e-6)
# Verify integral over sample*pdf ~= 1.
# Tolerance increased since eager was getting a value of 1.002041.
self._assertIntegral(sample_vals, pdf_vals, err=5e-2)
if not stats:
return
self.assertNear(stats.t.pdf(np.pi, 3., loc=np.pi), mean_pdf_val, err=1e-6)
def testFullyReparameterized(self):
df = constant_op.constant(2.0)
mu = constant_op.constant(1.0)
sigma = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(df)
tape.watch(mu)
tape.watch(sigma)
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
samples = student.sample(100)
grad_df, grad_mu, grad_sigma = tape.gradient(samples, [df, mu, sigma])
self.assertIsNotNone(grad_df)
self.assertIsNotNone(grad_mu)
self.assertIsNotNone(grad_sigma)
def testPdfOfSampleMultiDims(self):
student = student_t.StudentT(df=[7., 11.], loc=[[5.], [6.]], scale=3.)
self.assertAllEqual([], student.event_shape)
self.assertAllEqual([], self.evaluate(student.event_shape_tensor()))
self.assertAllEqual([2, 2], student.batch_shape)
self.assertAllEqual([2, 2], self.evaluate(student.batch_shape_tensor()))
num = 50000
samples = student.sample(num, seed=123456)
pdfs = student.prob(samples)
sample_vals, pdf_vals = self.evaluate([samples, pdfs])
self.assertEqual(samples.get_shape(), (num, 2, 2))
self.assertEqual(pdfs.get_shape(), (num, 2, 2))
self.assertNear(5., np.mean(sample_vals[:, 0, :]), err=0.1)
self.assertNear(6., np.mean(sample_vals[:, 1, :]), err=0.1)
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.05)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.05)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.05)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.05)
if not stats:
return
self.assertNear(
        stats.t.var(7., loc=0., scale=3.),  # loc does not affect the variance
np.var(sample_vals[:, :, 0]),
err=1.0)
self.assertNear(
        stats.t.var(11., loc=0., scale=3.),  # loc does not affect the variance
np.var(sample_vals[:, :, 1]),
err=1.0)
def _assertIntegral(self, sample_vals, pdf_vals, err=1.5e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (sample_vals.min() - 1000, 0)
total = 0
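    # Trapezoidal rule over the sorted (sample, pdf) pairs: each interval
    # contributes (x_k - x_{k-1}) * (pdf_k + pdf_{k-1}) / 2, so the total
    # should approach 1 when pdf_vals is the density of the sampled
    # distribution.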
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testNegativeDofFails(self):
with self.assertRaisesOpError(r"Condition x > 0 did not hold"):
student = student_t.StudentT(
df=[2, -5.], loc=0., scale=1., validate_args=True, name="S")
self.evaluate(student.mean())
def testStudentTWithAbsDfSoftplusScale(self):
df = constant_op.constant([-3.2, -4.6])
mu = constant_op.constant([-4.2, 3.4])
sigma = constant_op.constant([-6.4, -8.8])
student = student_t.StudentTWithAbsDfSoftplusScale(
df=df, loc=mu, scale=sigma)
self.assertAllClose(
math_ops.floor(self.evaluate(math_ops.abs(df))),
self.evaluate(student.df))
self.assertAllClose(self.evaluate(mu), self.evaluate(student.loc))
self.assertAllClose(
self.evaluate(nn_ops.softplus(sigma)), self.evaluate(student.scale))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/student_t_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Bernoulli distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import bernoulli
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
special = try_import("scipy.special")
def make_bernoulli(batch_shape, dtype=dtypes.int32):
p = np.random.uniform(size=list(batch_shape))
p = constant_op.constant(p, dtype=dtypes.float32)
return bernoulli.Bernoulli(probs=p, dtype=dtype)
def entropy(p):
q = 1. - p
return -q * np.log(q) - p * np.log(p)
class BernoulliTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testP(self):
p = [0.2, 0.4]
dist = bernoulli.Bernoulli(probs=p)
self.assertAllClose(p, self.evaluate(dist.probs))
@test_util.run_in_graph_and_eager_modes
def testLogits(self):
logits = [-42., 42.]
dist = bernoulli.Bernoulli(logits=logits)
self.assertAllClose(logits, self.evaluate(dist.logits))
if not special:
return
self.assertAllClose(special.expit(logits), self.evaluate(dist.probs))
p = [0.01, 0.99, 0.42]
dist = bernoulli.Bernoulli(probs=p)
self.assertAllClose(special.logit(p), self.evaluate(dist.logits))
@test_util.run_in_graph_and_eager_modes
def testInvalidP(self):
invalid_ps = [1.01, 2.]
for p in invalid_ps:
with self.assertRaisesOpError("probs has components greater than 1"):
dist = bernoulli.Bernoulli(probs=p, validate_args=True)
self.evaluate(dist.probs)
invalid_ps = [-0.01, -3.]
for p in invalid_ps:
with self.assertRaisesOpError("Condition x >= 0"):
dist = bernoulli.Bernoulli(probs=p, validate_args=True)
self.evaluate(dist.probs)
valid_ps = [0.0, 0.5, 1.0]
for p in valid_ps:
dist = bernoulli.Bernoulli(probs=p)
self.assertEqual(p, self.evaluate(dist.probs)) # Should not fail
@test_util.run_in_graph_and_eager_modes
def testShapes(self):
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_bernoulli(batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape.as_list())
self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))
self.assertAllEqual([], dist.event_shape.as_list())
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
@test_util.run_in_graph_and_eager_modes
def testDtype(self):
dist = make_bernoulli([])
self.assertEqual(dist.dtype, dtypes.int32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.probs.dtype, dist.mean().dtype)
self.assertEqual(dist.probs.dtype, dist.variance().dtype)
self.assertEqual(dist.probs.dtype, dist.stddev().dtype)
self.assertEqual(dist.probs.dtype, dist.entropy().dtype)
self.assertEqual(dist.probs.dtype, dist.prob(0).dtype)
self.assertEqual(dist.probs.dtype, dist.prob(0.5).dtype)
self.assertEqual(dist.probs.dtype, dist.log_prob(0).dtype)
self.assertEqual(dist.probs.dtype, dist.log_prob(0.5).dtype)
dist64 = make_bernoulli([], dtypes.int64)
self.assertEqual(dist64.dtype, dtypes.int64)
self.assertEqual(dist64.dtype, dist64.sample(5).dtype)
self.assertEqual(dist64.dtype, dist64.mode().dtype)
@test_util.run_in_graph_and_eager_modes
def _testPmf(self, **kwargs):
dist = bernoulli.Bernoulli(**kwargs)
# pylint: disable=bad-continuation
xs = [
0,
[1],
[1, 0],
[[1, 0]],
[[1, 0], [1, 1]],
]
expected_pmfs = [
[[0.8, 0.6], [0.7, 0.4]],
[[0.2, 0.4], [0.3, 0.6]],
[[0.2, 0.6], [0.3, 0.4]],
[[0.2, 0.6], [0.3, 0.4]],
[[0.2, 0.6], [0.3, 0.6]],
]
# pylint: enable=bad-continuation
for x, expected_pmf in zip(xs, expected_pmfs):
self.assertAllClose(self.evaluate(dist.prob(x)), expected_pmf)
self.assertAllClose(self.evaluate(dist.log_prob(x)), np.log(expected_pmf))
@test_util.run_deprecated_v1
def testPmfCorrectBroadcastDynamicShape(self):
with self.cached_session():
p = array_ops.placeholder(dtype=dtypes.float32)
dist = bernoulli.Bernoulli(probs=p)
event1 = [1, 0, 1]
event2 = [[1, 0, 1]]
self.assertAllClose(
dist.prob(event1).eval({
p: [0.2, 0.3, 0.4]
}), [0.2, 0.7, 0.4])
self.assertAllClose(
dist.prob(event2).eval({
p: [0.2, 0.3, 0.4]
}), [[0.2, 0.7, 0.4]])
@test_util.run_in_graph_and_eager_modes
def testPmfInvalid(self):
p = [0.1, 0.2, 0.7]
dist = bernoulli.Bernoulli(probs=p, validate_args=True)
with self.assertRaisesOpError("must be non-negative."):
self.evaluate(dist.prob([1, 1, -1]))
with self.assertRaisesOpError("Elements cannot exceed 1."):
self.evaluate(dist.prob([2, 0, 1]))
@test_util.run_in_graph_and_eager_modes
def testPmfWithP(self):
p = [[0.2, 0.4], [0.3, 0.6]]
self._testPmf(probs=p)
if not special:
return
self._testPmf(logits=special.logit(p))
@test_util.run_in_graph_and_eager_modes
def testPmfWithFloatArgReturnsXEntropy(self):
p = [[0.2], [0.4], [0.3], [0.6]]
samps = [0, 0.1, 0.8]
self.assertAllClose(
np.float32(samps) * np.log(np.float32(p)) +
(1 - np.float32(samps)) * np.log(1 - np.float32(p)),
self.evaluate(
bernoulli.Bernoulli(probs=p, validate_args=False).log_prob(samps)))
@test_util.run_deprecated_v1
def testBroadcasting(self):
with self.cached_session():
p = array_ops.placeholder(dtypes.float32)
dist = bernoulli.Bernoulli(probs=p)
self.assertAllClose(np.log(0.5), dist.log_prob(1).eval({p: 0.5}))
self.assertAllClose(
np.log([0.5, 0.5, 0.5]), dist.log_prob([1, 1, 1]).eval({
p: 0.5
}))
self.assertAllClose(
np.log([0.5, 0.5, 0.5]), dist.log_prob(1).eval({
p: [0.5, 0.5, 0.5]
}))
@test_util.run_deprecated_v1
def testPmfShapes(self):
with self.cached_session():
p = array_ops.placeholder(dtypes.float32, shape=[None, 1])
dist = bernoulli.Bernoulli(probs=p)
self.assertEqual(2, len(dist.log_prob(1).eval({p: [[0.5], [0.5]]}).shape))
dist = bernoulli.Bernoulli(probs=0.5)
self.assertEqual(2, len(self.evaluate(dist.log_prob([[1], [1]])).shape))
dist = bernoulli.Bernoulli(probs=0.5)
self.assertEqual((), dist.log_prob(1).get_shape())
    self.assertEqual((1,), dist.log_prob([1]).get_shape())
self.assertEqual((2, 1), dist.log_prob([[1], [1]]).get_shape())
dist = bernoulli.Bernoulli(probs=[[0.5], [0.5]])
self.assertEqual((2, 1), dist.log_prob(1).get_shape())
@test_util.run_in_graph_and_eager_modes
def testBoundaryConditions(self):
dist = bernoulli.Bernoulli(probs=1.0)
self.assertAllClose(np.nan, self.evaluate(dist.log_prob(0)))
self.assertAllClose([np.nan], [self.evaluate(dist.log_prob(1))])
@test_util.run_in_graph_and_eager_modes
def testEntropyNoBatch(self):
p = 0.2
dist = bernoulli.Bernoulli(probs=p)
self.assertAllClose(self.evaluate(dist.entropy()), entropy(p))
@test_util.run_in_graph_and_eager_modes
def testEntropyWithBatch(self):
p = [[0.1, 0.7], [0.2, 0.6]]
dist = bernoulli.Bernoulli(probs=p, validate_args=False)
self.assertAllClose(
self.evaluate(dist.entropy()),
[[entropy(0.1), entropy(0.7)], [entropy(0.2),
entropy(0.6)]])
@test_util.run_in_graph_and_eager_modes
def testSampleN(self):
p = [0.2, 0.6]
dist = bernoulli.Bernoulli(probs=p)
n = 100000
samples = dist.sample(n)
samples.set_shape([n, 2])
self.assertEqual(samples.dtype, dtypes.int32)
sample_values = self.evaluate(samples)
self.assertTrue(np.all(sample_values >= 0))
self.assertTrue(np.all(sample_values <= 1))
# Note that the standard error for the sample mean is ~ sqrt(p * (1 - p) /
# n). This means that the tolerance is very sensitive to the value of p
# as well as n.
self.assertAllClose(p, np.mean(sample_values, axis=0), atol=1e-2)
self.assertEqual(set([0, 1]), set(sample_values.flatten()))
# In this test we're just interested in verifying there isn't a crash
# owing to mismatched types. b/30940152
dist = bernoulli.Bernoulli(np.log([.2, .4]))
self.assertAllEqual((1, 2), dist.sample(1, seed=42).get_shape().as_list())
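  # Illustrative sketch (not part of the original test): the atol above is
  # sized against the binomial standard error. With p = 0.6 and n = 100000,
  # sqrt(p * (1 - p) / n) is about 0.0015, so atol=1e-2 allows roughly six
  # standard errors. The helper name below is hypothetical.
  def _bernoulli_sample_mean_stderr(self, p, n):
    # Standard error of the sample mean of n Bernoulli(p) draws.
    return np.sqrt(np.asarray(p) * (1. - np.asarray(p)) / n)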
@test_util.run_in_graph_and_eager_modes
def testNotReparameterized(self):
p = constant_op.constant([0.2, 0.6])
with backprop.GradientTape() as tape:
tape.watch(p)
dist = bernoulli.Bernoulli(probs=p)
samples = dist.sample(100)
grad_p = tape.gradient(samples, p)
self.assertIsNone(grad_p)
@test_util.run_deprecated_v1
def testSampleActsLikeSampleN(self):
with self.cached_session() as sess:
p = [0.2, 0.6]
dist = bernoulli.Bernoulli(probs=p)
n = 1000
seed = 42
self.assertAllEqual(
self.evaluate(dist.sample(n, seed)),
self.evaluate(dist.sample(n, seed)))
n = array_ops.placeholder(dtypes.int32)
sample1, sample2 = sess.run([dist.sample(n, seed), dist.sample(n, seed)],
feed_dict={n: 1000})
self.assertAllEqual(sample1, sample2)
@test_util.run_in_graph_and_eager_modes
def testMean(self):
p = np.array([[0.2, 0.7], [0.5, 0.4]], dtype=np.float32)
dist = bernoulli.Bernoulli(probs=p)
self.assertAllEqual(self.evaluate(dist.mean()), p)
@test_util.run_in_graph_and_eager_modes
def testVarianceAndStd(self):
var = lambda p: p * (1. - p)
p = [[0.2, 0.7], [0.5, 0.4]]
dist = bernoulli.Bernoulli(probs=p)
self.assertAllClose(
self.evaluate(dist.variance()),
np.array([[var(0.2), var(0.7)], [var(0.5), var(0.4)]],
dtype=np.float32))
self.assertAllClose(
self.evaluate(dist.stddev()),
np.array([[np.sqrt(var(0.2)), np.sqrt(var(0.7))],
[np.sqrt(var(0.5)), np.sqrt(var(0.4))]],
dtype=np.float32))
@test_util.run_in_graph_and_eager_modes
def testBernoulliBernoulliKL(self):
batch_size = 6
a_p = np.array([0.5] * batch_size, dtype=np.float32)
b_p = np.array([0.4] * batch_size, dtype=np.float32)
a = bernoulli.Bernoulli(probs=a_p)
b = bernoulli.Bernoulli(probs=b_p)
kl = kullback_leibler.kl_divergence(a, b)
kl_val = self.evaluate(kl)
kl_expected = (a_p * np.log(a_p / b_p) + (1. - a_p) * np.log(
(1. - a_p) / (1. - b_p)))
self.assertEqual(kl.get_shape(), (batch_size,))
self.assertAllClose(kl_val, kl_expected)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/bernoulli_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Categorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal
from tensorflow.python.platform import test
def make_categorical(batch_shape, num_classes, dtype=dtypes.int32):
logits = random_ops.random_uniform(
list(batch_shape) + [num_classes], -10, 10, dtype=dtypes.float32) - 50.
return categorical.Categorical(logits, dtype=dtype)
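# Subtracting 50. shifts every logit far negative without changing the
# implied distribution (softmax is invariant to adding a constant), which
# exercises the numerically careful log-space code paths.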
class CategoricalTest(test.TestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def testP(self):
p = [0.2, 0.8]
dist = categorical.Categorical(probs=p)
with self.cached_session():
self.assertAllClose(p, dist.probs.eval())
self.assertAllEqual([2], dist.logits.get_shape())
@test_util.run_deprecated_v1
def testLogits(self):
p = np.array([0.2, 0.8], dtype=np.float32)
logits = np.log(p) - 50.
dist = categorical.Categorical(logits=logits)
with self.cached_session():
self.assertAllEqual([2], dist.probs.get_shape())
self.assertAllEqual([2], dist.logits.get_shape())
self.assertAllClose(dist.probs.eval(), p)
self.assertAllClose(dist.logits.eval(), logits)
@test_util.run_deprecated_v1
def testShapes(self):
with self.cached_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape, dist.batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape)
self.assertAllEqual([], dist.event_shape_tensor().eval())
self.assertEqual(10, dist.event_size.eval())
# event_size is available as a constant because the shape is
# known at graph build time.
self.assertEqual(10, tensor_util.constant_value(dist.event_size))
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_categorical(
batch_shape, constant_op.constant(
10, dtype=dtypes.int32))
self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape)
self.assertAllEqual([], dist.event_shape_tensor().eval())
self.assertEqual(10, dist.event_size.eval())
def testDtype(self):
dist = make_categorical([], 5, dtype=dtypes.int32)
self.assertEqual(dist.dtype, dtypes.int32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
dist = make_categorical([], 5, dtype=dtypes.int64)
self.assertEqual(dist.dtype, dtypes.int64)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.probs.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dist.entropy().dtype)
self.assertEqual(
dist.logits.dtype, dist.prob(np.array(
0, dtype=np.int64)).dtype)
self.assertEqual(
dist.logits.dtype, dist.log_prob(np.array(
0, dtype=np.int64)).dtype)
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
dist = make_categorical([], 5, dtype=dtype)
self.assertEqual(dist.dtype, dtype)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.cached_session():
logits = array_ops.placeholder(dtype=dtypes.float32)
dist = categorical.Categorical(logits)
sample = dist.sample()
# Will sample class 1.
sample_value = sample.eval(feed_dict={logits: [-1000.0, 1000.0]})
self.assertEqual(1, sample_value)
# Batch entry 0 will sample class 1, batch entry 1 will sample class 0.
sample_value_batch = sample.eval(
feed_dict={logits: [[-1000.0, 1000.0], [1000.0, -1000.0]]})
self.assertAllEqual([1, 0], sample_value_batch)
@test_util.run_deprecated_v1
def testPMFWithBatch(self):
histograms = [[0.2, 0.8], [0.6, 0.4]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
with self.cached_session():
self.assertAllClose(dist.prob([0, 1]).eval(), [0.2, 0.4])
@test_util.run_deprecated_v1
def testPMFNoBatch(self):
histograms = [0.2, 0.8]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
with self.cached_session():
self.assertAllClose(dist.prob(0).eval(), 0.2)
@test_util.run_deprecated_v1
def testCDFWithDynamicEventShapeKnownNdims(self):
"""Test that dynamically-sized events with unknown shape work."""
batch_size = 2
histograms = array_ops.placeholder(dtype=dtypes.float32,
shape=(batch_size, None))
event = array_ops.placeholder(dtype=dtypes.float32, shape=(batch_size,))
dist = categorical.Categorical(probs=histograms)
cdf_op = dist.cdf(event)
# Feed values into the placeholder with different shapes
# three classes.
event_feed_one = [0, 1]
histograms_feed_one = [[0.5, 0.3, 0.2], [1.0, 0.0, 0.0]]
expected_cdf_one = [0.0, 1.0]
feed_dict_one = {
histograms: histograms_feed_one,
event: event_feed_one
}
# six classes.
event_feed_two = [2, 5]
histograms_feed_two = [[0.9, 0.0, 0.0, 0.0, 0.0, 0.1],
[0.15, 0.2, 0.05, 0.35, 0.13, 0.12]]
expected_cdf_two = [0.9, 0.88]
feed_dict_two = {
histograms: histograms_feed_two,
event: event_feed_two
}
with self.cached_session() as sess:
actual_cdf_one = sess.run(cdf_op, feed_dict=feed_dict_one)
actual_cdf_two = sess.run(cdf_op, feed_dict=feed_dict_two)
self.assertAllClose(actual_cdf_one, expected_cdf_one)
self.assertAllClose(actual_cdf_two, expected_cdf_two)
@parameterized.named_parameters(
("test1", [0, 1], [[0.5, 0.3, 0.2], [1.0, 0.0, 0.0]], [0.0, 1.0]),
("test2", [2, 5], [[0.9, 0.0, 0.0, 0.0, 0.0, 0.1],
[0.15, 0.2, 0.05, 0.35, 0.13, 0.12]], [0.9, 0.88]))
def testCDFWithDynamicEventShapeUnknownNdims(
self, events, histograms, expected_cdf):
"""Test that dynamically-sized events with unknown shape work."""
event_ph = array_ops.placeholder_with_default(events, shape=None)
histograms_ph = array_ops.placeholder_with_default(histograms, shape=None)
dist = categorical.Categorical(probs=histograms_ph)
cdf_op = dist.cdf(event_ph)
actual_cdf = self.evaluate(cdf_op)
self.assertAllClose(actual_cdf, expected_cdf)
@test_util.run_deprecated_v1
def testCDFWithBatch(self):
histograms = [[0.1, 0.2, 0.3, 0.25, 0.15],
[0.0, 0.75, 0.2, 0.05, 0.0]]
event = [0, 3]
expected_cdf = [0.0, 0.95]
dist = categorical.Categorical(probs=histograms)
cdf_op = dist.cdf(event)
with self.cached_session():
self.assertAllClose(cdf_op.eval(), expected_cdf)
@test_util.run_deprecated_v1
def testCDFNoBatch(self):
histogram = [0.1, 0.2, 0.3, 0.4]
event = 2
expected_cdf = 0.3
dist = categorical.Categorical(probs=histogram)
cdf_op = dist.cdf(event)
with self.cached_session():
self.assertAlmostEqual(cdf_op.eval(), expected_cdf)
@test_util.run_deprecated_v1
def testCDFBroadcasting(self):
# shape: [batch=2, n_bins=3]
histograms = [[0.2, 0.1, 0.7],
[0.3, 0.45, 0.25]]
# shape: [batch=3, batch=2]
devent = [
[0, 0],
[1, 1],
[2, 2]
]
dist = categorical.Categorical(probs=histograms)
# We test that the probabilities are correctly broadcasted over the
# additional leading batch dimension of size 3.
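    # For a Categorical, cdf(k) sums the probabilities of all classes
    # strictly below k; e.g. event 2 against the second histogram gives
    # 0.3 + 0.45 = 0.75.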
expected_cdf_result = np.zeros((3, 2))
expected_cdf_result[0, 0] = 0
expected_cdf_result[0, 1] = 0
expected_cdf_result[1, 0] = 0.2
expected_cdf_result[1, 1] = 0.3
expected_cdf_result[2, 0] = 0.3
expected_cdf_result[2, 1] = 0.75
with self.cached_session():
self.assertAllClose(dist.cdf(devent).eval(), expected_cdf_result)
def testBroadcastWithBatchParamsAndBiggerEvent(self):
## The parameters have a single batch dimension, and the event has two.
# param shape is [3 x 4], where 4 is the number of bins (non-batch dim).
cat_params_py = [
[0.2, 0.15, 0.35, 0.3],
[0.1, 0.05, 0.68, 0.17],
[0.1, 0.05, 0.68, 0.17]
]
# event shape = [5, 3], both are "batch" dimensions.
disc_event_py = [
[0, 1, 2],
[1, 2, 3],
[0, 0, 0],
[1, 1, 1],
[2, 1, 0]
]
# shape is [3]
normal_params_py = [
-10.0,
120.0,
50.0
]
# shape is [5, 3]
real_event_py = [
[-1.0, 0.0, 1.0],
[100.0, 101, -50],
[90, 90, 90],
[-4, -400, 20.0],
[0.0, 0.0, 0.0]
]
cat_params_tf = array_ops.constant(cat_params_py)
disc_event_tf = array_ops.constant(disc_event_py)
cat = categorical.Categorical(probs=cat_params_tf)
normal_params_tf = array_ops.constant(normal_params_py)
real_event_tf = array_ops.constant(real_event_py)
norm = normal.Normal(loc=normal_params_tf, scale=1.0)
# Check that normal and categorical have the same broadcasting behaviour.
to_run = {
"cat_prob": cat.prob(disc_event_tf),
"cat_log_prob": cat.log_prob(disc_event_tf),
"cat_cdf": cat.cdf(disc_event_tf),
"cat_log_cdf": cat.log_cdf(disc_event_tf),
"norm_prob": norm.prob(real_event_tf),
"norm_log_prob": norm.log_prob(real_event_tf),
"norm_cdf": norm.cdf(real_event_tf),
"norm_log_cdf": norm.log_cdf(real_event_tf),
}
with self.cached_session() as sess:
run_result = self.evaluate(to_run)
self.assertAllEqual(run_result["cat_prob"].shape,
run_result["norm_prob"].shape)
self.assertAllEqual(run_result["cat_log_prob"].shape,
run_result["norm_log_prob"].shape)
self.assertAllEqual(run_result["cat_cdf"].shape,
run_result["norm_cdf"].shape)
self.assertAllEqual(run_result["cat_log_cdf"].shape,
run_result["norm_log_cdf"].shape)
@test_util.run_deprecated_v1
def testLogPMF(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = categorical.Categorical(logits)
with self.cached_session():
self.assertAllClose(dist.log_prob([0, 1]).eval(), np.log([0.2, 0.4]))
self.assertAllClose(dist.log_prob([0.0, 1.0]).eval(), np.log([0.2, 0.4]))
@test_util.run_deprecated_v1
def testEntropyNoBatch(self):
logits = np.log([0.2, 0.8]) - 50.
dist = categorical.Categorical(logits)
with self.cached_session():
self.assertAllClose(dist.entropy().eval(),
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)))
@test_util.run_deprecated_v1
def testEntropyWithBatch(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = categorical.Categorical(logits)
with self.cached_session():
self.assertAllClose(dist.entropy().eval(), [
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),
-(0.6 * np.log(0.6) + 0.4 * np.log(0.4))
])
@test_util.run_deprecated_v1
def testEntropyGradient(self):
with self.cached_session() as sess:
logits = constant_op.constant([[1., 2., 3.], [2., 5., 1.]])
probabilities = nn_ops.softmax(logits)
log_probabilities = nn_ops.log_softmax(logits)
true_entropy = - math_ops.reduce_sum(
probabilities * log_probabilities, axis=-1)
categorical_distribution = categorical.Categorical(probs=probabilities)
categorical_entropy = categorical_distribution.entropy()
      # Both entropies should be differentiable with respect to the logits.
true_entropy_g = gradients_impl.gradients(true_entropy, [logits])
categorical_entropy_g = gradients_impl.gradients(
categorical_entropy, [logits])
res = sess.run({"true_entropy": true_entropy,
"categorical_entropy": categorical_entropy,
"true_entropy_g": true_entropy_g,
"categorical_entropy_g": categorical_entropy_g})
self.assertAllClose(res["true_entropy"],
res["categorical_entropy"])
self.assertAllClose(res["true_entropy_g"],
res["categorical_entropy_g"])
def testSample(self):
with self.cached_session():
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
n = 10000
samples = dist.sample(n, seed=123)
samples.set_shape([n, 1, 2])
self.assertEqual(samples.dtype, dtypes.int32)
sample_values = self.evaluate(samples)
self.assertFalse(np.any(sample_values < 0))
self.assertFalse(np.any(sample_values > 1))
self.assertAllClose(
[[0.2, 0.4]], np.mean(
sample_values == 0, axis=0), atol=1e-2)
self.assertAllClose(
[[0.8, 0.6]], np.mean(
sample_values == 1, axis=0), atol=1e-2)
def testSampleWithSampleShape(self):
with self.cached_session():
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
samples = dist.sample((100, 100), seed=123)
prob = dist.prob(samples)
prob_val = self.evaluate(prob)
self.assertAllClose(
[0.2**2 + 0.8**2], [prob_val[:, :, :, 0].mean()], atol=1e-2)
self.assertAllClose(
[0.4**2 + 0.6**2], [prob_val[:, :, :, 1].mean()], atol=1e-2)
def testNotReparameterized(self):
p = constant_op.constant([0.3, 0.3, 0.4])
with backprop.GradientTape() as tape:
tape.watch(p)
dist = categorical.Categorical(p)
samples = dist.sample(100)
grad_p = tape.gradient(samples, p)
self.assertIsNone(grad_p)
def testLogPMFBroadcasting(self):
with self.cached_session():
# 1 x 2 x 2
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
prob = dist.prob(1)
self.assertAllClose([[0.8, 0.6]], self.evaluate(prob))
prob = dist.prob([1])
self.assertAllClose([[0.8, 0.6]], self.evaluate(prob))
prob = dist.prob([0, 1])
self.assertAllClose([[0.2, 0.6]], self.evaluate(prob))
prob = dist.prob([[0, 1]])
self.assertAllClose([[0.2, 0.6]], self.evaluate(prob))
prob = dist.prob([[[0, 1]]])
self.assertAllClose([[[0.2, 0.6]]], self.evaluate(prob))
prob = dist.prob([[1, 0], [0, 1]])
self.assertAllClose([[0.8, 0.4], [0.2, 0.6]], self.evaluate(prob))
prob = dist.prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertAllClose([[[0.8, 0.6], [0.8, 0.4]], [[0.8, 0.4], [0.2, 0.6]]],
self.evaluate(prob))
def testLogPMFShape(self):
with self.cached_session():
# shape [1, 2, 2]
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms))
log_prob = dist.log_prob([0, 1])
self.assertEqual(2, log_prob.get_shape().ndims)
self.assertAllEqual([1, 2], log_prob.get_shape())
log_prob = dist.log_prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertEqual(3, log_prob.get_shape().ndims)
self.assertAllEqual([2, 2, 2], log_prob.get_shape())
def testLogPMFShapeNoBatch(self):
histograms = [0.2, 0.8]
dist = categorical.Categorical(math_ops.log(histograms))
log_prob = dist.log_prob(0)
self.assertEqual(0, log_prob.get_shape().ndims)
self.assertAllEqual([], log_prob.get_shape())
log_prob = dist.log_prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertEqual(3, log_prob.get_shape().ndims)
self.assertAllEqual([2, 2, 2], log_prob.get_shape())
@test_util.run_deprecated_v1
def testMode(self):
with self.cached_session():
histograms = [[[0.2, 0.8], [0.6, 0.4]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
self.assertAllEqual(dist.mode().eval(), [[1, 0]])
@test_util.run_deprecated_v1
def testCategoricalCategoricalKL(self):
def np_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / exp_logits.sum(axis=-1, keepdims=True)
with self.cached_session() as sess:
for categories in [2, 4]:
for batch_size in [1, 10]:
a_logits = np.random.randn(batch_size, categories)
b_logits = np.random.randn(batch_size, categories)
a = categorical.Categorical(logits=a_logits)
b = categorical.Categorical(logits=b_logits)
kl = kullback_leibler.kl_divergence(a, b)
kl_val = self.evaluate(kl)
# Make sure KL(a||a) is 0
kl_same = sess.run(kullback_leibler.kl_divergence(a, a))
prob_a = np_softmax(a_logits)
prob_b = np_softmax(b_logits)
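          # Closed form: KL(a || b) = sum_k p_a(k) * (log p_a(k) - log p_b(k)).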
kl_expected = np.sum(prob_a * (np.log(prob_a) - np.log(prob_b)),
axis=-1)
self.assertEqual(kl.get_shape(), (batch_size,))
self.assertAllClose(kl_val, kl_expected)
self.assertAllClose(kl_same, np.zeros_like(kl_expected))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/categorical_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Identity Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops.distributions import bijector_test_util
from tensorflow.python.ops.distributions import identity_bijector
from tensorflow.python.platform import test
class IdentityBijectorTest(test.TestCase):
"""Tests correctness of the Y = g(X) = X transformation."""
def testBijector(self):
bijector = identity_bijector.Identity(validate_args=True)
self.assertEqual("identity", bijector.name)
x = [[[0.], [1.]]]
self.assertAllEqual(x, self.evaluate(bijector.forward(x)))
self.assertAllEqual(x, self.evaluate(bijector.inverse(x)))
self.assertAllEqual(
0.,
self.evaluate(
bijector.inverse_log_det_jacobian(x, event_ndims=3)))
self.assertAllEqual(
0.,
self.evaluate(
bijector.forward_log_det_jacobian(x, event_ndims=3)))
@test_util.run_deprecated_v1
def testScalarCongruency(self):
with self.cached_session():
bijector = identity_bijector.Identity()
bijector_test_util.assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/identity_bijector_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class BaseBijectorTest(test.TestCase):
"""Tests properties of the Bijector base-class."""
def testIsAbstract(self):
with self.assertRaisesRegexp(TypeError,
("Can't instantiate abstract class Bijector "
"with abstract methods __init__")):
bijector.Bijector() # pylint: disable=abstract-class-instantiated
def testDefaults(self):
class _BareBonesBijector(bijector.Bijector):
"""Minimal specification of a `Bijector`."""
def __init__(self):
super(_BareBonesBijector, self).__init__(forward_min_event_ndims=0)
bij = _BareBonesBijector()
self.assertEqual([], bij.graph_parents)
self.assertEqual(False, bij.is_constant_jacobian)
self.assertEqual(False, bij.validate_args)
self.assertEqual(None, bij.dtype)
self.assertEqual("bare_bones_bijector", bij.name)
for shape in [[], [1, 2], [1, 2, 3]]:
      forward_event_shape_ = self.evaluate(
          bij.forward_event_shape_tensor(shape))
      inverse_event_shape_ = self.evaluate(
          bij.inverse_event_shape_tensor(shape))
self.assertAllEqual(shape, forward_event_shape_)
self.assertAllEqual(shape, bij.forward_event_shape(shape))
self.assertAllEqual(shape, inverse_event_shape_)
self.assertAllEqual(shape, bij.inverse_event_shape(shape))
with self.assertRaisesRegexp(
NotImplementedError, "inverse not implemented"):
bij.inverse(0)
with self.assertRaisesRegexp(
NotImplementedError, "forward not implemented"):
bij.forward(0)
with self.assertRaisesRegexp(
NotImplementedError, "inverse_log_det_jacobian not implemented"):
bij.inverse_log_det_jacobian(0, event_ndims=0)
with self.assertRaisesRegexp(
NotImplementedError, "forward_log_det_jacobian not implemented"):
bij.forward_log_det_jacobian(0, event_ndims=0)
class IntentionallyMissingError(Exception):
pass
class BrokenBijector(bijector.Bijector):
"""Forward and inverse are not inverses of each other."""
def __init__(
self, forward_missing=False, inverse_missing=False, validate_args=False):
super(BrokenBijector, self).__init__(
validate_args=validate_args, forward_min_event_ndims=0, name="broken")
self._forward_missing = forward_missing
self._inverse_missing = inverse_missing
def _forward(self, x):
if self._forward_missing:
raise IntentionallyMissingError
return 2 * x
def _inverse(self, y):
if self._inverse_missing:
raise IntentionallyMissingError
return y / 2.
def _inverse_log_det_jacobian(self, y): # pylint:disable=unused-argument
if self._inverse_missing:
raise IntentionallyMissingError
return -math_ops.log(2.)
def _forward_log_det_jacobian(self, x): # pylint:disable=unused-argument
if self._forward_missing:
raise IntentionallyMissingError
return math_ops.log(2.)
class BijectorTestEventNdims(test.TestCase):
def testBijectorNonIntegerEventNdims(self):
bij = BrokenBijector()
with self.assertRaisesRegexp(ValueError, "Expected integer"):
bij.forward_log_det_jacobian(1., event_ndims=1.5)
with self.assertRaisesRegexp(ValueError, "Expected integer"):
bij.inverse_log_det_jacobian(1., event_ndims=1.5)
def testBijectorArrayEventNdims(self):
bij = BrokenBijector()
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
bij.forward_log_det_jacobian(1., event_ndims=(1, 2))
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
bij.inverse_log_det_jacobian(1., event_ndims=(1, 2))
@test_util.run_deprecated_v1
def testBijectorDynamicEventNdims(self):
bij = BrokenBijector(validate_args=True)
event_ndims = array_ops.placeholder(dtype=np.int32, shape=None)
with self.cached_session():
with self.assertRaisesOpError("Expected scalar"):
bij.forward_log_det_jacobian(1., event_ndims=event_ndims).eval({
event_ndims: (1, 2)})
with self.assertRaisesOpError("Expected scalar"):
bij.inverse_log_det_jacobian(1., event_ndims=event_ndims).eval({
event_ndims: (1, 2)})
@six.add_metaclass(abc.ABCMeta)
class BijectorCachingTestBase(object):
@abc.abstractproperty
def broken_bijector_cls(self):
    # Subclasses return a BrokenBijector-type Bijector class, since that is
    # what exercises the caching.
raise IntentionallyMissingError("Not implemented")
def testCachingOfForwardResults(self):
broken_bijector = self.broken_bijector_cls(inverse_missing=True)
x = constant_op.constant(1.1)
# Call forward and forward_log_det_jacobian one-by-one (not together).
y = broken_bijector.forward(x)
_ = broken_bijector.forward_log_det_jacobian(x, event_ndims=0)
# Now, everything should be cached if the argument is y.
broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
try:
broken_bijector.inverse(y)
broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
except IntentionallyMissingError:
raise AssertionError("Tests failed! Cached values not used.")
# Different event_ndims should not be cached.
with self.assertRaises(IntentionallyMissingError):
broken_bijector.inverse_log_det_jacobian(y, event_ndims=1)
def testCachingOfInverseResults(self):
broken_bijector = self.broken_bijector_cls(forward_missing=True)
y = constant_op.constant(1.1)
# Call inverse and inverse_log_det_jacobian one-by-one (not together).
x = broken_bijector.inverse(y)
_ = broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
# Now, everything should be cached if the argument is x.
try:
broken_bijector.forward(x)
broken_bijector.forward_log_det_jacobian(x, event_ndims=0)
except IntentionallyMissingError:
raise AssertionError("Tests failed! Cached values not used.")
# Different event_ndims should not be cached.
with self.assertRaises(IntentionallyMissingError):
broken_bijector.forward_log_det_jacobian(x, event_ndims=1)
class BijectorCachingTest(BijectorCachingTestBase, test.TestCase):
"""Test caching with BrokenBijector."""
@property
def broken_bijector_cls(self):
return BrokenBijector
class ExpOnlyJacobian(bijector.Bijector):
"""Only used for jacobian calculations."""
def __init__(self, forward_min_event_ndims=0):
super(ExpOnlyJacobian, self).__init__(
validate_args=False,
is_constant_jacobian=False,
forward_min_event_ndims=forward_min_event_ndims,
name="exp")
def _inverse_log_det_jacobian(self, y):
return -math_ops.log(y)
def _forward_log_det_jacobian(self, x):
return math_ops.log(x)
class ConstantJacobian(bijector.Bijector):
"""Only used for jacobian calculations."""
def __init__(self, forward_min_event_ndims=0):
super(ConstantJacobian, self).__init__(
validate_args=False,
is_constant_jacobian=True,
forward_min_event_ndims=forward_min_event_ndims,
name="c")
def _inverse_log_det_jacobian(self, y):
return constant_op.constant(2., y.dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(-2., x.dtype)
class BijectorReduceEventDimsTest(test.TestCase):
"""Test caching with BrokenBijector."""
def testReduceEventNdimsForward(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian()
self.assertAllClose(
np.log(x),
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
np.sum(np.log(x), axis=-1),
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
np.sum(np.log(x), axis=(-1, -2)),
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=2)))
def testReduceEventNdimsForwardRaiseError(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
with self.assertRaisesRegexp(ValueError, "must be larger than"):
bij.forward_log_det_jacobian(x, event_ndims=0)
def testReduceEventNdimsInverse(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian()
self.assertAllClose(
-np.log(x),
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
np.sum(-np.log(x), axis=-1),
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
np.sum(-np.log(x), axis=(-1, -2)),
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=2)))
def testReduceEventNdimsInverseRaiseError(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
with self.assertRaisesRegexp(ValueError, "must be larger than"):
bij.inverse_log_det_jacobian(x, event_ndims=0)
def testReduceEventNdimsForwardConstJacobian(self):
x = [[[1., 2.], [3., 4.]]]
bij = ConstantJacobian()
self.assertAllClose(
-2.,
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
-4.,
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
-8.,
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=2)))
def testReduceEventNdimsInverseConstJacobian(self):
x = [[[1., 2.], [3., 4.]]]
bij = ConstantJacobian()
self.assertAllClose(
2.,
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
4.,
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
8.,
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=2)))
@test_util.run_deprecated_v1
def testHandlesNonStaticEventNdims(self):
x_ = [[[1., 2.], [3., 4.]]]
x = array_ops.placeholder_with_default(x_, shape=None)
event_ndims = array_ops.placeholder(dtype=np.int32, shape=[])
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
bij.inverse_log_det_jacobian(x, event_ndims=event_ndims)
with self.cached_session() as sess:
ildj = sess.run(bij.inverse_log_det_jacobian(x, event_ndims=event_ndims),
feed_dict={event_ndims: 1})
self.assertAllClose(-np.log(x_), ildj)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/bijector_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import dirichlet_multinomial
from tensorflow.python.platform import test
ds = dirichlet_multinomial
class DirichletMultinomialTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
@test_util.run_deprecated_v1
def testSimpleShapes(self):
with self.cached_session():
alpha = np.random.rand(3)
dist = ds.DirichletMultinomial(1., alpha)
self.assertEqual(3, dist.event_shape_tensor().eval())
self.assertAllEqual([], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
@test_util.run_deprecated_v1
def testComplexShapes(self):
with self.cached_session():
alpha = np.random.rand(3, 2, 2)
n = [[3., 2], [4, 5], [6, 7]]
dist = ds.DirichletMultinomial(n, alpha)
self.assertEqual(2, dist.event_shape_tensor().eval())
self.assertAllEqual([3, 2], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
@test_util.run_deprecated_v1
def testNproperty(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.cached_session():
dist = ds.DirichletMultinomial(n, alpha)
self.assertEqual([1, 1], dist.total_count.get_shape())
self.assertAllClose(n, dist.total_count.eval())
@test_util.run_deprecated_v1
def testAlphaProperty(self):
alpha = [[1., 2, 3]]
with self.cached_session():
dist = ds.DirichletMultinomial(1, alpha)
self.assertEqual([1, 3], dist.concentration.get_shape())
self.assertAllClose(alpha, dist.concentration.eval())
@test_util.run_deprecated_v1
def testPmfNandCountsAgree(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.cached_session():
dist = ds.DirichletMultinomial(n, alpha, validate_args=True)
dist.prob([2., 3, 0]).eval()
dist.prob([3., 0, 2]).eval()
with self.assertRaisesOpError("must be non-negative"):
dist.prob([-1., 4, 2]).eval()
with self.assertRaisesOpError(
"last-dimension must sum to `self.total_count`"):
dist.prob([3., 3, 0]).eval()
@test_util.run_deprecated_v1
def testPmfNonIntegerCounts(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.cached_session():
dist = ds.DirichletMultinomial(n, alpha, validate_args=True)
dist.prob([2., 3, 0]).eval()
dist.prob([3., 0, 2]).eval()
dist.prob([3.0, 0, 2.0]).eval()
      # The counts sum to n here, but the fractional-components check fails
      # at run time.
placeholder = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(
"cannot contain fractional components"):
dist.prob(placeholder).eval(feed_dict={placeholder: [1.0, 2.5, 1.5]})
dist = ds.DirichletMultinomial(n, alpha, validate_args=False)
dist.prob([1., 2., 3.]).eval()
# Non-integer arguments work.
dist.prob([1.0, 2.5, 1.5]).eval()
def testPmfBothZeroBatches(self):
    # The probability of one vote falling into class k is the mean for
    # class k.
with self.cached_session():
# Both zero-batches. No broadcast
alpha = [1., 2]
counts = [1., 0]
dist = ds.DirichletMultinomial(1., alpha)
pmf = dist.prob(counts)
self.assertAllClose(1 / 3., self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
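  # Worked example (illustrative): with alpha = [1., 2] and a single draw,
  # the class probabilities equal the Dirichlet mean alpha / sum(alpha) =
  # [1/3, 2/3], so prob(counts=[1., 0]) = 1/3, as asserted above.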
def testPmfBothZeroBatchesNontrivialN(self):
    # The probability of one vote falling into class k is the mean for
    # class k.
with self.cached_session():
# Both zero-batches. No broadcast
alpha = [1., 2]
counts = [3., 2]
dist = ds.DirichletMultinomial(5., alpha)
pmf = dist.prob(counts)
self.assertAllClose(1 / 7., self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesMultidimensionalN(self):
    # The probability of one vote falling into class k is the mean for
    # class k.
with self.cached_session():
alpha = [1., 2]
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
dist = ds.DirichletMultinomial(n, alpha)
pmf = dist.prob(counts)
self.assertAllClose([[1 / 7., 1 / 7., 1 / 7.]] * 4, self.evaluate(pmf))
self.assertEqual((4, 3), pmf.get_shape())
def testPmfAlphaStretchedInBroadcastWhenSameRank(self):
    # The probability of one vote falling into class k is the mean for
    # class k.
with self.cached_session():
alpha = [[1., 2]]
counts = [[1., 0], [0., 1]]
dist = ds.DirichletMultinomial([1.], alpha)
pmf = dist.prob(counts)
self.assertAllClose([1 / 3., 2 / 3.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
def testPmfAlphaStretchedInBroadcastWhenLowerRank(self):
    # The probability of one vote falling into class k is the mean for
    # class k.
with self.cached_session():
alpha = [1., 2]
counts = [[1., 0], [0., 1]]
pmf = ds.DirichletMultinomial(1., alpha).prob(counts)
self.assertAllClose([1 / 3., 2 / 3.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
    # The probability of one vote falling into class k is the mean for
    # class k.
with self.cached_session():
alpha = [[1., 2], [2., 3]]
counts = [[1., 0]]
pmf = ds.DirichletMultinomial([1., 1.], alpha).prob(counts)
self.assertAllClose([1 / 3., 2 / 5.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
    # The probability of one vote falling into class k is the mean for
    # class k.
with self.cached_session():
alpha = [[1., 2], [2., 3]]
counts = [1., 0]
pmf = ds.DirichletMultinomial(1., alpha).prob(counts)
self.assertAllClose([1 / 3., 2 / 5.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
@test_util.run_deprecated_v1
def testPmfForOneVoteIsTheMeanWithOneRecordInput(self):
    # The probability of one vote falling into class k is the mean for
    # class k.
alpha = [1., 2, 3]
with self.cached_session():
for class_num in range(3):
counts = np.zeros([3], dtype=np.float32)
counts[class_num] = 1
dist = ds.DirichletMultinomial(1., alpha)
mean = dist.mean().eval()
pmf = dist.prob(counts).eval()
self.assertAllClose(mean[class_num], pmf)
self.assertAllEqual([3], mean.shape)
self.assertAllEqual([], pmf.shape)
@test_util.run_deprecated_v1
def testMeanDoubleTwoVotes(self):
    # The mean number of votes falling into class k for
    # DirichletMultinomial(2, alpha) is twice the mean number for
    # DirichletMultinomial(1, alpha).
alpha = [1., 2, 3]
with self.cached_session():
for class_num in range(3):
counts_one = np.zeros([3], dtype=np.float32)
counts_one[class_num] = 1.
counts_two = np.zeros([3], dtype=np.float32)
counts_two[class_num] = 2
dist1 = ds.DirichletMultinomial(1., alpha)
dist2 = ds.DirichletMultinomial(2., alpha)
mean1 = dist1.mean().eval()
mean2 = dist2.mean().eval()
self.assertAllClose(mean2[class_num], 2 * mean1[class_num])
self.assertAllEqual([3], mean1.shape)
@test_util.run_deprecated_v1
def testCovarianceFromSampling(self):
    # We will test mean, cov, var, stddev on a DirichletMultinomial
    # constructed via broadcasting between alpha and n.
alpha = np.array([[1., 2, 3],
[2.5, 4, 0.01]], dtype=np.float32)
    # Ideally we'd be able to test broadcasting, but the multinomial sampler
    # doesn't support different total counts.
n = np.float32(5)
with self.cached_session() as sess:
# batch_shape=[2], event_shape=[3]
dist = ds.DirichletMultinomial(n, alpha)
x = dist.sample(int(250e3), seed=1)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean[array_ops.newaxis, ...]
sample_cov = math_ops.reduce_mean(math_ops.matmul(
x_centered[..., array_ops.newaxis],
x_centered[..., array_ops.newaxis, :]), 0)
sample_var = array_ops.matrix_diag_part(sample_cov)
sample_stddev = math_ops.sqrt(sample_var)
[
sample_mean_,
sample_cov_,
sample_var_,
sample_stddev_,
analytic_mean,
analytic_cov,
analytic_var,
analytic_stddev,
] = sess.run([
sample_mean,
sample_cov,
sample_var,
sample_stddev,
dist.mean(),
dist.covariance(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(sample_mean_, analytic_mean, atol=0.04, rtol=0.)
self.assertAllClose(sample_cov_, analytic_cov, atol=0.05, rtol=0.)
self.assertAllClose(sample_var_, analytic_var, atol=0.05, rtol=0.)
self.assertAllClose(sample_stddev_, analytic_stddev, atol=0.02, rtol=0.)
def testCovariance(self):
# Shape [2]
alpha = [1., 2]
ns = [2., 3., 4., 5.]
alpha_0 = np.sum(alpha)
# Diagonal entries are of the form:
# Var(X_i) = n * alpha_i / alpha_sum * (1 - alpha_i / alpha_sum) *
# (alpha_sum + n) / (alpha_sum + 1)
variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum)
# Off diagonal entries are of the form:
# Cov(X_i, X_j) = -n * alpha_i * alpha_j / (alpha_sum ** 2) *
# (alpha_sum + n) / (alpha_sum + 1)
covariance_entry = lambda a, b, a_sum: -a * b / a_sum**2
# Shape [2, 2].
shared_matrix = np.array([[
variance_entry(alpha[0], alpha_0),
covariance_entry(alpha[0], alpha[1], alpha_0)
], [
covariance_entry(alpha[1], alpha[0], alpha_0),
variance_entry(alpha[1], alpha_0)
]])
with self.cached_session():
for n in ns:
# n is shape [] and alpha is shape [2].
dist = ds.DirichletMultinomial(n, alpha)
covariance = dist.covariance()
expected_covariance = n * (n + alpha_0) / (1 + alpha_0) * shared_matrix
self.assertEqual([2, 2], covariance.get_shape())
self.assertAllClose(expected_covariance, self.evaluate(covariance))
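  # Worked example (illustrative): for n = 2 and alpha_0 = 3, the formulas
  # above give Var(X_0) = 2 * (1/3) * (2/3) * (3 + 2) / (3 + 1) = 5/9 and
  # Cov(X_0, X_1) = -2 * (1 * 2) / 3**2 * (3 + 2) / (3 + 1) = -5/9.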
def testCovarianceNAlphaBroadcast(self):
alpha_v = [1., 2, 3]
alpha_0 = 6.
# Shape [4, 3]
alpha = np.array(4 * [alpha_v], dtype=np.float32)
# Shape [4, 1]
ns = np.array([[2.], [3.], [4.], [5.]], dtype=np.float32)
variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum)
covariance_entry = lambda a, b, a_sum: -a * b / a_sum**2
# Shape [4, 3, 3]
shared_matrix = np.array(
4 * [[[
variance_entry(alpha_v[0], alpha_0),
covariance_entry(alpha_v[0], alpha_v[1], alpha_0),
covariance_entry(alpha_v[0], alpha_v[2], alpha_0)
], [
covariance_entry(alpha_v[1], alpha_v[0], alpha_0),
variance_entry(alpha_v[1], alpha_0),
covariance_entry(alpha_v[1], alpha_v[2], alpha_0)
], [
covariance_entry(alpha_v[2], alpha_v[0], alpha_0),
covariance_entry(alpha_v[2], alpha_v[1], alpha_0),
variance_entry(alpha_v[2], alpha_0)
]]],
dtype=np.float32)
with self.cached_session():
# ns is shape [4, 1], and alpha is shape [4, 3].
dist = ds.DirichletMultinomial(ns, alpha)
covariance = dist.covariance()
expected_covariance = shared_matrix * (
ns * (ns + alpha_0) / (1 + alpha_0))[..., array_ops.newaxis]
self.assertEqual([4, 3, 3], covariance.get_shape())
self.assertAllClose(expected_covariance, self.evaluate(covariance))
def testCovarianceMultidimensional(self):
alpha = np.random.rand(3, 5, 4).astype(np.float32)
alpha2 = np.random.rand(6, 3, 3).astype(np.float32)
ns = np.random.randint(low=1, high=11, size=[3, 5, 1]).astype(np.float32)
ns2 = np.random.randint(low=1, high=11, size=[6, 1, 1]).astype(np.float32)
with self.cached_session():
dist = ds.DirichletMultinomial(ns, alpha)
dist2 = ds.DirichletMultinomial(ns2, alpha2)
covariance = dist.covariance()
covariance2 = dist2.covariance()
self.assertEqual([3, 5, 4, 4], covariance.get_shape())
self.assertEqual([6, 3, 3, 3], covariance2.get_shape())
def testZeroCountsResultsInPmfEqualToOne(self):
# There is only one way for zero items to be selected, and this happens with
# probability 1.
alpha = [5, 0.5]
counts = [0., 0]
with self.cached_session():
dist = ds.DirichletMultinomial(0., alpha)
pmf = dist.prob(counts)
self.assertAllClose(1.0, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testLargeTauGivesPreciseProbabilities(self):
    # If tau is large, we are doing coin flips with probabilities mu.
mu = np.array([0.1, 0.1, 0.8], dtype=np.float32)
tau = np.array([100.], dtype=np.float32)
alpha = tau * mu
    # One (three-sided) coin flip.  Prob[coin 3] = 0.8.
    # Note that since this is a single flip, the value of tau doesn't matter.
counts = [0., 0, 1]
with self.cached_session():
dist = ds.DirichletMultinomial(1., alpha)
pmf = dist.prob(counts)
self.assertAllClose(0.8, self.evaluate(pmf), atol=1e-4)
self.assertEqual((), pmf.get_shape())
    # Two (three-sided) coin flips.  Prob[coin 3] = 0.8.
counts = [0., 0, 2]
with self.cached_session():
dist = ds.DirichletMultinomial(2., alpha)
pmf = dist.prob(counts)
self.assertAllClose(0.8**2, self.evaluate(pmf), atol=1e-2)
self.assertEqual((), pmf.get_shape())
    # Three (three-sided) coin flips.
counts = [1., 0, 2]
with self.cached_session():
dist = ds.DirichletMultinomial(3., alpha)
pmf = dist.prob(counts)
self.assertAllClose(3 * 0.1 * 0.8 * 0.8, self.evaluate(pmf), atol=1e-2)
self.assertEqual((), pmf.get_shape())
def testSmallTauPrefersCorrelatedResults(self):
    # If tau is small, the correlation between draws is large, so two draws
    # of the same class are more likely.
mu = np.array([0.5, 0.5], dtype=np.float32)
tau = np.array([0.1], dtype=np.float32)
alpha = tau * mu
# If there is only one draw, it is still a coin flip, even with small tau.
counts = [1., 0]
with self.cached_session():
dist = ds.DirichletMultinomial(1., alpha)
pmf = dist.prob(counts)
self.assertAllClose(0.5, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
# If there are two draws, it is much more likely that they are the same.
counts_same = [2., 0]
counts_different = [1, 1.]
with self.cached_session():
dist = ds.DirichletMultinomial(2., alpha)
pmf_same = dist.prob(counts_same)
pmf_different = dist.prob(counts_different)
self.assertLess(5 * self.evaluate(pmf_different), self.evaluate(pmf_same))
self.assertEqual((), pmf_same.get_shape())
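  # Worked numbers (illustrative): with alpha = [0.05, 0.05] and n = 2,
  #   prob([2., 0]) = (0.05 * 1.05) / (0.1 * 1.1) ~= 0.477,
  #   prob([1., 1]) = 2 * (0.05 * 0.05) / (0.1 * 1.1) ~= 0.045,
  # so same-class draws are roughly ten times more likely, comfortably
  # satisfying the factor-of-5 assertion above.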
@test_util.run_deprecated_v1
def testNonStrictTurnsOffAllChecks(self):
# Make totally invalid input.
with self.cached_session():
alpha = [[-1., 2]] # alpha should be positive.
counts = [[1., 0], [0., -1]] # counts should be non-negative.
      n = [-5.3]  # n should be a non-negative integer equal to counts.sum.
dist = ds.DirichletMultinomial(n, alpha, validate_args=False)
dist.prob(counts).eval() # Should not raise.
@test_util.run_deprecated_v1
def testSampleUnbiasedNonScalarBatch(self):
with self.cached_session() as sess:
dist = ds.DirichletMultinomial(
total_count=5.,
concentration=1. + 2. * self._rng.rand(4, 3, 2).astype(np.float32))
n = int(3e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
# Cyclically rotate event dims left.
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.20)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
@test_util.run_deprecated_v1
def testSampleUnbiasedScalarBatch(self):
with self.cached_session() as sess:
dist = ds.DirichletMultinomial(
total_count=5.,
concentration=1. + 2. * self._rng.rand(4).astype(np.float32))
n = int(5e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
      x_centered = x - sample_mean  # No transpose needed; x is already [n, 4].
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.20)
self.assertAllEqual([4, 4], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
def testNotReparameterized(self):
total_count = constant_op.constant(5.0)
concentration = constant_op.constant([0.1, 0.1, 0.1])
with backprop.GradientTape() as tape:
tape.watch(total_count)
tape.watch(concentration)
dist = ds.DirichletMultinomial(
total_count=total_count,
concentration=concentration)
samples = dist.sample(100)
grad_total_count, grad_concentration = tape.gradient(
samples, [total_count, concentration])
self.assertIsNone(grad_total_count)
self.assertIsNone(grad_concentration)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/dirichlet_multinomial_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import dirichlet as dirichlet_lib
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
special = try_import("scipy.special")
stats = try_import("scipy.stats")
@test_util.run_all_in_graph_and_eager_modes
class DirichletTest(test.TestCase):
def testSimpleShapes(self):
alpha = np.random.rand(3)
dist = dirichlet_lib.Dirichlet(alpha)
self.assertEqual(3, self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([], self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
def testComplexShapes(self):
alpha = np.random.rand(3, 2, 2)
dist = dirichlet_lib.Dirichlet(alpha)
self.assertEqual(2, self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([3, 2], self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
def testConcentrationProperty(self):
alpha = [[1., 2, 3]]
dist = dirichlet_lib.Dirichlet(alpha)
self.assertEqual([1, 3], dist.concentration.get_shape())
self.assertAllClose(alpha, self.evaluate(dist.concentration))
def testPdfXProper(self):
alpha = [[1., 2, 3]]
dist = dirichlet_lib.Dirichlet(alpha, validate_args=True)
self.evaluate(dist.prob([.1, .3, .6]))
self.evaluate(dist.prob([.2, .3, .5]))
# Either condition can trigger.
with self.assertRaisesOpError("samples must be positive"):
self.evaluate(dist.prob([-1., 1.5, 0.5]))
with self.assertRaisesOpError("samples must be positive"):
self.evaluate(dist.prob([0., .1, .9]))
with self.assertRaisesOpError("sample last-dimension must sum to `1`"):
self.evaluate(dist.prob([.1, .2, .8]))
def testLogPdfOnBoundaryIsFiniteWhenAlphaIsOne(self):
    # Test concentration = 1. for one dimension at a time (the diagonal).
concentration = 3 * np.ones((10, 10)).astype(np.float32)
concentration[range(10), range(10)] = 1.
x = 1 / 9. * np.ones((10, 10)).astype(np.float32)
x[range(10), range(10)] = 0.
dist = dirichlet_lib.Dirichlet(concentration)
log_prob = self.evaluate(dist.log_prob(x))
self.assertAllEqual(
np.ones_like(log_prob, dtype=np.bool), np.isfinite(log_prob))
# Test when concentration[k] = 1., and x is zero at various dimensions.
dist = dirichlet_lib.Dirichlet(10 * [1.])
log_prob = self.evaluate(dist.log_prob(x))
self.assertAllEqual(
np.ones_like(log_prob, dtype=np.bool), np.isfinite(log_prob))
def testPdfZeroBatches(self):
alpha = [1., 2]
x = [.5, .5]
dist = dirichlet_lib.Dirichlet(alpha)
pdf = dist.prob(x)
self.assertAllClose(1., self.evaluate(pdf))
self.assertEqual((), pdf.get_shape())
def testPdfZeroBatchesNontrivialX(self):
alpha = [1., 2]
x = [.3, .7]
dist = dirichlet_lib.Dirichlet(alpha)
pdf = dist.prob(x)
self.assertAllClose(7. / 5, self.evaluate(pdf))
self.assertEqual((), pdf.get_shape())
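  # Worked example (illustrative): the Dirichlet([1., 2]) density is
  # Gamma(3) / (Gamma(1) * Gamma(2)) * x_0**0 * x_1**1 = 2 * x_1, so at
  # x = [.3, .7] the pdf is 2 * 0.7 = 7. / 5, as asserted above.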
def testPdfUniformZeroBatches(self):
# Corresponds to a uniform distribution
alpha = [1., 1, 1]
x = [[.2, .5, .3], [.3, .4, .3]]
dist = dirichlet_lib.Dirichlet(alpha)
pdf = dist.prob(x)
self.assertAllClose([2., 2.], self.evaluate(pdf))
    self.assertEqual((2,), pdf.get_shape())
def testPdfAlphaStretchedInBroadcastWhenSameRank(self):
alpha = [[1., 2]]
x = [[.5, .5], [.3, .7]]
dist = dirichlet_lib.Dirichlet(alpha)
pdf = dist.prob(x)
self.assertAllClose([1., 7. / 5], self.evaluate(pdf))
    self.assertEqual((2,), pdf.get_shape())
def testPdfAlphaStretchedInBroadcastWhenLowerRank(self):
alpha = [1., 2]
x = [[.5, .5], [.2, .8]]
pdf = dirichlet_lib.Dirichlet(alpha).prob(x)
self.assertAllClose([1., 8. / 5], self.evaluate(pdf))
    self.assertEqual((2,), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenSameRank(self):
alpha = [[1., 2], [2., 3]]
x = [[.5, .5]]
pdf = dirichlet_lib.Dirichlet(alpha).prob(x)
self.assertAllClose([1., 3. / 2], self.evaluate(pdf))
    self.assertEqual((2,), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenLowerRank(self):
alpha = [[1., 2], [2., 3]]
x = [.5, .5]
pdf = dirichlet_lib.Dirichlet(alpha).prob(x)
self.assertAllClose([1., 3. / 2], self.evaluate(pdf))
    self.assertEqual((2,), pdf.get_shape())
def testMean(self):
alpha = [1., 2, 3]
dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
self.assertEqual(dirichlet.mean().get_shape(), [3])
if not stats:
return
expected_mean = stats.dirichlet.mean(alpha)
self.assertAllClose(self.evaluate(dirichlet.mean()), expected_mean)
def testCovarianceFromSampling(self):
alpha = np.array([[1., 2, 3],
[2.5, 4, 0.01]], dtype=np.float32)
dist = dirichlet_lib.Dirichlet(alpha) # batch_shape=[2], event_shape=[3]
x = dist.sample(int(250e3), seed=1)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean[None, ...]
sample_cov = math_ops.reduce_mean(math_ops.matmul(
x_centered[..., None], x_centered[..., None, :]), 0)
sample_var = array_ops.matrix_diag_part(sample_cov)
sample_stddev = math_ops.sqrt(sample_var)
[
sample_mean_,
sample_cov_,
sample_var_,
sample_stddev_,
analytic_mean,
analytic_cov,
analytic_var,
analytic_stddev,
] = self.evaluate([
sample_mean,
sample_cov,
sample_var,
sample_stddev,
dist.mean(),
dist.covariance(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(sample_mean_, analytic_mean, atol=0.04, rtol=0.)
self.assertAllClose(sample_cov_, analytic_cov, atol=0.06, rtol=0.)
self.assertAllClose(sample_var_, analytic_var, atol=0.03, rtol=0.)
self.assertAllClose(sample_stddev_, analytic_stddev, atol=0.02, rtol=0.)
def testVariance(self):
alpha = [1., 2, 3]
denominator = np.sum(alpha)**2 * (np.sum(alpha) + 1)
dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
self.assertEqual(dirichlet.covariance().get_shape(), (3, 3))
if not stats:
return
expected_covariance = np.diag(stats.dirichlet.var(alpha))
expected_covariance += [[0., -2, -3], [-2, 0, -6], [-3, -6, 0]
] / denominator
self.assertAllClose(
self.evaluate(dirichlet.covariance()), expected_covariance)
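  # Illustrative check of the off-diagonal entries above: Cov(X_i, X_j) =
  # -alpha_i * alpha_j / (alpha_0**2 * (alpha_0 + 1)), so with alpha_0 = 6
  # the numerators are -1*2, -1*3, and -2*3 over denominator 36 * 7 = 252.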
def testMode(self):
alpha = np.array([1.1, 2, 3])
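    # The mode of a Dirichlet with all concentrations > 1 is
    # (alpha - 1) / (alpha_0 - K), where K is the number of classes (here 3).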
expected_mode = (alpha - 1) / (np.sum(alpha) - 3)
dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
self.assertEqual(dirichlet.mode().get_shape(), [3])
self.assertAllClose(self.evaluate(dirichlet.mode()), expected_mode)
def testModeInvalid(self):
alpha = np.array([1., 2, 3])
dirichlet = dirichlet_lib.Dirichlet(
concentration=alpha, allow_nan_stats=False)
with self.assertRaisesOpError("Condition x < y.*"):
self.evaluate(dirichlet.mode())
def testModeEnableAllowNanStats(self):
alpha = np.array([1., 2, 3])
dirichlet = dirichlet_lib.Dirichlet(
concentration=alpha, allow_nan_stats=True)
expected_mode = np.zeros_like(alpha) + np.nan
self.assertEqual(dirichlet.mode().get_shape(), [3])
self.assertAllClose(self.evaluate(dirichlet.mode()), expected_mode)
def testEntropy(self):
alpha = [1., 2, 3]
dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
self.assertEqual(dirichlet.entropy().get_shape(), ())
if not stats:
return
expected_entropy = stats.dirichlet.entropy(alpha)
self.assertAllClose(self.evaluate(dirichlet.entropy()), expected_entropy)
def testSample(self):
alpha = [1., 2]
dirichlet = dirichlet_lib.Dirichlet(alpha)
n = constant_op.constant(100000)
samples = dirichlet.sample(n)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000, 2))
self.assertTrue(np.all(sample_values > 0.0))
if not stats:
return
self.assertLess(
stats.kstest(
            # The first marginal of a Dirichlet is a (univariate) Beta.
sample_values[:, 0],
stats.beta(a=1., b=2.).cdf)[0],
0.01)
def testDirichletFullyReparameterized(self):
alpha = constant_op.constant([1.0, 2.0, 3.0])
with backprop.GradientTape() as tape:
tape.watch(alpha)
dirichlet = dirichlet_lib.Dirichlet(alpha)
samples = dirichlet.sample(100)
grad_alpha = tape.gradient(samples, alpha)
self.assertIsNotNone(grad_alpha)
def testDirichletDirichletKL(self):
conc1 = np.array([[1., 2., 3., 1.5, 2.5, 3.5],
[1.5, 2.5, 3.5, 4.5, 5.5, 6.5]])
conc2 = np.array([[0.5, 1., 1.5, 2., 2.5, 3.]])
d1 = dirichlet_lib.Dirichlet(conc1)
d2 = dirichlet_lib.Dirichlet(conc2)
x = d1.sample(int(1e4), seed=0)
kl_sample = math_ops.reduce_mean(d1.log_prob(x) - d2.log_prob(x), 0)
kl_actual = kullback_leibler.kl_divergence(d1, d2)
kl_sample_val = self.evaluate(kl_sample)
kl_actual_val = self.evaluate(kl_actual)
self.assertEqual(conc1.shape[:-1], kl_actual.get_shape())
if not special:
return
kl_expected = (
special.gammaln(np.sum(conc1, -1))
- special.gammaln(np.sum(conc2, -1))
- np.sum(special.gammaln(conc1) - special.gammaln(conc2), -1)
+ np.sum((conc1 - conc2) * (special.digamma(conc1) - special.digamma(
np.sum(conc1, -1, keepdims=True))), -1))
self.assertAllClose(kl_expected, kl_actual_val, atol=0., rtol=1e-6)
self.assertAllClose(kl_sample_val, kl_actual_val, atol=0., rtol=1e-1)
# Make sure KL(d1||d1) is 0
kl_same = self.evaluate(kullback_leibler.kl_divergence(d1, d1))
self.assertAllClose(kl_same, np.zeros_like(kl_expected))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/distributions/dirichlet_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for creating RPC clusters on localhost."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
class CreateLocalClusterTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testCreateLocalCluster(self):
workers, _ = test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [session_lib.Session(w.target) for w in workers]
with ops.device("/job:ps/task:0"):
var0 = variables.Variable(0.0)
with ops.device("/job:ps/task:1"):
var1 = variables.Variable(1.0)
worker_sessions[0].run([var0.initializer, var1.initializer])
with ops.device("/job:ps/task:0"):
var2 = variables.Variable(2.0)
with ops.device("/job:ps/task:1"):
var3 = variables.Variable(3.0)
worker_sessions[1].run([var2.initializer, var3.initializer])
# Read values back in the opposite session
self.assertAllEqual(0.0, var0.eval(session=worker_sessions[1]))
self.assertAllEqual(1.0, var1.eval(session=worker_sessions[1]))
self.assertAllEqual(2.0, var2.eval(session=worker_sessions[0]))
self.assertAllEqual(3.0, var3.eval(session=worker_sessions[0]))
class CreateLocalClusterBenchmark(test.Benchmark):
def benchmarkCreateLocalCluster(self):
deltas = []
iters = 5
for _ in range(iters):
start_time = time.time()
test.create_local_cluster(num_workers=1, num_ps=10)
end_time = time.time()
deltas.append(end_time - start_time)
median_deltas = np.median(deltas)
print("\n\nbenchmark_create_local_cluster_1_worker_10_ps. "
"iterations: %d, median wall time: %g\n\n" % (iters, median_deltas))
self.report_benchmark(
iters=iters,
wall_time=median_deltas,
name="benchmark_create_local_cluster_1_worker_10_ps")
class PartitionedVariablesBenchmark(test.Benchmark):
def benchmark_create_1000_partitions_with_100_parameter_servers(self):
workers, _ = test.create_local_cluster(num_workers=1, num_ps=100)
worker_sessions = [session_lib.Session(w.target) for w in workers]
worker = worker_sessions[0]
partition_sizes = (1, 512, 1024 * 32, 1024 * 128)
partitioned = []
for partition_size in partition_sizes:
      # max_shard_bytes is 4 * partition_size, i.e. partition_size float32s
      # per shard, and the shape holds 1000 * partition_size float32s, so the
      # variable should partition into 1000 shards of partition_size float32s
      # each (e.g. partition_size=512 gives 2048-byte shards, 1000 in total).
print("Building partitioned variable with %d floats per partition" %
partition_size)
with ops.device(device_setter.replica_device_setter(ps_tasks=100)):
partitioned_ix = variable_scope.get_variable(
"partitioned_%d" % partition_size,
shape=[1000 * partition_size],
dtype=dtypes.float32,
            # Each partition holds exactly partition_size float32s.
partitioner=partitioned_variables.variable_axis_size_partitioner(
max_shard_bytes=4 * partition_size))
# Concatenates along axis 0
partitioned.append(ops.convert_to_tensor(partitioned_ix))
variables.global_variables_initializer().run(session=worker)
for ix, partition_size in enumerate(partition_sizes):
print("Running benchmark having partitions with %d floats" %
partition_size)
self.run_op_benchmark(
worker,
partitioned[ix],
name=("read_concat_1000_partitions_from_"
"100_parameter_servers_partsize_%d_floats" % partition_size))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/localhost_cluster_performance_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sync_replicas_optimizer.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import training
# Creates the workers and returns their sessions, graphs, and train_ops.
def get_workers(num_workers, replicas_to_aggregate, workers):
sessions = []
graphs = []
train_ops = []
for worker_id in range(num_workers):
graph = ops.Graph()
is_chief = (worker_id == 0)
with graph.as_default():
with ops.device("/job:ps/task:0"):
global_step = variables.VariableV1(
0, name="global_step", trainable=False)
var_0 = variables.VariableV1(0.0, name="v0")
with ops.device("/job:ps/task:1"):
var_1 = variables.VariableV1(1.0, name="v1")
var_sparse = variables.VariableV1([[3.0], [4.0]], name="v_sparse")
with ops.device("/job:worker/task:" + str(worker_id)):
grads_0 = constant_op.constant(0.1 + worker_id * 0.2)
grads_1 = constant_op.constant(0.9 + worker_id * 0.2)
# This is to test against sparse gradients.
grads_sparse = ops.IndexedSlices(
constant_op.constant(
[0.1 + worker_id * 0.2], shape=[1, 1]),
constant_op.constant([1]),
constant_op.constant([2, 1]))
sgd_opt = gradient_descent.GradientDescentOptimizer(2.0)
sync_rep_opt = training.SyncReplicasOptimizer(
sgd_opt,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers)
train_op = [
sync_rep_opt.apply_gradients(
zip([grads_0, grads_1, grads_sparse],
[var_0, var_1, var_sparse]),
global_step=global_step)
]
sync_replicas_hook = sync_rep_opt.make_session_run_hook(
is_chief, num_tokens=num_workers)
# Creates MonitoredSession
session = training.MonitoredTrainingSession(
master=workers[worker_id].target,
is_chief=is_chief,
hooks=[sync_replicas_hook])
sessions.append(session)
graphs.append(graph)
train_ops.append(train_op)
return sessions, graphs, train_ops
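# Worked numbers for the assertions below (illustrative): worker i produces
# grads_0 = 0.1 + 0.2 * i and grads_1 = 0.9 + 0.2 * i. With 2 workers,
# SyncReplicasOptimizer averages the gradients and applies SGD with learning
# rate 2.0, so after one aggregated step:
#   v0: 0.0 - 2.0 * (0.1 + 0.3) / 2 = -0.4
#   v1: 1.0 - 2.0 * (0.9 + 1.1) / 2 = -1.0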
class SyncReplicasOptimizerTest(test.TestCase):
def _run(self, train_op, sess):
sess.run(train_op)
@test_util.run_v1_only("b/120545219")
def test2Workers(self):
num_workers = 2
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate, workers)
# Chief should have already initialized all the variables.
var_0_g_0 = graphs[0].get_tensor_by_name("v0:0")
var_1_g_0 = graphs[0].get_tensor_by_name("v1:0")
local_step_0 = graphs[0].get_tensor_by_name("sync_rep_local_step:0")
self.assertAllEqual(0.0, sessions[0].run(var_0_g_0))
self.assertAllEqual(1.0, sessions[0].run(var_1_g_0))
self.assertAllEqual(0, sessions[0].run(local_step_0))
# Will just use session 1 to verify all the variables later.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
var_sparse_g_1 = graphs[1].get_tensor_by_name("v_sparse:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
# The steps should also be initialized.
self.assertAllEqual(0, sessions[1].run(global_step))
self.assertAllEqual(0, sessions[1].run(local_step_1))
self.assertAllClose([[3.0], [4.0]], sessions[1].run(var_sparse_g_1))
# We have initial tokens in the queue so we can call this one by one. After
# the first step, this will no longer work as there will be no more extra
# tokens in the queue.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
    # The global step should have been updated and the variables should now
    # have the new values after the average of the gradients is applied.
while sessions[1].run(global_step) != 1:
time.sleep(0.01)
self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
self.assertAllClose([[3.0], [4.0 - (0.1 + 0.3) / 2 * 2.0]],
sessions[1].run(var_sparse_g_1))
    # The local step for both workers should still be 0 because the initial
    # tokens in the token queue are 0s. This means that the following
    # computation of the gradients will be wasted as local_step is smaller
    # than the current global step. However, this only happens once when the
    # system first starts, and it is necessary to make the system robust to
    # the chief being restarted by errors/preemption/...
self.assertAllEqual(0, sessions[0].run(local_step_0))
self.assertAllEqual(0, sessions[1].run(local_step_1))
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. The variables are still the same.
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllEqual(1, sessions[0].run(local_step_0))
self.assertAllEqual(1, sessions[1].run(local_step_1))
self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
# At this step, the token queue is empty. So the 2 workers need to work
# together to proceed.
threads = []
threads.append(
self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0])))
threads.append(
self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1])))
    # The two workers start to execute the train op.
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# The global step should now be 2 and the gradients should have been
# applied twice.
self.assertAllEqual(2, sessions[1].run(global_step))
self.assertAllClose(0 - 2 * (0.1 + 0.3) / 2 * 2.0,
sessions[1].run(var_0_g_1))
self.assertAllClose(1 - 2 * (0.9 + 1.1) / 2 * 2.0,
sessions[1].run(var_1_g_1))
  # 3 workers, one of which is a backup.
@test_util.run_v1_only("b/120545219")
def test3Workers1Backup(self):
num_workers = 3
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate, workers)
# Chief should have already initialized all the variables.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
    # The steps should also be initialized.
self.assertAllEqual(0, sessions[1].run(global_step))
self.assertAllEqual(0, sessions[1].run(local_step_1))
    # We have initial tokens in the queue so we can call this one by one.
    # After the token queue becomes empty, they should be called concurrently.
    # Here worker 0 and worker 2 finish first.
sessions[0].run(train_ops[0])
sessions[2].run(train_ops[2])
    # The global step should have been updated since we only need to collect
    # 2 gradients. The variables should now have the new values after the
    # average of the gradients from workers 0 and 2 is applied.
while sessions[1].run(global_step) != 1:
time.sleep(0.01)
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllClose(0 - (0.1 + 0.5) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.3) / 2 * 2.0, sessions[1].run(var_1_g_1))
    # Worker 1 finished later and its gradients will now be dropped as they
    # are stale.
sessions[1].run(train_ops[1])
    # As shown in the previous test, the local_step for all workers should
    # still be 0, so their next computation will also be dropped.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
sessions[2].run(train_ops[2])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. Just check worker 1 as an example.
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllEqual(1, sessions[1].run(local_step_1))
thread_0 = self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1]))
    # Let worker 0 execute first.
    # It will wait, since we need 2 workers to finish this step, and the
    # global step should still be 1.
thread_0.start()
self.assertAllEqual(1, sessions[1].run(global_step))
# Starts worker 1.
thread_1.start()
thread_1.join()
thread_0.join()
# The global step should now be 2 and the gradients should have been
# applied again.
self.assertAllEqual(2, sessions[1].run(global_step))
self.assertAllClose(-0.6 - (0.1 + 0.3) / 2 * 2.0,
sessions[1].run(var_0_g_1))
self.assertAllClose(-1.2 - (0.9 + 1.1) / 2 * 2.0,
sessions[1].run(var_1_g_1))
class SyncReplicasOptimizerHookTest(test.TestCase):
def testErrorIfUsedBeforeMinimizeCalled(self):
opt = training.SyncReplicasOptimizer(
opt=gradient_descent.GradientDescentOptimizer(1.0),
replicas_to_aggregate=1,
total_num_replicas=1)
hook = opt.make_session_run_hook(True)
with self.assertRaisesRegexp(ValueError,
"apply_gradient should be called"):
hook.begin()
@test_util.run_v1_only("b/120545219")
def testCanCreatedBeforeMinimizeCalled(self):
"""This behavior is required to be integrated with Estimators."""
opt = training.SyncReplicasOptimizer(
opt=gradient_descent.GradientDescentOptimizer(1.0),
replicas_to_aggregate=1,
total_num_replicas=1)
hook = opt.make_session_run_hook(True)
v = variables.VariableV1([0.])
global_step = variables.VariableV1(0, name="global_step", trainable=False)
opt.minimize(v, global_step=global_step)
hook.begin()
@test_util.run_v1_only("b/120545219")
def testFetchVariableList(self):
opt = training.SyncReplicasOptimizer(
opt=adam.AdamOptimizer(0.01),
replicas_to_aggregate=1,
total_num_replicas=1)
v = variables.VariableV1([0.], name="fetch_variable_test")
global_step = variables.VariableV1(0, name="global_step", trainable=False)
opt.minimize(v, global_step=global_step)
opt_variables = opt.variables()
beta1_power, beta2_power = opt._opt._get_beta_accumulators()
self.assertIn(beta1_power, opt_variables)
self.assertIn(beta2_power, opt_variables)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/sync_replicas_optimizer_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SameVariablesClearTest(test.TestCase):
# Verifies behavior of tf.Session.reset().
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
@test_util.run_deprecated_v1
def testSameVariablesClear(self):
server = server_lib.Server.create_local_server()
# Creates a graph with 2 variables.
v0 = variables.Variable([[2, 1]], name="v0")
v1 = variables.Variable([[1], [2]], name="v1")
v2 = math_ops.matmul(v0, v1)
# Verifies that both sessions connecting to the same target return
# the same results.
sess_1 = session.Session(server.target)
sess_2 = session.Session(server.target)
sess_1.run(variables.global_variables_initializer())
self.assertAllEqual([[4]], sess_1.run(v2))
self.assertAllEqual([[4]], sess_2.run(v2))
    # Resets the target. Sessions abort. Use sess_2 to verify.
session.Session.reset(server.target)
with self.assertRaises(errors_impl.AbortedError):
self.assertAllEqual([[4]], sess_2.run(v2))
# Connects to the same target. Device memory for the variables would have
# been released, so they will be uninitialized.
sess_2 = session.Session(server.target)
with self.assertRaises(errors_impl.FailedPreconditionError):
sess_2.run(v2)
# Reinitializes the variables.
sess_2.run(variables.global_variables_initializer())
self.assertAllEqual([[4]], sess_2.run(v2))
sess_2.close()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/server_lib_same_variables_clear_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for training ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import gen_training_ops # pylint: disable=unused-import
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.gen_training_ops import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/python/training/training_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for device function for replicated training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import server_lib
class DeviceSetterTest(test.TestCase):
_cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
@test_util.run_deprecated_v1
def testCPUOverride(self):
with ops.device(
device_setter.replica_device_setter(cluster=self._cluster_spec)):
with ops.device("/cpu:0"):
v = variables.Variable([1, 2])
w = variables.Variable([2, 1])
with ops.device("/cpu:0"):
a = v + w
self.assertDeviceEqual("/job:ps/task:0/cpu:0", v.device)
self.assertDeviceEqual("/job:ps/task:0/cpu:0", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", w.device)
self.assertDeviceEqual("/job:ps/task:1", w.initializer.device)
self.assertDeviceEqual("/job:worker/cpu:0", a.device)
@test_util.run_deprecated_v1
def testResource(self):
with ops.device(
device_setter.replica_device_setter(cluster=self._cluster_spec)):
v = resource_variable_ops.ResourceVariable([1, 2])
self.assertDeviceEqual("/job:ps/task:0", v.device)
@test_util.run_deprecated_v1
def testPS2TasksWithClusterSpecClass(self):
with ops.device(
device_setter.replica_device_setter(cluster=self._cluster_spec)):
v = variables.Variable([1, 2])
w = variables.Variable([2, 1])
a = v + w
self.assertDeviceEqual("/job:ps/task:0", v.device)
self.assertDeviceEqual("/job:ps/task:0", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", w.device)
self.assertDeviceEqual("/job:ps/task:1", w.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
@test_util.run_deprecated_v1
def testPS2TasksPinVariableToJob(self):
with ops.device(
device_setter.replica_device_setter(cluster=self._cluster_spec)):
v = variables.Variable([1, 2])
with ops.device("/job:moon"):
w = variables.Variable([2, 1])
with ops.device("/job:ps"): # Explicit PS job will get task set.
x = variables.Variable([0, 1])
a = v + w + x
self.assertDeviceEqual("/job:ps/task:0", v.device)
self.assertDeviceEqual("/job:ps/task:0", v.initializer.device)
self.assertDeviceEqual("/job:moon", w.device)
self.assertDeviceEqual("/job:moon", w.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", x.device)
self.assertDeviceEqual("/job:ps/task:1", x.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
@test_util.run_deprecated_v1
def testPS2TasksUseCpuForPS(self):
with ops.device(
device_setter.replica_device_setter(ps_tasks=1, ps_device="/cpu:0")):
v = variables.Variable([1, 2])
with ops.device("/job:moon"):
w = variables.Variable([2, 1])
a = v + w
self.assertDeviceEqual("/cpu:0", v.device)
self.assertDeviceEqual("/cpu:0", v.initializer.device)
self.assertDeviceEqual("/job:moon/cpu:0", w.device)
self.assertDeviceEqual("/job:moon/cpu:0", w.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
@test_util.run_deprecated_v1
def testPS2TasksNoMerging(self):
with ops.device(
device_setter.replica_device_setter(
cluster=self._cluster_spec, merge_devices=False)):
v = variables.Variable([1, 2])
with ops.device("/job:ps"): # Won't assign task when merge_devices=False.
w = variables.Variable([2, 1])
a = v + w
self.assertDeviceEqual("/job:ps/task:0", v.device)
self.assertDeviceEqual("/job:ps/task:0", v.initializer.device)
self.assertDeviceEqual("/job:ps", w.device)
self.assertDeviceEqual("/job:ps", w.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
@test_util.run_deprecated_v1
def testPS2TasksWithClusterSpecDict(self):
with ops.device(
device_setter.replica_device_setter(cluster=self._cluster_spec.as_dict(
))):
v = variables.Variable([1, 2])
w = variables.Variable([2, 1])
a = v + w
self.assertDeviceEqual("/job:ps/task:0", v.device)
self.assertDeviceEqual("/job:ps/task:0", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", w.device)
self.assertDeviceEqual("/job:ps/task:1", w.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
@test_util.run_deprecated_v1
def testPS2TasksWithClusterDef(self):
with ops.device(
device_setter.replica_device_setter(
cluster=self._cluster_spec.as_cluster_def())):
v = variables.Variable([1, 2])
w = variables.Variable([2, 1])
a = v + w
self.assertDeviceEqual("/job:ps/task:0", v.device)
self.assertDeviceEqual("/job:ps/task:0", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", w.device)
self.assertDeviceEqual("/job:ps/task:1", w.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
@test_util.run_deprecated_v1
def testPS2TasksWithDevice(self):
cluster_spec = server_lib.ClusterSpec({
"sun": ["sun0:2222", "sun1:2222", "sun2:2222"],
"moon": ["moon0:2222", "moon1:2222"]
})
with ops.device(
device_setter.replica_device_setter(
ps_device="/job:moon",
worker_device="/job:sun",
cluster=cluster_spec.as_cluster_def())):
v = variables.Variable([1, 2])
w = variables.Variable([2, 1])
a = v + w
self.assertDeviceEqual("/job:moon/task:0", v.device)
self.assertDeviceEqual("/job:moon/task:0", v.initializer.device)
self.assertDeviceEqual("/job:moon/task:1", w.device)
self.assertDeviceEqual("/job:moon/task:1", w.initializer.device)
self.assertDeviceEqual("/job:sun", a.device)
@test_util.run_deprecated_v1
def testPS2TasksWithCPUConstraint(self):
cluster_spec = server_lib.ClusterSpec({
"sun": ["sun0:2222", "sun1:2222", "sun2:2222"],
"moon": ["moon0:2222", "moon1:2222"]
})
with ops.device(
device_setter.replica_device_setter(
ps_device="/job:moon/cpu:0",
worker_device="/job:sun",
cluster=cluster_spec.as_cluster_def())):
v = variables.Variable([1, 2])
w = variables.Variable([2, 1])
a = v + w
self.assertDeviceEqual("/job:moon/task:0/cpu:0", v.device)
self.assertDeviceEqual("/job:moon/task:0/cpu:0", v.initializer.device)
self.assertDeviceEqual("/job:moon/task:1/cpu:0", w.device)
self.assertDeviceEqual("/job:moon/task:1/cpu:0", w.initializer.device)
self.assertDeviceEqual("/job:sun", a.device)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/device_setter_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Maintain moving averages of parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import slot_creator
from tensorflow.python.util.tf_export import tf_export
# TODO(touts): switch to variables.Variable.
def assign_moving_average(variable, value, decay, zero_debias=True, name=None):
"""Compute the moving average of a variable.
The moving average of 'variable' updated with 'value' is:
variable * decay + value * (1 - decay)
The returned Operation sets 'variable' to the newly computed moving average,
by performing this subtraction:
variable -= (1 - decay) * (variable - value)
Since variables that are initialized to a `0` value will be `0` biased,
`zero_debias` optionally enables scaling by the mathematically correct
debiasing factor of
1 - decay ** num_updates
See `ADAM: A Method for Stochastic Optimization` Section 3 for more details
(https://arxiv.org/abs/1412.6980).
The names of the debias shadow variables, by default, include both the scope
they were created in and the scope of the variables they debias. They are also
  given a uniquifying suffix.
E.g.:
```
with tf.compat.v1.variable_scope('scope1'):
with tf.compat.v1.variable_scope('scope2'):
var = tf.compat.v1.get_variable('foo')
update_1 = tf.assign_moving_average(var, 0.0, 1.0)
update_2 = tf.assign_moving_average(var, 0.0, 0.9)
# var.name: 'scope1/scope2/foo'
# shadow var names: 'scope1/scope2/scope1/scope2/foo/biased'
# 'scope1/scope2/scope1/scope2/foo/biased_1'
```
Args:
variable: A Variable.
value: A tensor with the same shape as 'variable'.
decay: A float Tensor or float value. The moving average decay.
zero_debias: A python bool. If true, assume the variable is 0-initialized
and unbias it, as in https://arxiv.org/abs/1412.6980. See docstring in
`_zero_debias` for more details.
name: Optional name of the returned operation.
Returns:
    A tensor which, if evaluated, will compute and return the new moving
    average.
"""
with ops.name_scope(name, "AssignMovingAvg",
[variable, value, decay]) as scope:
decay = ops.convert_to_tensor(1.0 - decay, name="decay")
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
def update_fn(v, value):
return state_ops.assign_sub(v, (v - value) * decay, name=scope)
def update(strategy, v, value):
if zero_debias:
return _zero_debias(strategy, v, value, decay)
else:
return strategy.extended.update(v, update_fn, args=(value,))
replica_context = distribution_strategy_context.get_replica_context()
if replica_context:
# In a replica context, we update variable using the mean of value across
# replicas.
def merge_fn(strategy, v, value):
value = strategy.extended.reduce_to(ds_reduce_util.ReduceOp.MEAN, value,
v)
return update(strategy, v, value)
return replica_context.merge_call(merge_fn, args=(variable, value))
else:
strategy = distribution_strategy_context.get_cross_replica_context()
return update(strategy, variable, value)
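# A minimal pure-Python sketch (hypothetical, not part of the TensorFlow API)
# of the update rule above, showing that `v -= (1 - decay) * (v - value)` is
# the same as `decay * v + (1 - decay) * value`.
def _assign_moving_average_sketch(v, value, decay):
  """Hypothetical reference for one moving-average step."""
  return v - (1.0 - decay) * (v - value)
# e.g. _assign_moving_average_sketch(0.0, 1.0, decay=0.9) == 0.1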
def weighted_moving_average(value,
decay,
weight,
truediv=True,
collections=None,
name=None):
"""Compute the weighted moving average of `value`.
Conceptually, the weighted moving average is:
`moving_average(value * weight) / moving_average(weight)`,
where a moving average updates by the rule
`new_value = decay * old_value + (1 - decay) * update`
Internally, this Op keeps moving average variables of both `value * weight`
and `weight`.
Args:
value: A numeric `Tensor`.
decay: A float `Tensor` or float value. The moving average decay.
    weight: `Tensor` that keeps the current value of a weight. Its shape must
      be broadcast-compatible with `value`.
truediv: Boolean, if `True`, dividing by `moving_average(weight)` is
floating point division. If `False`, use division implied by dtypes.
collections: List of graph collections keys to add the internal variables
`value * weight` and `weight` to. Defaults to
`[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name of the returned operation. Defaults to
"WeightedMovingAvg".
Returns:
An Operation that updates and returns the weighted moving average.
"""
# Unlike assign_moving_average, the weighted moving average doesn't modify
# user-visible variables. It is the ratio of two internal variables, which are
# moving averages of the updates. Thus, the signature of this function is
# quite different than assign_moving_average.
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
with variable_scope.variable_scope(name, "WeightedMovingAvg",
[value, weight, decay]) as scope:
value_x_weight_var = variable_scope.get_variable(
"value_x_weight",
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
weight_var = variable_scope.get_variable(
"weight",
shape=weight.get_shape(),
dtype=weight.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
numerator = assign_moving_average(
value_x_weight_var, value * weight, decay, zero_debias=False)
denominator = assign_moving_average(
weight_var, weight, decay, zero_debias=False)
if truediv:
return math_ops.truediv(numerator, denominator, name=scope.name)
else:
return math_ops.div(numerator, denominator, name=scope.name)
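# A hypothetical pure-Python sketch (not part of the TensorFlow API) of the
# ratio computed above:
# `moving_average(value * weight) / moving_average(weight)`.
def _weighted_moving_average_sketch(values, weights, decay):
  """Hypothetical reference; returns the final weighted moving average."""
  num = den = 0.0
  for value, weight in zip(values, weights):
    num = decay * num + (1.0 - decay) * value * weight
    den = decay * den + (1.0 - decay) * weight
  return num / den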
def _zero_debias(strategy, unbiased_var, value, decay):
"""Compute the delta required for a debiased Variable.
  All exponential moving averages initialized with Tensors are initialized to 0,
  and therefore are biased to 0. Variables initialized to 0 and used as EMAs are
  similarly biased. This function computes the debiased update amount according
  to a scale factor, as in https://arxiv.org/abs/1412.6980.
  To demonstrate the bias that results from 0-initialization, take an EMA that
  was initialized to `0` with decay `b`. After `t` timesteps of seeing the
  constant `c`, the variable will have the following value:
```
EMA = 0*b^(t) + c*(1 - b)*b^(t-1) + c*(1 - b)*b^(t-2) + ...
= c*(1 - b^t)
```
To have the true value `c`, we would divide by the scale factor `1 - b^t`.
In order to perform debiasing, we use two shadow variables. One keeps track of
the biased estimate, and the other keeps track of the number of updates that
have occurred.
Args:
strategy: `Strategy` used to create and update variables.
unbiased_var: A Variable representing the current value of the unbiased EMA.
value: A Tensor representing the most recent value.
decay: A Tensor representing `1-decay` for the EMA.
Returns:
Operation which updates unbiased_var to the debiased moving average value.
"""
with variable_scope.variable_scope(
unbiased_var.name[:-len(":0")], values=[unbiased_var, value, decay]):
with ops.init_scope():
biased_initializer = init_ops.zeros_initializer()
local_step_initializer = init_ops.zeros_initializer()
def _maybe_get_unique(name):
"""Get name for a unique variable, if not `reuse=True`."""
if variable_scope.get_variable_scope().reuse:
return name
vs_vars = [
x.op.name
for x in variable_scope.get_variable_scope().global_variables()
]
full_name = variable_scope.get_variable_scope().name + "/" + name
if full_name not in vs_vars:
return name
idx = 1
while full_name + ("_%d" % idx) in vs_vars:
idx += 1
return name + ("_%d" % idx)
with strategy.extended.colocate_vars_with(unbiased_var):
biased_var = variable_scope.get_variable(
_maybe_get_unique("biased"),
initializer=biased_initializer,
shape=unbiased_var.get_shape(),
dtype=unbiased_var.dtype,
trainable=False)
local_step = variable_scope.get_variable(
_maybe_get_unique("local_step"),
shape=[],
dtype=unbiased_var.dtype,
initializer=local_step_initializer,
trainable=False)
def update_fn(v, value, biased_var, local_step):
update_biased = state_ops.assign_sub(biased_var,
(biased_var - value) * decay)
update_local_step = local_step.assign_add(1)
      # The `decay` argument here actually holds `1 - decay` (see Args above),
      # so `1.0 - decay` in the exponent is the true decay rate.
bias_factor = 1 - math_ops.pow(1.0 - decay, update_local_step)
return state_ops.assign(
v, update_biased / bias_factor, name=ops.get_name_scope() + "/")
return strategy.extended.update(
unbiased_var, update_fn, args=(value, biased_var, local_step))
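# A hypothetical numeric check (not part of the TensorFlow API) of the
# debiasing math above: after `t` steps of a constant `c`, a 0-initialized EMA
# with decay `b` holds `c * (1 - b**t)`, so dividing by the bias factor
# `1 - b**t` recovers `c`.
def _zero_debias_sketch(c, b, t):
  """Hypothetical helper returning (biased_ema, debiased_ema)."""
  ema = 0.0
  for _ in range(t):
    ema = b * ema + (1.0 - b) * c
  return ema, ema / (1.0 - b ** t)  # the second element equals c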
@tf_export("train.ExponentialMovingAverage")
class ExponentialMovingAverage(object):
"""Maintains moving averages of variables by employing an exponential decay.
When training a model, it is often beneficial to maintain moving averages of
the trained parameters. Evaluations that use averaged parameters sometimes
produce significantly better results than the final trained values.
  The `apply()` method adds shadow copies of trained variables and adds ops that
maintain a moving average of the trained variables in their shadow copies.
It is used when building the training model. The ops that maintain moving
averages are typically run after each training step.
The `average()` and `average_name()` methods give access to the shadow
variables and their names. They are useful when building an evaluation
model, or when restoring a model from a checkpoint file. They help use the
moving averages in place of the last trained values for evaluations.
The moving averages are computed using exponential decay. You specify the
decay value when creating the `ExponentialMovingAverage` object. The shadow
variables are initialized with the same initial values as the trained
variables. When you run the ops to maintain the moving averages, each
shadow variable is updated with the formula:
`shadow_variable -= (1 - decay) * (shadow_variable - variable)`
This is mathematically equivalent to the classic formula below, but the use
of an `assign_sub` op (the `"-="` in the formula) allows concurrent lockless
updates to the variables:
`shadow_variable = decay * shadow_variable + (1 - decay) * variable`
Reasonable values for `decay` are close to 1.0, typically in the
multiple-nines range: 0.999, 0.9999, etc.
Example usage when creating a training model:
```python
# Create variables.
var0 = tf.Variable(...)
var1 = tf.Variable(...)
# ... use the variables to build a training model...
...
# Create an op that applies the optimizer. This is what we usually
# would use as a training op.
opt_op = opt.minimize(my_loss, [var0, var1])
# Create an ExponentialMovingAverage object
ema = tf.train.ExponentialMovingAverage(decay=0.9999)
with tf.control_dependencies([opt_op]):
# Create the shadow variables, and add ops to maintain moving averages
# of var0 and var1. This also creates an op that will update the moving
# averages after each training step. This is what we will use in place
# of the usual training op.
training_op = ema.apply([var0, var1])
...train the model by running training_op...
```
There are two ways to use the moving averages for evaluations:
* Build a model that uses the shadow variables instead of the variables.
For this, use the `average()` method which returns the shadow variable
for a given variable.
* Build a model normally but load the checkpoint files to evaluate by using
the shadow variable names. For this use the `average_name()` method. See
the `tf.compat.v1.train.Saver` for more
information on restoring saved variables.
Example of restoring the shadow variable values:
```python
# Create a Saver that loads variables from their saved shadow values.
shadow_var0_name = ema.average_name(var0)
shadow_var1_name = ema.average_name(var1)
saver = tf.compat.v1.train.Saver({shadow_var0_name: var0, shadow_var1_name:
var1})
saver.restore(...checkpoint filename...)
# var0 and var1 now hold the moving average values
```
"""
def __init__(self,
decay,
num_updates=None,
zero_debias=False,
name="ExponentialMovingAverage"):
"""Creates a new ExponentialMovingAverage object.
The `apply()` method has to be called to create shadow variables and add
ops to maintain moving averages.
The optional `num_updates` parameter allows one to tweak the decay rate
dynamically. It is typical to pass the count of training steps, usually
kept in a variable that is incremented at each step, in which case the
decay rate is lower at the start of training. This makes moving averages
move faster. If passed, the actual decay rate used is:
`min(decay, (1 + num_updates) / (10 + num_updates))`
Args:
decay: Float. The decay to use.
num_updates: Optional count of number of updates applied to variables.
zero_debias: If `True`, zero debias moving-averages that are initialized
with tensors.
name: String. Optional prefix name to use for the name of ops added in
`apply()`.
"""
self._decay = decay
self._num_updates = num_updates
self._zero_debias = zero_debias
self._name = name
self._averages = {}
@property
def name(self):
"""The name of this ExponentialMovingAverage object."""
return self._name
def apply(self, var_list=None):
"""Maintains moving averages of variables.
`var_list` must be a list of `Variable` or `Tensor` objects. This method
creates shadow variables for all elements of `var_list`. Shadow variables
for `Variable` objects are initialized to the variable's initial value.
They will be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
For `Tensor` objects, the shadow variables are initialized to 0 and zero
debiased (see docstring in `assign_moving_average` for more details).
    Shadow variables are created with `trainable=False` and added to the
`GraphKeys.ALL_VARIABLES` collection. They will be returned by calls to
`tf.compat.v1.global_variables()`.
Returns an op that updates all shadow variables from the current value of
their associated variables.
Note that `apply()` can be called multiple times. When eager execution is
enabled each call to apply will update the variables once, so this needs to
be called in a loop.
Args:
var_list: A list of Variable or Tensor objects. The variables and Tensors
must be of types bfloat16, float16, float32, or float64.
Returns:
An Operation that updates the moving averages.
Raises:
TypeError: If the arguments are not an allowed type.
"""
# TODO(touts): op_scope
if var_list is None:
var_list = variables.trainable_variables()
for v in var_list:
if isinstance(v, ops.EagerTensor):
raise TypeError(
"tf.train.ExponentialMovingAverage does not support non-Variable"
" tensors when eager execution is enabled.")
zero_debias_true = set() # set of vars to set `zero_debias=True`
for var in var_list:
if var.dtype.base_dtype not in [
dtypes.bfloat16, dtypes.float16, dtypes.float32, dtypes.float64
]:
raise TypeError("The variables must be half, float, or double: %s" %
var.name)
if var not in self._averages:
# For variables: to lower communication bandwidth across devices we keep
# the moving averages on the same device as the variables. For other
# tensors, we rely on the existing device allocation mechanism.
with ops.init_scope():
if isinstance(var, variables.Variable):
avg = slot_creator.create_slot(
var,
var.initialized_value(),
self.name,
colocate_with_primary=True)
# NOTE(mrry): We only add `tf.Variable` objects to the
# `MOVING_AVERAGE_VARIABLES` collection.
ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
else:
avg = slot_creator.create_zeros_slot(
var,
self.name,
colocate_with_primary=(var.op.type in [
"Variable", "VariableV2", "VarHandleOp"
]))
if self._zero_debias:
zero_debias_true.add(avg)
self._averages[var] = avg
with ops.name_scope(self.name) as scope:
decay = ops.convert_to_tensor(self._decay, name="decay")
if self._num_updates is not None:
num_updates = math_ops.cast(
self._num_updates, dtypes.float32, name="num_updates")
decay = math_ops.minimum(decay,
(1.0 + num_updates) / (10.0 + num_updates))
updates = []
for var in var_list:
zero_debias = self._averages[var] in zero_debias_true
updates.append(
assign_moving_average(
self._averages[var], var, decay, zero_debias=zero_debias))
return control_flow_ops.group(*updates, name=scope)
def average(self, var):
"""Returns the `Variable` holding the average of `var`.
Args:
var: A `Variable` object.
Returns:
A `Variable` object or `None` if the moving average of `var`
is not maintained.
"""
return self._averages.get(var, None)
def average_name(self, var):
"""Returns the name of the `Variable` holding the average for `var`.
The typical scenario for `ExponentialMovingAverage` is to compute moving
averages of variables during training, and restore the variables from the
computed moving averages during evaluations.
To restore variables, you have to know the name of the shadow variables.
That name and the original variable can then be passed to a `Saver()` object
to restore the variable from the moving average value with:
`saver = tf.compat.v1.train.Saver({ema.average_name(var): var})`
`average_name()` can be called whether or not `apply()` has been called.
Args:
var: A `Variable` object.
Returns:
A string: The name of the variable that will be used or was used
by the `ExponentialMovingAverage class` to hold the moving average of
`var`.
"""
if var in self._averages:
return self._averages[var].op.name
return ops.get_default_graph().unique_name(
var.op.name + "/" + self.name, mark_as_used=False)
def variables_to_restore(self, moving_avg_variables=None):
"""Returns a map of names to `Variables` to restore.
If a variable has a moving average, use the moving average variable name as
the restore name; otherwise, use the variable name.
For example,
```python
variables_to_restore = ema.variables_to_restore()
saver = tf.compat.v1.train.Saver(variables_to_restore)
```
Below is an example of such mapping:
```
conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
global_step: global_step
```
Args:
      moving_avg_variables: a list of variables that require the use of the
        moving average variable name to be restored. If None, it will default to
        variables.moving_average_variables() + variables.trainable_variables().
Returns:
A map from restore_names to variables. The restore_name is either the
original or the moving average version of the variable name, depending
on whether the variable name is in the `moving_avg_variables`.
"""
name_map = {}
if moving_avg_variables is None:
# Include trainable variables and variables which have been explicitly
# added to the moving_average_variables collection.
moving_avg_variables = variables.trainable_variables()
moving_avg_variables += variables.moving_average_variables()
# Remove duplicates
moving_avg_variables = set(moving_avg_variables)
    # Collect all the variables with moving averages.
for v in moving_avg_variables:
name_map[self.average_name(v)] = v
# Make sure we restore variables without moving averages as well.
moving_avg_variable_names = set([v.name for v in moving_avg_variables])
for v in list(set(variables.global_variables())):
if v.name not in moving_avg_variable_names and v.op.name not in name_map:
name_map[v.op.name] = v
return name_map
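# A hypothetical sketch (not part of the TensorFlow API) of the dynamic decay
# used by `apply()` when `num_updates` is set:
# `min(decay, (1 + num_updates) / (10 + num_updates))`. Early in training the
# ratio term is small, so the moving averages move faster.
def _effective_decay_sketch(decay, num_updates):
  """Hypothetical helper; e.g. (0.9999, 0) -> 0.1, (0.9999, 1e6) -> 0.9999."""
  return min(decay, (1.0 + num_updates) / (10.0 + num_updates))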
|
tensorflow-master
|
tensorflow/python/training/moving_averages.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""One-line documentation for rmsprop module.
rmsprop algorithm [tieleman2012rmsprop]
A detailed description of rmsprop.
- maintain a moving (discounted) average of the square of gradients
- divide gradient by the root of this average
mean_square = decay * mean_square{t-1} + (1-decay) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t / sqrt(mean_square + epsilon)
delta = - mom
This implementation of RMSProp uses plain momentum, not Nesterov momentum.
The centered version additionally maintains a moving (discounted) average of the
gradients, and uses that average to estimate the variance:
mean_grad = decay * mean_grad{t-1} + (1-decay) * gradient
mean_square = decay * mean_square{t-1} + (1-decay) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t /
sqrt(mean_square - mean_grad**2 + epsilon)
delta = - mom
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
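# A hypothetical pure-Python sketch (not part of the TensorFlow API) of one
# non-centered RMSProp step, following the update equations in the module
# docstring above.
def _rmsprop_step_sketch(var, grad, mean_square, mom,
                         lr=0.001, decay=0.9, momentum=0.0, epsilon=1e-10):
  """Hypothetical reference; returns (new_var, new_mean_square, new_mom)."""
  mean_square = decay * mean_square + (1.0 - decay) * grad ** 2
  mom = momentum * mom + lr * grad / (mean_square + epsilon) ** 0.5
  return var - mom, mean_square, mom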
@tf_export(v1=["train.RMSPropOptimizer"])
class RMSPropOptimizer(optimizer.Optimizer):
"""Optimizer that implements the RMSProp algorithm.
See the
[paper](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf).
"""
def __init__(self,
learning_rate,
decay=0.9,
momentum=0.0,
epsilon=1e-10,
use_locking=False,
centered=False,
name="RMSProp"):
"""Construct a new RMSProp optimizer.
Note that in the dense implementation of this algorithm, variables and their
corresponding accumulators (momentum, gradient moving average, square
gradient moving average) will be updated even if the gradient is zero
(i.e. accumulators will decay, momentum will be applied). The sparse
implementation (used when the gradient is an `IndexedSlices` object,
typically because of `tf.gather` or an embedding lookup in the forward pass)
will not update variable slices or their accumulators unless those slices
were used in the forward pass (nor is there an "eventual" correction to
account for these omitted updates). This leads to more efficient updates for
large embedding lookup tables (where most of the slices are not accessed in
a particular graph execution), but differs from the published algorithm.
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
      decay: Discounting factor for the history/coming gradient.
momentum: A scalar tensor.
epsilon: Small value to avoid zero denominator.
use_locking: If True use locks for update operation.
centered: If True, gradients are normalized by the estimated variance of
the gradient; if False, by the uncentered second moment. Setting this to
True may help with training, but is slightly more expensive in terms of
computation and memory. Defaults to False.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "RMSProp".
@compatibility(eager)
When eager execution is enabled, `learning_rate`, `decay`, `momentum`, and
`epsilon` can each be a callable that takes no arguments and returns the
actual value to use. This can be useful for changing these values across
different invocations of optimizer functions.
@end_compatibility
"""
super(RMSPropOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._decay = decay
self._momentum = momentum
self._epsilon = epsilon
self._centered = centered
# Tensors for learning rate and momentum. Created in _prepare.
self._learning_rate_tensor = None
self._decay_tensor = None
self._momentum_tensor = None
self._epsilon_tensor = None
def _create_slots(self, var_list):
for v in var_list:
if v.get_shape().is_fully_defined():
init_rms = init_ops.ones_initializer(dtype=v.dtype.base_dtype)
else:
init_rms = array_ops.ones_like(v)
self._get_or_make_slot_with_initializer(v, init_rms, v.get_shape(),
v.dtype.base_dtype, "rms",
self._name)
if self._centered:
self._zeros_slot(v, "mg", self._name)
self._zeros_slot(v, "momentum", self._name)
def _prepare(self):
lr = self._call_if_callable(self._learning_rate)
decay = self._call_if_callable(self._decay)
momentum = self._call_if_callable(self._momentum)
epsilon = self._call_if_callable(self._epsilon)
self._learning_rate_tensor = ops.convert_to_tensor(lr, name="learning_rate")
self._decay_tensor = ops.convert_to_tensor(decay, name="decay")
self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum")
self._epsilon_tensor = ops.convert_to_tensor(epsilon, name="epsilon")
def _apply_dense(self, grad, var):
rms = self.get_slot(var, "rms")
mom = self.get_slot(var, "momentum")
if self._centered:
mg = self.get_slot(var, "mg")
return training_ops.apply_centered_rms_prop(
var,
mg,
rms,
mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._decay_tensor, var.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
else:
return training_ops.apply_rms_prop(
var,
rms,
mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._decay_tensor, var.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
rms = self.get_slot(var, "rms")
mom = self.get_slot(var, "momentum")
if self._centered:
mg = self.get_slot(var, "mg")
return training_ops.resource_apply_centered_rms_prop(
var.handle,
mg.handle,
rms.handle,
mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._decay_tensor, grad.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
else:
return training_ops.resource_apply_rms_prop(
var.handle,
rms.handle,
mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._decay_tensor, grad.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
rms = self.get_slot(var, "rms")
mom = self.get_slot(var, "momentum")
if self._centered:
mg = self.get_slot(var, "mg")
return training_ops.sparse_apply_centered_rms_prop(
var,
mg,
rms,
mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._decay_tensor, var.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype),
grad.values,
grad.indices,
use_locking=self._use_locking)
else:
return training_ops.sparse_apply_rms_prop(
var,
rms,
mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._decay_tensor, var.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype),
grad.values,
grad.indices,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
rms = self.get_slot(var, "rms")
mom = self.get_slot(var, "momentum")
if self._centered:
mg = self.get_slot(var, "mg")
return training_ops.resource_sparse_apply_centered_rms_prop(
var.handle,
mg.handle,
rms.handle,
mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._decay_tensor, grad.dtype),
math_ops.cast(self._momentum_tensor, grad.dtype),
math_ops.cast(self._epsilon_tensor, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
else:
return training_ops.resource_sparse_apply_rms_prop(
var.handle,
rms.handle,
mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._decay_tensor, grad.dtype),
math_ops.cast(self._momentum_tensor, grad.dtype),
math_ops.cast(self._epsilon_tensor, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
|
tensorflow-master
|
tensorflow/python/training/rmsprop.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Coordinator to help multiple threads stop when requested."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import sys
import threading
import time
import six
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.Coordinator")
class Coordinator(object):
"""A coordinator for threads.
This class implements a simple mechanism to coordinate the termination of a
set of threads.
#### Usage:
```python
# Create a coordinator.
coord = Coordinator()
# Start a number of threads, passing the coordinator to each of them.
...start thread 1...(coord, ...)
...start thread N...(coord, ...)
# Wait for all the threads to terminate.
coord.join(threads)
```
Any of the threads can call `coord.request_stop()` to ask for all the threads
to stop. To cooperate with the requests, each thread must check for
`coord.should_stop()` on a regular basis. `coord.should_stop()` returns
`True` as soon as `coord.request_stop()` has been called.
A typical thread running with a coordinator will do something like:
```python
while not coord.should_stop():
...do some work...
```
#### Exception handling:
A thread can report an exception to the coordinator as part of the
`request_stop()` call. The exception will be re-raised from the
`coord.join()` call.
Thread code:
```python
try:
while not coord.should_stop():
...do some work...
except Exception as e:
coord.request_stop(e)
```
Main code:
```python
try:
...
coord = Coordinator()
# Start a number of threads, passing the coordinator to each of them.
...start thread 1...(coord, ...)
...start thread N...(coord, ...)
# Wait for all the threads to terminate.
coord.join(threads)
except Exception as e:
...exception that was passed to coord.request_stop()
```
To simplify the thread implementation, the Coordinator provides a
context handler `stop_on_exception()` that automatically requests a stop if
an exception is raised. Using the context handler the thread code above
can be written as:
```python
with coord.stop_on_exception():
while not coord.should_stop():
...do some work...
```
#### Grace period for stopping:
  After a thread has called `coord.request_stop()` the other threads have a
  fixed time to stop; this is called the 'stop grace period' and defaults to 2
  minutes. If any of the threads is still alive after the grace period expires,
  `coord.join()` raises a RuntimeError reporting the laggards.
```python
try:
...
coord = Coordinator()
# Start a number of threads, passing the coordinator to each of them.
...start thread 1...(coord, ...)
...start thread N...(coord, ...)
# Wait for all the threads to terminate, give them 10s grace period
coord.join(threads, stop_grace_period_secs=10)
except RuntimeError:
...one of the threads took more than 10s to stop after request_stop()
...was called.
except Exception:
...exception that was passed to coord.request_stop()
```
"""
def __init__(self, clean_stop_exception_types=None):
"""Create a new Coordinator.
Args:
clean_stop_exception_types: Optional tuple of Exception types that should
cause a clean stop of the coordinator. If an exception of one of these
types is reported to `request_stop(ex)` the coordinator will behave as
if `request_stop(None)` was called. Defaults to
`(tf.errors.OutOfRangeError,)` which is used by input queues to signal
the end of input. When feeding training data from a Python iterator it
is common to add `StopIteration` to this list.
"""
if clean_stop_exception_types is None:
clean_stop_exception_types = (errors.OutOfRangeError,)
self._clean_stop_exception_types = tuple(clean_stop_exception_types)
# Protects all attributes.
self._lock = threading.Lock()
# Event set when threads must stop.
self._stop_event = threading.Event()
# Python exc_info to report.
# If not None, it should hold the returned value of sys.exc_info(), which is
# a tuple containing exception (type, value, traceback).
self._exc_info_to_raise = None
# True if we have called join() already.
self._joined = False
# Set of threads registered for joining when join() is called. These
# threads will be joined in addition to the threads passed to the join()
# call. It's ok if threads are both registered and passed to the join()
# call.
self._registered_threads = set()
def _filter_exception(self, ex):
"""Check if the exception indicated in 'ex' should be ignored.
This method examines `ex` to check if it is an exception that should be
reported to the users. If yes, it returns `ex` as is, otherwise it returns
None.
The code returns None for exception types listed in
`_clean_stop_exception_types`.
Args:
ex: None, an `Exception`, or a Python `exc_info` tuple as returned by
`sys.exc_info()`.
Returns:
ex or None.
"""
if isinstance(ex, tuple):
ex2 = ex[1]
else:
ex2 = ex
if isinstance(ex2, self._clean_stop_exception_types):
# Ignore the exception.
ex = None
return ex
def request_stop(self, ex=None):
"""Request that the threads stop.
After this is called, calls to `should_stop()` will return `True`.
    Note: If an exception is being passed in, it must be in the context of
handling the exception (i.e. `try: ... except Exception as ex: ...`) and not
a newly created one.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
with self._lock:
ex = self._filter_exception(ex)
# If we have already joined the coordinator the exception will not have a
# chance to be reported, so just raise it normally. This can happen if
      # you continue to use a session after having stopped and joined the
# coordinator threads.
if self._joined:
if isinstance(ex, tuple):
six.reraise(*ex)
elif ex is not None:
# NOTE(touts): This is bogus if request_stop() is not called
# from the exception handler that raised ex.
six.reraise(*sys.exc_info())
if not self._stop_event.is_set():
if ex and self._exc_info_to_raise is None:
if isinstance(ex, tuple):
logging.info("Error reported to Coordinator: %s",
compat.as_str_any(ex[1]),
exc_info=ex)
self._exc_info_to_raise = ex
else:
logging.info("Error reported to Coordinator: %s, %s",
type(ex),
compat.as_str_any(ex))
self._exc_info_to_raise = sys.exc_info()
# self._exc_info_to_raise should contain a tuple containing exception
# (type, value, traceback)
if (len(self._exc_info_to_raise) != 3 or
not self._exc_info_to_raise[0] or
not self._exc_info_to_raise[1]):
# Raise, catch and record the exception here so that error happens
# where expected.
try:
raise ValueError(
"ex must be a tuple or sys.exc_info must return the current "
"exception: %s"
% self._exc_info_to_raise)
except ValueError:
# Record this error so it kills the coordinator properly.
# NOTE(touts): As above, this is bogus if request_stop() is not
# called from the exception handler that raised ex.
self._exc_info_to_raise = sys.exc_info()
self._stop_event.set()
def clear_stop(self):
"""Clears the stop flag.
After this is called, calls to `should_stop()` will return `False`.
"""
with self._lock:
self._joined = False
self._exc_info_to_raise = None
if self._stop_event.is_set():
self._stop_event.clear()
def should_stop(self):
"""Check if stop was requested.
Returns:
True if a stop was requested.
"""
return self._stop_event.is_set()
@contextlib.contextmanager
def stop_on_exception(self):
"""Context manager to request stop when an Exception is raised.
Code that uses a coordinator must catch exceptions and pass
them to the `request_stop()` method to stop the other threads
managed by the coordinator.
This context handler simplifies the exception handling.
Use it as follows:
```python
with coord.stop_on_exception():
# Any exception raised in the body of the with
# clause is reported to the coordinator before terminating
# the execution of the body.
...body...
```
This is completely equivalent to the slightly longer code:
```python
try:
...body...
except:
coord.request_stop(sys.exc_info())
```
Yields:
nothing.
"""
try:
yield
except: # pylint: disable=bare-except
self.request_stop(ex=sys.exc_info())
def wait_for_stop(self, timeout=None):
"""Wait till the Coordinator is told to stop.
Args:
timeout: Float. Sleep for up to that many seconds waiting for
should_stop() to become True.
Returns:
      True if the Coordinator is told to stop, False if the timeout expired.
"""
return self._stop_event.wait(timeout)
def register_thread(self, thread):
"""Register a thread to join.
Args:
thread: A Python thread to join.
"""
with self._lock:
self._registered_threads.add(thread)
def join(self, threads=None, stop_grace_period_secs=120,
ignore_live_threads=False):
"""Wait for threads to terminate.
    This call blocks until a set of threads have terminated. The set of threads
is the union of the threads passed in the `threads` argument and the list
of threads that registered with the coordinator by calling
`Coordinator.register_thread()`.
After the threads stop, if an `exc_info` was passed to `request_stop`, that
exception is re-raised.
Grace period handling: When `request_stop()` is called, threads are given
'stop_grace_period_secs' seconds to terminate. If any of them is still
alive after that period expires, a `RuntimeError` is raised. Note that if
an `exc_info` was passed to `request_stop()` then it is raised instead of
that `RuntimeError`.
Args:
threads: List of `threading.Threads`. The started threads to join in
addition to the registered threads.
stop_grace_period_secs: Number of seconds given to threads to stop after
`request_stop()` has been called.
ignore_live_threads: If `False`, raises an error if any of the threads are
still alive after `stop_grace_period_secs`.
Raises:
RuntimeError: If any thread is still alive after `request_stop()`
is called and the grace period expires.
"""
# Threads registered after this call will not be joined.
with self._lock:
if threads is None:
threads = self._registered_threads
else:
threads = self._registered_threads.union(set(threads))
# Copy the set into a list to avoid race conditions where a new thread
# is added while we are waiting.
threads = list(threads)
# Wait for all threads to stop or for request_stop() to be called.
while any(t.is_alive() for t in threads) and not self.wait_for_stop(1.0):
pass
# If any thread is still alive, wait for the grace period to expire.
# By the time this check is executed, threads may still be shutting down,
# so we add a sleep of increasing duration to give them a chance to shut
# down without losing too many cycles.
# The sleep duration is limited to the remaining grace duration.
stop_wait_secs = 0.001
while any(t.is_alive() for t in threads) and stop_grace_period_secs >= 0.0:
time.sleep(stop_wait_secs)
stop_grace_period_secs -= stop_wait_secs
stop_wait_secs = 2 * stop_wait_secs
# Keep the waiting period within sane bounds.
# The minimum value is to avoid decreasing stop_wait_secs to a value
# that could cause stop_grace_period_secs to remain unchanged.
stop_wait_secs = max(min(stop_wait_secs, stop_grace_period_secs), 0.001)
# List the threads still alive after the grace period.
stragglers = [t.name for t in threads if t.is_alive()]
# Terminate with an exception if appropriate.
with self._lock:
self._joined = True
self._registered_threads = set()
if self._exc_info_to_raise:
six.reraise(*self._exc_info_to_raise)
elif stragglers:
if ignore_live_threads:
logging.info("Coordinator stopped with threads still running: %s",
" ".join(stragglers))
else:
raise RuntimeError(
"Coordinator stopped with threads still running: %s" %
" ".join(stragglers))
@property
def joined(self):
return self._joined
def raise_requested_exception(self):
"""If an exception has been passed to `request_stop`, this raises it."""
with self._lock:
if self._exc_info_to_raise:
six.reraise(*self._exc_info_to_raise)
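# A hypothetical usage sketch (defined but never called here) tying together
# the Coordinator API above, following the pattern in the class docstring.
def _coordinator_usage_sketch():
  """Hypothetical example coordinating two worker threads."""
  coord = Coordinator()
  def worker():
    with coord.stop_on_exception():
      while not coord.should_stop():
        time.sleep(0.01)  # ...do some work...
  workers = [threading.Thread(target=worker) for _ in range(2)]
  for t in workers:
    t.start()
  coord.request_stop()  # ask all workers to stop...
  coord.join(workers)   # ...then wait for them to terminate.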
# Threads for the standard services.
@tf_export(v1=["train.LooperThread"])
class LooperThread(threading.Thread):
"""A thread that runs code repeatedly, optionally on a timer.
This thread class is intended to be used with a `Coordinator`. It repeatedly
runs code specified either as `target` and `args` or by the `run_loop()`
method.
Before each run the thread checks if the coordinator has requested stop. In
that case the looper thread terminates immediately.
If the code being run raises an exception, that exception is reported to the
coordinator and the thread terminates. The coordinator will then request all
the other threads it coordinates to stop.
You typically pass looper threads to the supervisor `Join()` method.
"""
def __init__(self, coord, timer_interval_secs, target=None, args=None,
kwargs=None):
"""Create a LooperThread.
Args:
coord: A Coordinator.
timer_interval_secs: Time boundaries at which to call Run(), or None
if it should be called back to back.
target: Optional callable object that will be executed in the thread.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Raises:
ValueError: If one of the arguments is invalid.
"""
if not isinstance(coord, Coordinator):
raise ValueError("'coord' argument must be a Coordinator: %s" % coord)
super(LooperThread, self).__init__()
self.daemon = True
self._coord = coord
self._timer_interval_secs = timer_interval_secs
self._target = target
if self._target:
self._args = args or ()
self._kwargs = kwargs or {}
elif args or kwargs:
raise ValueError("'args' and 'kwargs' argument require that you also "
"pass 'target'")
self._coord.register_thread(self)
@staticmethod
def loop(coord, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(args)`
repeatedly. Otherwise `target(args)` is called every `timer_interval_secs`
seconds. The thread terminates when a stop of the coordinator is
requested.
Args:
coord: A Coordinator.
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = LooperThread(coord, timer_interval_secs, target=target, args=args,
kwargs=kwargs)
looper.start()
return looper
def run(self):
with self._coord.stop_on_exception():
self.start_loop()
if self._timer_interval_secs is None:
# Call back-to-back.
while not self._coord.should_stop():
self.run_loop()
else:
# Next time at which to call run_loop(), starts as 'now'.
next_timer_time = time.time()
while not self._coord.wait_for_stop(next_timer_time - time.time()):
next_timer_time += self._timer_interval_secs
self.run_loop()
self.stop_loop()
def start_loop(self):
"""Called when the thread starts."""
pass
def stop_loop(self):
"""Called when the thread stops."""
pass
def run_loop(self):
"""Called at 'timer_interval_secs' boundaries."""
if self._target:
self._target(*self._args, **self._kwargs)
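# A hypothetical usage sketch (defined but never called here): run a callable
# every 5 seconds until the coordinator is asked to stop.
def _looper_usage_sketch(coord, work):
  """Hypothetical example; `work` is any zero-argument callable."""
  looper = LooperThread.loop(coord, timer_interval_secs=5, target=work)
  return looper  # the thread is already started and registered with coord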
|
tensorflow-master
|
tensorflow/python/training/coordinator.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Standard functions for creating slots.
A slot is a `Variable` created with the same shape as a primary variable or
`Tensor`. A slot is always scoped in the namespace of the primary object and
typically has the same device and type.
Slots are typically used as accumulators to track values associated with
the primary object:
```python
# Optimizers can create a slot for each variable to track accumulators
accumulators = {var : create_zeros_slot(var, "momentum") for var in vs}
for var in vs:
apply_momentum(var, accumulators[var], lr, grad, momentum_tensor)
# Slots can also be used for moving averages
mavg = create_slot(var, var.initialized_value(), "exponential_moving_avg")
update_mavg = mavg.assign_sub((mavg - var) * (1 - decay))
```
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
def _create_slot_var(primary, val, scope, validate_shape, shape, dtype):
"""Helper function for creating a slot variable."""
# TODO(lukaszkaiser): Consider allowing partitioners to be set in the current
# scope.
current_partitioner = variable_scope.get_variable_scope().partitioner
variable_scope.get_variable_scope().set_partitioner(None)
  # When initializing from `val` instead of a callable initializer, the shape
  # is expected to be None, not <unknown> or any fully defined shape.
shape = shape if callable(val) else None
if resource_variable_ops.is_resource_variable(primary):
use_resource = True
elif isinstance(primary, variables.RefVariable):
use_resource = False
else:
use_resource = None
slot = variable_scope.get_variable(
scope,
initializer=val,
trainable=False,
use_resource=use_resource,
shape=shape,
dtype=dtype,
validate_shape=validate_shape)
variable_scope.get_variable_scope().set_partitioner(current_partitioner)
# pylint: disable=protected-access
if isinstance(primary, variables.Variable) and primary._save_slice_info:
# Primary is a partitioned variable, so we need to also indicate that
# the slot is a partitioned variable. Slots have the same partitioning
# as their primaries.
    # For example, when using AdamOptimizer in a linear model, slot.name
    # here can be "linear//weights/Adam:0", while primary.op.name is
    # "linear//weights". We want to get "Adam" as real_slot_name, so we
    # remove "linear//weights" + "/" from the front and ":0" from the end.
real_slot_name = slot.name[len(primary.op.name + "/"):-2]
slice_info = primary._save_slice_info
slot._set_save_slice_info(variables.Variable.SaveSliceInfo(
slice_info.full_name + "/" + real_slot_name,
slice_info.full_shape[:],
slice_info.var_offset[:],
slice_info.var_shape[:]))
# pylint: enable=protected-access
return slot
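# A concrete check (hypothetical values) of the name slicing above:
#   primary.op.name == "linear//weights"
#   slot.name       == "linear//weights/Adam:0"
#   slot.name[len(primary.op.name + "/"):-2] == "Adam"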
def create_slot(primary, val, name, colocate_with_primary=True):
"""Create a slot initialized to the given value.
The type of the slot is determined by the given value.
Args:
primary: The primary `Variable` or `Tensor`.
val: A `Tensor` specifying the initial value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
# Scope the slot name in the namespace of the primary variable.
# Set "primary.op.name + '/' + name" as default name, so the scope name of
# optimizer can be shared when reuse is True. Meanwhile when reuse is False
# and the same name has been previously used, the scope name will add '_N'
# as suffix for unique identifications.
validate_shape = val.get_shape().is_fully_defined()
if context.executing_eagerly():
prefix = primary._shared_name # pylint: disable=protected-access
else:
prefix = primary.op.name
with variable_scope.variable_scope(None, prefix + "/" + name):
if colocate_with_primary:
distribution_strategy = distribution_strategy_context.get_strategy()
with distribution_strategy.extended.colocate_vars_with(primary):
return _create_slot_var(primary, val, "", validate_shape, None, None)
else:
return _create_slot_var(primary, val, "", validate_shape, None, None)
def create_slot_with_initializer(primary, initializer, shape, dtype, name,
colocate_with_primary=True):
"""Creates a slot initialized using an `Initializer`.
The type of the slot is determined by the given value.
Args:
primary: The primary `Variable` or `Tensor`.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
# Scope the slot name in the namespace of the primary variable.
# Set "primary.op.name + '/' + name" as default name, so the scope name of
# optimizer can be shared when reuse is True. Meanwhile when reuse is False
# and the same name has been previously used, the scope name will add '_N'
# as suffix for unique identifications.
validate_shape = shape.is_fully_defined()
if context.executing_eagerly():
prefix = primary._shared_name # pylint: disable=protected-access
else:
prefix = primary.op.name
with variable_scope.variable_scope(None, prefix + "/" + name):
if colocate_with_primary:
distribution_strategy = distribution_strategy_context.get_strategy()
with distribution_strategy.extended.colocate_vars_with(primary):
return _create_slot_var(primary, initializer, "", validate_shape, shape,
dtype)
else:
return _create_slot_var(primary, initializer, "", validate_shape, shape,
dtype)
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
"""Create a slot initialized to 0 with same shape as the primary object.
Args:
primary: The primary `Variable` or `Tensor`.
name: Name to use for the slot variable.
dtype: Type of the slot variable. Defaults to the type of `primary`.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
if dtype is None:
dtype = primary.dtype
slot_shape = primary.get_shape()
if slot_shape.is_fully_defined():
initializer = init_ops.zeros_initializer()
return create_slot_with_initializer(
primary, initializer, slot_shape, dtype, name,
colocate_with_primary=colocate_with_primary)
else:
if isinstance(primary, variables.Variable):
slot_shape = array_ops.shape(primary.initialized_value())
else:
slot_shape = array_ops.shape(primary)
val = array_ops.zeros(slot_shape, dtype=dtype)
return create_slot(primary, val, name,
colocate_with_primary=colocate_with_primary)
|
tensorflow-master
|
tensorflow/python/training/slot_creator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SameVariablesNoClearTest(test.TestCase):
# Verifies behavior of multiple variables with multiple sessions connecting to
# the same server.
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
@test_util.run_v1_only("b/120545219")
def testSameVariablesNoClear(self):
server = server_lib.Server.create_local_server()
with session.Session(server.target) as sess_1:
v0 = variables.VariableV1([[2, 1]], name="v0")
v1 = variables.VariableV1([[1], [2]], name="v1")
v2 = math_ops.matmul(v0, v1)
sess_1.run([v0.initializer, v1.initializer])
self.assertAllEqual([[4]], sess_1.run(v2))
with session.Session(server.target) as sess_2:
new_v0 = ops.get_default_graph().get_tensor_by_name("v0:0")
new_v1 = ops.get_default_graph().get_tensor_by_name("v1:0")
new_v2 = math_ops.matmul(new_v0, new_v1)
self.assertAllEqual([[4]], sess_2.run(new_v2))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/server_lib_same_variables_no_clear_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class OptimizerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testBasic(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b_%d' % i)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
# Note that for eager execution, minimize expects a function instead of a
# Tensor.
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64), name='global_step_%d' % i)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd_op.minimize(loss, global_step, [var0, var1])
self.evaluate(opt_op)
      # Validate updated params: d(loss)/d(var0) = 5 and lr = 3.0, so
      # var0 -> 1.0 - 15.0 = -14.0; d(loss)/d(var1) = 3, so var1 -> 3.0 - 9.0.
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_deprecated_v1
def testAggregationMethod(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(
cost,
global_step, [var0, var1],
aggregation_method=gradients_util.AggregationMethod.
EXPERIMENTAL_ACCUMULATE_N)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_deprecated_v1
def testPrecomputedGradient(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
cost = 5 * var0 + 3 * var1
grad_loss = constant_op.constant([42, -42], dtype=dtype)
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(
cost, global_step, [var0, var1], grad_loss=grad_loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
self.evaluate(var0))
self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
self.evaluate(var1))
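        # Added note: `grad_loss` scales each computed gradient elementwise,
        # so the update is lr * d(cost)/d(var) * grad_loss, which is exactly
        # what the two assertions above spell out.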
@test_util.run_in_graph_and_eager_modes
def testNoVariables(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# pylint: disable=cell-var-from-loop
def loss():
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype, trainable=False, name='a')
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtype, trainable=False, name='b')
return 5 * var0 + var1
# pylint: enable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegexp(ValueError, 'No.*variables'):
sgd_op.minimize(loss)
@test_util.run_in_graph_and_eager_modes
def testNoGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b%d' % i)
# pylint: disable=cell-var-from-loop
def loss():
return 5 * var0
# pylint: enable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegexp(ValueError, 'No gradients'):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_Minimize(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b_%d' % i)
def loss():
return constant_op.constant(5.0)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegexp(ValueError,
'No gradients provided for any variable'):
sgd_op.minimize(loss, var_list=[var0, var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_ApplyGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b_%d' % i)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegexp(ValueError,
'No gradients provided for any variable'):
sgd_op.apply_gradients([(None, var0), (None, var1)])
@test_util.run_in_graph_and_eager_modes
def testGradientsAsVariables(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b%d' % i)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
grads_and_vars = sgd_op.compute_gradients(loss, [var0, var1])
# Convert gradients to tf.Variables
converted_grads = [
resource_variable_ops.ResourceVariable(array_ops.zeros([2], dtype),
name='c_%d_%d' % (i, j))
for j, gv in enumerate(grads_and_vars)
]
convert_ops = [
state_ops.assign(converted_grads[j], gv[0])
for j, gv in enumerate(grads_and_vars)
]
self.evaluate(variables.global_variables_initializer())
      # Run convert_ops to materialize the converted gradients.
self.evaluate(convert_ops)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
opt_op = sgd_op.apply_gradients(converted_grads_and_vars)
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testComputeGradientsWithTensors(self):
x = ops.convert_to_tensor(1.0)
def f():
return x * x
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
grads_and_vars = sgd_op.compute_gradients(f, [x])
self.assertEqual(1, len(grads_and_vars))
grad, x_as_var = grads_and_vars[0]
self.assertIs(x, x_as_var)
self.assertEqual(2.0, self.evaluate(grad))
with self.assertRaises(NotImplementedError):
sgd_op.apply_gradients(grads_and_vars)
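    # Added note: `compute_gradients` accepts plain Tensors in `var_list`, but
    # `apply_gradients` needs mutable variables to update, hence the
    # NotImplementedError asserted above.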
@test_util.run_deprecated_v1
def testTrainOp(self):
with self.cached_session():
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([3.0, 4.0])
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
self.assertTrue(opt_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
@test_util.run_deprecated_v1
def testConstraint(self):
constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
with self.cached_session():
var0 = variables.Variable([1.0, 2.0],
constraint=constraint_01)
var1 = variables.Variable([3.0, 4.0],
constraint=constraint_0)
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([-0.1, -0.1], self.evaluate(var0))
self.assertAllClose([0., 0.], self.evaluate(var1))
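      # Added note: constraints are applied after the gradient step, so the
      # unconstrained results ([-14., -13.] and [-6., -5.]) are clipped into
      # [-0.1, 0.] and [0., 1.] respectively, giving the values above.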
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/training/optimizer_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A SessionRunHook extends `session.run()` calls for the `MonitoredSession`.
SessionRunHooks are useful to track training, report progress, request early
stopping and more. SessionRunHooks use the observer pattern and notify at the
following points:
- when a session starts being used
- before a call to `session.run()`
- after a call to `session.run()`
- when the session is closed
A SessionRunHook encapsulates a piece of reusable/composable computation that
can piggyback a call to `MonitoredSession.run()`. A hook can add ops, tensors,
or feeds to the run call, and when the run call finishes successfully it gets
the outputs it requested. Hooks are allowed to add ops to the graph in
`hook.begin()`. The graph is finalized after the `begin()` method is called.
There are a few pre-defined hooks:
- StopAtStepHook: requests stop based on global_step
- CheckpointSaverHook: saves checkpoints
- LoggingTensorHook: outputs one or more tensor values to the log
- NanTensorHook: requests stop if a given `Tensor` contains NaNs
- SummarySaverHook: saves summaries to a summary writer
For more specific needs, you can create custom hooks:
class ExampleHook(SessionRunHook):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def after_create_session(self, session, coord):
# When this is called, the graph is finalized and
# ops can no longer be added to the graph.
print('Session created.')
def before_run(self, run_context):
print('Before calling session.run().')
return SessionRunArgs(self.your_tensor)
def after_run(self, run_context, run_values):
    print('Done running one step. The value of my tensor: %s' %
          run_values.results)
if you-need-to-stop-loop:
run_context.request_stop()
def end(self, session):
print('Done with the session.')
To understand how hooks interact with calls to `MonitoredSession.run()`,
look at the following code:
with MonitoredTrainingSession(hooks=your_hooks, ...) as sess:
while not sess.should_stop():
sess.run(your_fetches)
The user code above leads to the following execution:
call hooks.begin()
sess = tf.compat.v1.Session()
call hooks.after_create_session()
while not stop is requested:
call hooks.before_run()
try:
results = sess.run(merged_fetches, feed_dict=merged_feeds)
except (errors.OutOfRangeError, StopIteration):
break
call hooks.after_run()
call hooks.end()
sess.close()
Note that if sess.run() raises OutOfRangeError or StopIteration then
hooks.after_run() will not be called but hooks.end() will still be called.
If sess.run() raises any other exception then neither hooks.after_run() nor
hooks.end() will be called.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.SessionRunHook"])
class SessionRunHook(object):
"""Hook to extend calls to MonitoredSession.run()."""
def begin(self):
"""Called once before using the session.
When called, the default graph is the one that will be launched in the
session. The hook can modify the graph by adding new operations to it.
    After the `begin()` call the graph will be finalized and the other
    callbacks cannot modify the graph anymore. A second call of `begin()` on
    the same graph should not change the graph.
"""
pass
def after_create_session(self, session, coord): # pylint: disable=unused-argument
"""Called when new TensorFlow session is created.
This is called to signal the hooks that a new session has been created. This
has two essential differences with the situation in which `begin` is called:
* When this is called, the graph is finalized and ops can no longer be added
to the graph.
* This method will also be called as a result of recovering a wrapped
session, not only at the beginning of the overall session.
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
pass
def before_run(self, run_context): # pylint: disable=unused-argument
"""Called before each call to run().
    You can return from this call a `SessionRunArgs` object indicating ops or
    tensors to add to the upcoming `run()` call. These ops/tensors will be run
    together with the ops/tensors passed to the original `run()` call.
The run args you return can also contain feeds to be added to the run()
call.
The `run_context` argument is a `SessionRunContext` that provides
information about the upcoming `run()` call: the originally requested
op/tensors, the TensorFlow Session.
    At this point the graph is finalized and you cannot add ops.
Args:
run_context: A `SessionRunContext` object.
Returns:
None or a `SessionRunArgs` object.
"""
return None
def after_run(self,
run_context, # pylint: disable=unused-argument
run_values): # pylint: disable=unused-argument
"""Called after each call to run().
    The `run_values` argument contains the results of the ops/tensors requested
    by `before_run()`.
    The `run_context` argument is the same one sent to the `before_run` call.
`run_context.request_stop()` can be called to stop the iteration.
If `session.run()` raises any exceptions then `after_run()` is not called.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
pass
def end(self, session): # pylint: disable=unused-argument
"""Called at the end of session.
The `session` argument can be used in case the hook wants to run final ops,
such as saving a last checkpoint.
    If `session.run()` raises an exception other than OutOfRangeError or
    StopIteration then `end()` is not called.
Note the difference between `end()` and `after_run()` behavior when
`session.run()` raises OutOfRangeError or StopIteration. In that case
`end()` is called but `after_run()` is not called.
Args:
session: A TensorFlow Session that will be soon closed.
"""
pass
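# A minimal sketch of a custom hook (illustrative addition, not part of the
# original module): it counts `session.run()` calls and requests a stop after
# a hypothetical limit, following the lifecycle documented above.
class _ExampleCountingHook(SessionRunHook):
  """Illustrative hook that requests a stop after `max_calls` run() calls."""
  def __init__(self, max_calls=100):
    self._max_calls = max_calls
    self._calls = 0
  def before_run(self, run_context):
    # This sketch needs no extra fetches or feeds.
    return None
  def after_run(self, run_context, run_values):
    self._calls += 1
    if self._calls >= self._max_calls:
      run_context.request_stop()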
@tf_export(v1=["train.SessionRunArgs"])
class SessionRunArgs(
collections.namedtuple("SessionRunArgs",
["fetches", "feed_dict", "options"])):
"""Represents arguments to be added to a `Session.run()` call.
Args:
    fetches: Exactly like the 'fetches' argument to `Session.run()`.
Can be a single tensor or op, a list of 'fetches' or a dictionary
of fetches. For example:
fetches = global_step_tensor
fetches = [train_op, summary_op, global_step_tensor]
fetches = {'step': global_step_tensor, 'summ': summary_op}
Note that this can recurse as expected:
fetches = {'step': global_step_tensor,
'ops': [train_op, check_nan_op]}
    feed_dict: Exactly like the `feed_dict` argument to `Session.run()`.
options: Exactly like the `options` argument to `Session.run()`, i.e., a
config_pb2.RunOptions proto.
"""
def __new__(cls, fetches, feed_dict=None, options=None):
return super(SessionRunArgs, cls).__new__(cls, fetches, feed_dict, options)
@tf_export(v1=["train.SessionRunContext"])
class SessionRunContext(object):
"""Provides information about the `session.run()` call being made.
  Provides information about the original request to `Session.run()`.
  SessionRunHook objects can stop the loop by calling `request_stop()` of
  `run_context`. In the future we may use this object to add more information
  about the run without changing the Hook API.
"""
def __init__(self, original_args, session):
"""Initializes SessionRunContext."""
self._original_args = original_args
self._session = session
self._stop_requested = False
@property
def original_args(self):
"""A `SessionRunArgs` object holding the original arguments of `run()`.
    If the user called `MonitoredSession.run(fetches=a, feed_dict=b)`, then
    this field is equal to `SessionRunArgs(a, b)`.
Returns:
A `SessionRunArgs` object
"""
return self._original_args
@property
def session(self):
"""A TensorFlow session object which will execute the `run`."""
return self._session
@property
def stop_requested(self):
"""Returns whether a stop is requested or not.
If true, `MonitoredSession` stops iterations.
Returns:
A `bool`
"""
return self._stop_requested
def request_stop(self):
"""Sets stop requested field.
Hooks can use this function to request stop of iterations.
`MonitoredSession` checks whether this is called or not.
"""
self._stop_requested = True
@tf_export(v1=["train.SessionRunValues"])
class SessionRunValues(
collections.namedtuple("SessionRunValues",
["results", "options", "run_metadata"])):
"""Contains the results of `Session.run()`.
  In the future we may use this object to add more information about the
  result of a run without changing the Hook API.
Args:
results: The return values from `Session.run()` corresponding to the fetches
attribute returned in the RunArgs. Note that this has the same shape as
the RunArgs fetches. For example:
fetches = global_step_tensor
=> results = nparray(int)
fetches = [train_op, summary_op, global_step_tensor]
=> results = [None, nparray(string), nparray(int)]
fetches = {'step': global_step_tensor, 'summ': summary_op}
=> results = {'step': nparray(int), 'summ': nparray(string)}
options: `RunOptions` from the `Session.run()` call.
run_metadata: `RunMetadata` from the `Session.run()` call.
"""
|
tensorflow-master
|
tensorflow/python/training/session_run_hook.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create threads to run multiple enqueue ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import weakref
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
_DEPRECATION_INSTRUCTION = (
"To construct input pipelines, use the `tf.data` module.")
@tf_export(v1=["train.queue_runner.QueueRunner", "train.QueueRunner"])
class QueueRunner(object):
"""Holds a list of enqueue operations for a queue, each to be run in a thread.
Queues are a convenient TensorFlow mechanism to compute tensors
asynchronously using multiple threads. For example in the canonical 'Input
Reader' setup one set of threads generates filenames in a queue; a second set
of threads reads records from the files, processes them, and enqueues tensors
on a second queue; a third set of threads dequeues these input records to
construct batches and runs them through training operations.
There are several delicate issues when running multiple threads that way:
closing the queues in sequence as the input is exhausted, correctly catching
and reporting exceptions, etc.
The `QueueRunner`, combined with the `Coordinator`, helps handle these issues.
@compatibility(eager)
QueueRunners are not compatible with eager execution. Instead, please
use `tf.data` to get data into your model.
@end_compatibility
"""
@deprecation.deprecated(None, _DEPRECATION_INSTRUCTION)
def __init__(self, queue=None, enqueue_ops=None, close_op=None,
cancel_op=None, queue_closed_exception_types=None,
queue_runner_def=None, import_scope=None):
"""Create a QueueRunner.
On construction the `QueueRunner` adds an op to close the queue. That op
will be run if the enqueue ops raise exceptions.
When you later call the `create_threads()` method, the `QueueRunner` will
create one thread for each op in `enqueue_ops`. Each thread will run its
enqueue op in parallel with the other threads. The enqueue ops do not have
to all be the same op, but it is expected that they all enqueue tensors in
`queue`.
Args:
queue: A `Queue`.
enqueue_ops: List of enqueue ops to run in threads later.
close_op: Op to close the queue. Pending enqueue ops are preserved.
cancel_op: Op to close the queue and cancel pending enqueue ops.
queue_closed_exception_types: Optional tuple of Exception types that
indicate that the queue has been closed when raised during an enqueue
operation. Defaults to `(tf.errors.OutOfRangeError,)`. Another common
case includes `(tf.errors.OutOfRangeError, tf.errors.CancelledError)`,
when some of the enqueue ops may dequeue from other Queues.
queue_runner_def: Optional `QueueRunnerDef` protocol buffer. If specified,
recreates the QueueRunner from its contents. `queue_runner_def` and the
other arguments are mutually exclusive.
import_scope: Optional `string`. Name scope to add. Only used when
initializing from protocol buffer.
Raises:
      ValueError: If `queue_runner_def` and `queue` are both specified.
ValueError: If `queue` or `enqueue_ops` are not provided when not
restoring from `queue_runner_def`.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError(
"QueueRunners are not supported when eager execution is enabled. "
"Instead, please use tf.data to get data into your model.")
if queue_runner_def:
if queue or enqueue_ops:
raise ValueError("queue_runner_def and queue are mutually exclusive.")
self._init_from_proto(queue_runner_def,
import_scope=import_scope)
else:
self._init_from_args(
queue=queue, enqueue_ops=enqueue_ops,
close_op=close_op, cancel_op=cancel_op,
queue_closed_exception_types=queue_closed_exception_types)
# Protect the count of runs to wait for.
self._lock = threading.Lock()
# A map from a session object to the number of outstanding queue runner
# threads for that session.
self._runs_per_session = weakref.WeakKeyDictionary()
# List of exceptions raised by the running threads.
self._exceptions_raised = []
def _init_from_args(self, queue=None, enqueue_ops=None, close_op=None,
cancel_op=None, queue_closed_exception_types=None):
"""Create a QueueRunner from arguments.
Args:
queue: A `Queue`.
enqueue_ops: List of enqueue ops to run in threads later.
close_op: Op to close the queue. Pending enqueue ops are preserved.
cancel_op: Op to close the queue and cancel pending enqueue ops.
queue_closed_exception_types: Tuple of exception types, which indicate
the queue has been safely closed.
Raises:
ValueError: If `queue` or `enqueue_ops` are not provided when not
restoring from `queue_runner_def`.
TypeError: If `queue_closed_exception_types` is provided, but is not
a non-empty tuple of error types (subclasses of `tf.errors.OpError`).
"""
if not queue or not enqueue_ops:
raise ValueError("Must provide queue and enqueue_ops.")
self._queue = queue
self._enqueue_ops = enqueue_ops
self._close_op = close_op
self._cancel_op = cancel_op
if queue_closed_exception_types is not None:
if (not isinstance(queue_closed_exception_types, tuple)
or not queue_closed_exception_types
or not all(issubclass(t, errors.OpError)
for t in queue_closed_exception_types)):
raise TypeError(
"queue_closed_exception_types, when provided, "
"must be a tuple of tf.error types, but saw: %s"
% queue_closed_exception_types)
self._queue_closed_exception_types = queue_closed_exception_types
# Close when no more will be produced, but pending enqueues should be
# preserved.
if self._close_op is None:
self._close_op = self._queue.close()
# Close and cancel pending enqueues since there was an error and we want
# to unblock everything so we can cleanly exit.
if self._cancel_op is None:
self._cancel_op = self._queue.close(cancel_pending_enqueues=True)
if not self._queue_closed_exception_types:
self._queue_closed_exception_types = (errors.OutOfRangeError,)
else:
self._queue_closed_exception_types = tuple(
self._queue_closed_exception_types)
def _init_from_proto(self, queue_runner_def, import_scope=None):
"""Create a QueueRunner from `QueueRunnerDef`.
Args:
queue_runner_def: Optional `QueueRunnerDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(queue_runner_def, queue_runner_pb2.QueueRunnerDef)
g = ops.get_default_graph()
self._queue = g.as_graph_element(
ops.prepend_name_scope(queue_runner_def.queue_name, import_scope))
self._enqueue_ops = [g.as_graph_element(
ops.prepend_name_scope(op, import_scope))
for op in queue_runner_def.enqueue_op_name]
self._close_op = g.as_graph_element(ops.prepend_name_scope(
queue_runner_def.close_op_name, import_scope))
self._cancel_op = g.as_graph_element(ops.prepend_name_scope(
queue_runner_def.cancel_op_name, import_scope))
self._queue_closed_exception_types = tuple(
errors.exception_type_from_error_code(code)
for code in queue_runner_def.queue_closed_exception_types)
# Legacy support for old QueueRunnerDefs created before this field
# was added.
if not self._queue_closed_exception_types:
self._queue_closed_exception_types = (errors.OutOfRangeError,)
@property
def queue(self):
return self._queue
@property
def enqueue_ops(self):
return self._enqueue_ops
@property
def close_op(self):
return self._close_op
@property
def cancel_op(self):
return self._cancel_op
@property
def queue_closed_exception_types(self):
return self._queue_closed_exception_types
@property
def exceptions_raised(self):
"""Exceptions raised but not handled by the `QueueRunner` threads.
Exceptions raised in queue runner threads are handled in one of two ways
depending on whether or not a `Coordinator` was passed to
`create_threads()`:
* With a `Coordinator`, exceptions are reported to the coordinator and
forgotten by the `QueueRunner`.
* Without a `Coordinator`, exceptions are captured by the `QueueRunner` and
made available in this `exceptions_raised` property.
Returns:
A list of Python `Exception` objects. The list is empty if no exception
was captured. (No exceptions are captured when using a Coordinator.)
"""
return self._exceptions_raised
@property
def name(self):
"""The string name of the underlying Queue."""
return self._queue.name
# pylint: disable=broad-except
def _run(self, sess, enqueue_op, coord=None):
"""Execute the enqueue op in a loop, close the queue in case of error.
Args:
sess: A Session.
enqueue_op: The Operation to run.
coord: Optional Coordinator object for reporting errors and checking
for stop conditions.
"""
decremented = False
try:
# Make a cached callable from the `enqueue_op` to decrease the
# Python overhead in the queue-runner loop.
enqueue_callable = sess.make_callable(enqueue_op)
while True:
if coord and coord.should_stop():
break
try:
enqueue_callable()
except self._queue_closed_exception_types: # pylint: disable=catching-non-exception
# This exception indicates that a queue was closed.
with self._lock:
self._runs_per_session[sess] -= 1
decremented = True
if self._runs_per_session[sess] == 0:
try:
sess.run(self._close_op)
except Exception as e:
# Intentionally ignore errors from close_op.
logging.vlog(1, "Ignored exception: %s", str(e))
return
except Exception as e:
# This catches all other exceptions.
if coord:
coord.request_stop(e)
else:
logging.error("Exception in QueueRunner: %s", str(e))
with self._lock:
self._exceptions_raised.append(e)
raise
finally:
# Make sure we account for all terminations: normal or errors.
if not decremented:
with self._lock:
self._runs_per_session[sess] -= 1
def _close_on_stop(self, sess, cancel_op, coord):
"""Close the queue when the Coordinator requests stop.
Args:
sess: A Session.
cancel_op: The Operation to run.
coord: Coordinator.
"""
coord.wait_for_stop()
try:
sess.run(cancel_op)
except Exception as e:
# Intentionally ignore errors from cancel_op.
logging.vlog(1, "Ignored exception: %s", str(e))
# pylint: enable=broad-except
def create_threads(self, sess, coord=None, daemon=False, start=False):
"""Create threads to run the enqueue ops for the given session.
This method requires a session in which the graph was launched. It creates
a list of threads, optionally starting them. There is one thread for each
op passed in `enqueue_ops`.
The `coord` argument is an optional coordinator that the threads will use
to terminate together and report exceptions. If a coordinator is given,
this method starts an additional thread to close the queue when the
coordinator requests a stop.
If previously created threads for the given session are still running, no
new threads will be created.
Args:
sess: A `Session`.
coord: Optional `Coordinator` object for reporting errors and checking
stop conditions.
daemon: Boolean. If `True` make the threads daemon threads.
start: Boolean. If `True` starts the threads. If `False` the
caller must call the `start()` method of the returned threads.
Returns:
A list of threads.
"""
with self._lock:
try:
if self._runs_per_session[sess] > 0:
# Already started: no new threads to return.
return []
except KeyError:
# We haven't seen this session yet.
pass
self._runs_per_session[sess] = len(self._enqueue_ops)
self._exceptions_raised = []
ret_threads = []
for op in self._enqueue_ops:
name = "QueueRunnerThread-{}-{}".format(self.name, op.name)
ret_threads.append(threading.Thread(target=self._run,
args=(sess, op, coord),
name=name))
if coord:
name = "QueueRunnerThread-{}-close_on_stop".format(self.name)
ret_threads.append(threading.Thread(target=self._close_on_stop,
args=(sess, self._cancel_op, coord),
name=name))
for t in ret_threads:
if coord:
coord.register_thread(t)
if daemon:
t.daemon = True
if start:
t.start()
return ret_threads
def to_proto(self, export_scope=None):
"""Converts this `QueueRunner` to a `QueueRunnerDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
      A `QueueRunnerDef` protocol buffer, or `None` if the `QueueRunner` is
      not in the specified name scope.
"""
if (export_scope is None or
self.queue.name.startswith(export_scope)):
queue_runner_def = queue_runner_pb2.QueueRunnerDef()
queue_runner_def.queue_name = ops.strip_name_scope(
self.queue.name, export_scope)
for enqueue_op in self.enqueue_ops:
queue_runner_def.enqueue_op_name.append(
ops.strip_name_scope(enqueue_op.name, export_scope))
queue_runner_def.close_op_name = ops.strip_name_scope(
self.close_op.name, export_scope)
queue_runner_def.cancel_op_name = ops.strip_name_scope(
self.cancel_op.name, export_scope)
queue_runner_def.queue_closed_exception_types.extend([
errors.error_code_from_exception_type(cls)
for cls in self._queue_closed_exception_types])
return queue_runner_def
else:
return None
@staticmethod
def from_proto(queue_runner_def, import_scope=None):
"""Returns a `QueueRunner` object created from `queue_runner_def`."""
return QueueRunner(queue_runner_def=queue_runner_def,
import_scope=import_scope)
@tf_export(v1=["train.queue_runner.add_queue_runner", "train.add_queue_runner"])
@deprecation.deprecated(None, _DEPRECATION_INSTRUCTION)
def add_queue_runner(qr, collection=ops.GraphKeys.QUEUE_RUNNERS):
"""Adds a `QueueRunner` to a collection in the graph.
When building a complex model that uses many queues it is often difficult to
gather all the queue runners that need to be run. This convenience function
allows you to add a queue runner to a well known collection in the graph.
The companion method `start_queue_runners()` can be used to start threads for
all the collected queue runners.
Args:
qr: A `QueueRunner`.
collection: A `GraphKey` specifying the graph collection to add
the queue runner to. Defaults to `GraphKeys.QUEUE_RUNNERS`.
"""
ops.add_to_collection(collection, qr)
@tf_export(v1=["train.queue_runner.start_queue_runners",
"train.start_queue_runners"])
@deprecation.deprecated(None, _DEPRECATION_INSTRUCTION)
def start_queue_runners(sess=None, coord=None, daemon=True, start=True,
collection=ops.GraphKeys.QUEUE_RUNNERS):
"""Starts all queue runners collected in the graph.
This is a companion method to `add_queue_runner()`. It just starts
threads for all queue runners collected in the graph. It returns
the list of all threads.
Args:
sess: `Session` used to run the queue ops. Defaults to the
default session.
coord: Optional `Coordinator` for coordinating the started threads.
daemon: Whether the threads should be marked as `daemons`, meaning
they don't block program exit.
start: Set to `False` to only create the threads, not start them.
collection: A `GraphKey` specifying the graph collection to
get the queue runners from. Defaults to `GraphKeys.QUEUE_RUNNERS`.
  Returns:
    A list of threads.
  Raises:
    RuntimeError: If called with eager execution enabled.
    ValueError: If `sess` is None and there isn't any default session.
    TypeError: If `sess` is not a `tf.compat.v1.Session` object.
@compatibility(eager)
Not compatible with eager execution. To ingest data under eager execution,
use the `tf.data` API instead.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError("Queues are not compatible with eager execution.")
if sess is None:
sess = ops.get_default_session()
if not sess:
raise ValueError("Cannot start queue runners: No default session is "
"registered. Use `with sess.as_default()` or pass an "
"explicit session to tf.start_queue_runners(sess=sess)")
if not isinstance(sess, session.SessionInterface):
    # The following check exists for backward compatibility. (b/62061352)
if sess.__class__.__name__ in [
"MonitoredSession", "SingularMonitoredSession"]:
return []
raise TypeError("sess must be a `tf.Session` object. "
"Given class: {}".format(sess.__class__))
queue_runners = ops.get_collection(collection)
if not queue_runners:
logging.warning(
"`tf.train.start_queue_runners()` was called when no queue runners "
"were defined. You can safely remove the call to this deprecated "
"function.")
with sess.graph.as_default():
threads = []
for qr in ops.get_collection(collection):
threads.extend(qr.create_threads(sess, coord=coord, daemon=daemon,
start=start))
return threads
ops.register_proto_function(ops.GraphKeys.QUEUE_RUNNERS,
proto_type=queue_runner_pb2.QueueRunnerDef,
to_proto=QueueRunner.to_proto,
from_proto=QueueRunner.from_proto)
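# A minimal usage sketch (illustrative addition, assuming a graph-mode
# program with a `queue` and an `enqueue_op` already built): it wires a
# QueueRunner to a Coordinator via the collection helpers defined above.
def _example_queue_runner_usage(queue, enqueue_op):
  """Hypothetical helper showing the add/start/join pattern."""
  from tensorflow.python.training import coordinator
  qr = QueueRunner(queue, [enqueue_op] * 2)  # two enqueue threads
  add_queue_runner(qr)
  with session.Session() as sess:
    coord = coordinator.Coordinator()
    threads = start_queue_runners(sess=sess, coord=coord)
    # ... run training steps that dequeue from `queue` here ...
    coord.request_stop()
    coord.join(threads)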
|
tensorflow-master
|
tensorflow/python/training/queue_runner_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SameVariablesClearContainerTest(test.TestCase):
# Verifies behavior of tf.Session.reset() with multiple containers using
# default container names as defined by the target name.
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
@test_util.run_deprecated_v1
def testSameVariablesClearContainer(self):
# Starts two servers with different names so they map to different
# resource "containers".
server0 = server_lib.Server(
{
"local0": ["localhost:0"]
}, protocol="grpc", start=True)
server1 = server_lib.Server(
{
"local1": ["localhost:0"]
}, protocol="grpc", start=True)
# Creates a graph with 2 variables.
v0 = variables.Variable(1.0, name="v0")
v1 = variables.Variable(2.0, name="v0")
# Initializes the variables. Verifies that the values are correct.
sess_0 = session.Session(server0.target)
sess_1 = session.Session(server1.target)
sess_0.run(v0.initializer)
sess_1.run(v1.initializer)
self.assertAllEqual(1.0, sess_0.run(v0))
self.assertAllEqual(2.0, sess_1.run(v1))
# Resets container "local0". Verifies that v0 is no longer initialized.
session.Session.reset(server0.target, ["local0"])
sess = session.Session(server0.target)
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v0)
# Reinitializes v0 for the following test.
self.evaluate(v0.initializer)
# Verifies that v1 is still valid.
self.assertAllEqual(2.0, sess_1.run(v1))
# Resets container "local1". Verifies that v1 is no longer initialized.
session.Session.reset(server1.target, ["local1"])
sess = session.Session(server1.target)
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v1)
# Verifies that v0 is still valid.
sess = session.Session(server0.target)
self.assertAllEqual(1.0, self.evaluate(v0))
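    # Added summary: `Session.reset` clears only the resource container named
    # for the reset target, so variables living in the other server's default
    # container remain initialized.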
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/server_lib_same_variables_clear_container_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deprecated, please use ../distribute/distribute_lib.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.python.distribute.distribute_lib import *
|
tensorflow-master
|
tensorflow/python/training/distribute.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for AdagradDA operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad_da
class AdagradDAOptimizerTest(test.TestCase):
def doTestAdagradDAwithoutRegularizationBasic1(self, use_resource=False):
for dtype in [dtypes.float64, dtypes.float32]:
with self.cached_session() as sess:
global_step = variables.Variable(0, dtype=dtypes.int64)
if use_resource:
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run a step of AdagradDA
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
        # Let g be the gradient accumulator, gg the gradient squared
        # accumulator, T the global step, lr the learning rate, and k the
        # initial gradient squared accumulator value.
        # w = \dfrac{sign(-g)*lr*|g - l1*T|_{+}}{l2*T*lr + \sqrt{k+gg}}
        # For var0[0]: sign(-0.1)*3.0*(0.1 - 0)/(0 + sqrt(0.1 + 0.1*0.1))
        #   = -0.904534, and similarly for the others.
self.assertAllCloseAccordingToType(
np.array([-0.904534, -1.603567]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.094821, -0.189358]), v1_val)
@test_util.run_deprecated_v1
def testAdagradDAWithoutRegularizationBasic1(self):
self.doTestAdagradDAwithoutRegularizationBasic1()
@test_util.run_deprecated_v1
def testResourceAdagradDAWithoutRegularizationBasic1(self):
self.doTestAdagradDAwithoutRegularizationBasic1(use_resource=True)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = adagrad_da.AdagradDAOptimizer(
1.0, global_step).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[-1, -1]],
self.evaluate(var0),
rtol=0.01)
@test_util.run_deprecated_v1
def testAdagradDAwithoutRegularizationBasic2(self):
for dtype in [dtypes.float64, dtypes.float32]:
with self.cached_session() as sess:
global_step = variables.Variable(0, dtype=dtypes.int64)
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run a step of AdagradDA
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.904534, -1.603567]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.094821, -0.189358]), v1_val)
@test_util.run_deprecated_v1
def testAdagradDAWithL1(self):
for dtype in [dtypes.float64, dtypes.float32]:
with self.cached_session() as sess:
global_step = variables.Variable(0, dtype=dtypes.int64)
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run a step of AdagradDA
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.895489, -1.59555]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.085339, -0.17989]), v1_val)
@test_util.run_deprecated_v1
def testAdagradDAWithL1_L2(self):
for dtype in [dtypes.float64, dtypes.float32]:
with self.cached_session() as sess:
global_step = variables.Variable(0, dtype=dtypes.int64)
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run a step of AdagradDA
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.046907, -0.093659]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.004275, -0.009023]), v1_val)
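# A minimal NumPy sketch (illustrative addition): it reproduces the first
# AdagradDA step in closed form, matching the expected values asserted in the
# tests above. The helper name and defaults are hypothetical.
def _adagrad_da_first_step(g, lr=3.0, k=0.1, l1=0.0, l2=0.0, t=1):
  """Computes -sign(g)*lr*max(|g| - l1*t, 0) / (l2*t*lr + sqrt(k + g*g))."""
  g = np.asarray(g, dtype=np.float64)
  gg = g * g  # gradient-squared accumulator after one step
  return -np.sign(g) * lr * np.maximum(np.abs(g) - l1 * t, 0.0) / (
      l2 * t * lr + np.sqrt(k + gg))
# For example, _adagrad_da_first_step([0.1, 0.2]) is approximately
# [-0.904534, -1.603567], the values checked in the tests above.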
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/adagrad_da_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.tensorboard_logging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import time
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer
from tensorflow.python.training import tensorboard_logging
@test_util.run_deprecated_v1
class EventLoggingTest(test.TestCase):
def setUp(self):
self._work_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self._sw = writer.FileWriter(self._work_dir)
tensorboard_logging.set_summary_writer(self._sw)
self.addCleanup(shutil.rmtree, self._work_dir)
# Stop the clock to avoid test flakiness.
now = time.time()
time._real_time = time.time
time.time = lambda: now
# Mock out logging calls so we can verify that the right number of messages
# get logged.
self.logged_message_count = 0
self._actual_log = logging.log
def mockLog(*args, **kwargs):
self.logged_message_count += 1
self._actual_log(*args, **kwargs)
logging.log = mockLog
def tearDown(self):
time.time = time._real_time
logging.log = self._actual_log
def assertLoggedMessagesAre(self, expected_messages):
self._sw.close()
event_paths = glob.glob(os.path.join(self._work_dir, "event*"))
    # If the test runs multiple times in the same directory we can have
    # more than one matching event file. We only want to read the last one.
self.assertTrue(event_paths)
event_reader = summary_iterator.summary_iterator(event_paths[-1])
# Skip over the version event.
next(event_reader)
for level, message in expected_messages:
event = next(event_reader)
self.assertEqual(event.wall_time, time.time())
self.assertEqual(event.log_message.level, level)
self.assertEqual(event.log_message.message, message)
def testBasic(self):
tensorboard_logging.set_summary_writer(self._sw)
tensorboard_logging.error("oh no!")
tensorboard_logging.error("for%s", "mat")
self.assertLoggedMessagesAre([(event_pb2.LogMessage.ERROR, "oh no!"),
(event_pb2.LogMessage.ERROR, "format")])
self.assertEqual(2, self.logged_message_count)
def testVerbosity(self):
tensorboard_logging.set_summary_writer(self._sw)
tensorboard_logging.set_verbosity(tensorboard_logging.ERROR)
tensorboard_logging.warn("warn")
tensorboard_logging.error("error")
tensorboard_logging.set_verbosity(tensorboard_logging.DEBUG)
tensorboard_logging.debug("debug")
self.assertLoggedMessagesAre([(event_pb2.LogMessage.ERROR, "error"),
(event_pb2.LogMessage.DEBUGGING, "debug")])
    # All messages should be logged because tensorboard_logging verbosity
    # doesn't affect logging verbosity.
self.assertEqual(3, self.logged_message_count)
def testBadVerbosity(self):
with self.assertRaises(ValueError):
tensorboard_logging.set_verbosity("failure")
with self.assertRaises(ValueError):
tensorboard_logging.log("bad", "dead")
def testNoSummaryWriter(self):
"""Test that logging without a SummaryWriter succeeds."""
tensorboard_logging.set_summary_writer(None)
tensorboard_logging.warn("this should work")
self.assertEqual(1, self.logged_message_count)
def testSummaryWriterFailsAfterClear(self):
tensorboard_logging._clear_summary_writer()
with self.assertRaises(RuntimeError):
tensorboard_logging.log(tensorboard_logging.ERROR, "failure")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/tensorboard_logging_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for moving_averages.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import moving_averages
from tensorflow.python.training import saver as saver_lib
class MovingAveragesTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testAssignMovingAverageWithoutZeroDebias(self):
var = variables.Variable([10.0, 11.0])
val = constant_op.constant([1.0, 2.0], dtypes.float32)
decay = 0.25
if context.executing_eagerly():
self.assertAllClose([10.0, 11.0], self.evaluate(var))
assign = moving_averages.assign_moving_average(
var, val, decay, zero_debias=False)
self.assertAllClose(
[10.0 * 0.25 + 1.0 * (1.0 - 0.25), 11.0 * 0.25 + 2.0 * (1.0 - 0.25)],
self.evaluate(var))
else:
assign = moving_averages.assign_moving_average(
var, val, decay, zero_debias=False)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([10.0, 11.0], self.evaluate(var))
assign.op.run()
self.assertAllClose(
[10.0 * 0.25 + 1.0 * (1.0 - 0.25), 11.0 * 0.25 + 2.0 * (1.0 - 0.25)],
self.evaluate(var))
@test_util.run_in_graph_and_eager_modes
def testAssignMovingAverage(self):
with self.cached_session():
var = variables.Variable([0.0, 0.0])
val = constant_op.constant([1.0, 2.0], dtypes.float32)
decay = 0.25
if context.executing_eagerly():
self.assertAllClose([0.0, 0.0], self.evaluate(var))
assign = moving_averages.assign_moving_average(var, val, decay)
self.assertAllClose(
[1.0 * (1.0 - 0.25) / (1 - 0.25), 2.0 * (1.0 - 0.25) / (1 - 0.25)],
self.evaluate(var))
else:
assign = moving_averages.assign_moving_average(var, val, decay)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([0.0, 0.0], self.evaluate(var))
assign.op.run()
self.assertAllClose(
[1.0 * (1.0 - 0.25) / (1 - 0.25), 2.0 * (1.0 - 0.25) / (1 - 0.25)],
self.evaluate(var))
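    # Added note: assign_moving_average computes
    #   var = decay * var + (1 - decay) * val,
    # and with zero debiasing (the default) the raw average is divided by
    # (1 - decay**num_updates). After a single update from a zero start this
    # gives val * (1 - 0.25) / (1 - 0.25) == val, matching the expected
    # values above.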
@test_util.run_deprecated_v1
def testAssignMovingAverageNewNamingMultipleCalls(self):
with variable_scope.variable_scope("scope1") as vs1:
with variable_scope.variable_scope("scope2"):
var = variables.Variable(1.0, name="Var")
moving_averages.assign_moving_average(var, 0.0, 0.99)
moving_averages.assign_moving_average(var, 0.0, 0.99)
expected_names = ["scope1/scope2/Var:0",
"scope1/scope2/scope1/scope2/Var/biased:0",
"scope1/scope2/scope1/scope2/Var/local_step:0",
"scope1/scope2/scope1/scope2/Var/biased_1:0",
"scope1/scope2/scope1/scope2/Var/local_step_1:0"]
actual_names = [v.name for v in vs1.global_variables()]
self.assertSetEqual(set(expected_names), set(actual_names))
@test_util.run_deprecated_v1
def testAssignMovingAverageNewNamingMultipleCallsWithReuse(self):
with variable_scope.variable_scope("scope1") as vs1:
var = variable_scope.get_variable("Var", shape=[])
moving_averages.assign_moving_average(var, 0.0, 0.99)
moving_averages.assign_moving_average(var, 0.0, 0.99)
with variable_scope.variable_scope(vs1, reuse=True):
var = variable_scope.get_variable("Var", shape=[])
moving_averages.assign_moving_average(var, 0.0, 0.99)
moving_averages.assign_moving_average(var, 0.0, 0.99)
@test_util.run_deprecated_v1
def testWeightedMovingAverage(self):
with self.cached_session() as sess:
decay = 0.5
weight = array_ops.placeholder(dtypes.float32, [])
val = array_ops.placeholder(dtypes.float32, [])
wma = moving_averages.weighted_moving_average(val, decay, weight)
self.evaluate(variables.global_variables_initializer())
# Get the first weighted moving average.
val_1 = 3.0
weight_1 = 4.0
wma_array = sess.run(wma, feed_dict={val: val_1, weight: weight_1})
numerator_1 = val_1 * weight_1 * (1.0 - decay)
denominator_1 = weight_1 * (1.0 - decay)
self.assertAllClose(numerator_1 / denominator_1, wma_array)
# Get the second weighted moving average.
val_2 = 11.0
weight_2 = 22.0
wma_array = sess.run(wma, feed_dict={val: val_2, weight: weight_2})
numerator_2 = numerator_1 * decay + val_2 * weight_2 * (1.0 - decay)
denominator_2 = denominator_1 * decay + weight_2 * (1.0 - decay)
self.assertAllClose(numerator_2 / denominator_2, wma_array)
@test_util.run_deprecated_v1
def testWeightedMovingAverageBfloat16(self):
bfloat16 = pywrap_tensorflow.TF_bfloat16_type()
with self.cached_session() as sess:
decay = 0.5
weight = array_ops.placeholder(dtypes.bfloat16, [])
val = array_ops.placeholder(dtypes.bfloat16, [])
wma = moving_averages.weighted_moving_average(val, decay, weight)
self.evaluate(variables.global_variables_initializer())
# Get the first weighted moving average.
val_1 = 3.0
weight_1 = 4.0
wma_array = sess.run(wma, feed_dict={val: val_1, weight: weight_1})
numerator_1 = val_1 * weight_1 * (1.0 - decay)
denominator_1 = weight_1 * (1.0 - decay)
self.assertAllClose(numerator_1 / denominator_1, wma_array)
# Get the second weighted moving average.
val_2 = 11.0
weight_2 = 22.0
wma_array = sess.run(wma, feed_dict={val: val_2, weight: weight_2})
numerator_2 = numerator_1 * decay + val_2 * weight_2 * (1.0 - decay)
denominator_2 = denominator_1 * decay + weight_2 * (1.0 - decay)
self.assertAllClose(bfloat16(numerator_2 / denominator_2), wma_array)
def _Repeat(value, dim):
if dim == 1:
return value
return [value] * dim
class ExponentialMovingAverageTest(test.TestCase):
def _CheckDecay(self, ema, actual_decay, dim):
def _Scale(dk, steps):
if ema._zero_debias:
return 1 - dk**steps
else:
return 1
tens = _Repeat(10.0, dim)
thirties = _Repeat(30.0, dim)
var0 = variables.Variable(tens, name="v0")
var1 = variables.Variable(thirties, name="v1")
self.evaluate(variables.global_variables_initializer())
# Note that tensor2 is not a Variable but just a plain Tensor resulting
# from the sum operation.
tensor2 = var0 + var1
update = ema.apply([var0, var1, tensor2])
avg0 = ema.average(var0)
avg1 = ema.average(var1)
avg2 = ema.average(tensor2)
self.assertItemsEqual([var0, var1], variables.moving_average_variables())
self.assertFalse(avg0 in variables.trainable_variables())
self.assertFalse(avg1 in variables.trainable_variables())
self.assertFalse(avg2 in variables.trainable_variables())
self.evaluate(variables.global_variables_initializer())
self.assertEqual("v0/ExponentialMovingAverage:0", avg0.name)
self.assertEqual("v1/ExponentialMovingAverage:0", avg1.name)
self.assertEqual("add/ExponentialMovingAverage:0", avg2.name)
# Check initial values.
self.assertAllClose(tens, self.evaluate(var0))
self.assertAllClose(thirties, self.evaluate(var1))
self.assertAllClose(_Repeat(10.0 + 30.0, dim), self.evaluate(tensor2))
# Check that averages are initialized correctly.
self.assertAllClose(tens, self.evaluate(avg0))
self.assertAllClose(thirties, self.evaluate(avg1))
    # Note that averages of Tensors initialize to zeros_like since no value
    # of the Tensor is known because the Op has not been run (yet).
self.assertAllClose(_Repeat(0.0, dim), self.evaluate(avg2))
# Update the averages and check.
update.run()
dk = actual_decay
expected = _Repeat(10.0 * dk + 10.0 * (1 - dk), dim)
self.assertAllClose(expected, self.evaluate(avg0))
expected = _Repeat(30.0 * dk + 30.0 * (1 - dk), dim)
self.assertAllClose(expected, self.evaluate(avg1))
expected = _Repeat(0.0 * dk + (10.0 + 30.0) * (1 - dk) / _Scale(dk, 1), dim)
self.assertAllClose(expected, self.evaluate(avg2))
# Again, update the averages and check.
update.run()
expected = _Repeat((10.0 * dk + 10.0 * (1 - dk)) * dk + 10.0 * (1 - dk),
dim)
self.assertAllClose(expected, self.evaluate(avg0))
expected = _Repeat((30.0 * dk + 30.0 * (1 - dk)) * dk + 30.0 * (1 - dk),
dim)
self.assertAllClose(expected, self.evaluate(avg1))
expected = _Repeat(((0.0 * dk + (10.0 + 30.0) * (1 - dk)) * dk +
(10.0 + 30.0) * (1 - dk)) / _Scale(dk, 2), dim)
self.assertAllClose(expected, self.evaluate(avg2))
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNoNumUpdates_Scalar(self):
with self.cached_session():
ema = moving_averages.ExponentialMovingAverage(0.25)
self._CheckDecay(ema, actual_decay=0.25, dim=1)
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNoNumUpdates_Scalar_Debias(self):
with self.cached_session():
ema = moving_averages.ExponentialMovingAverage(0.25, zero_debias=True)
self._CheckDecay(ema, actual_decay=0.25, dim=1)
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNoNumUpdates_Vector(self):
with self.cached_session():
ema = moving_averages.ExponentialMovingAverage(0.25)
self._CheckDecay(ema, actual_decay=0.25, dim=5)
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNoNumUpdates_Vector_Debias(self):
with self.cached_session():
ema = moving_averages.ExponentialMovingAverage(0.25, zero_debias=True)
self._CheckDecay(ema, actual_decay=0.25, dim=5)
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNumUpdates_Scalar(self):
with self.cached_session():
# With num_updates 1, the decay applied is 0.1818
ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1)
self._CheckDecay(ema, actual_decay=0.181818, dim=1)
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNumUpdates_Scalar_Debias(self):
with self.cached_session():
# With num_updates 1, the decay applied is 0.1818
ema = moving_averages.ExponentialMovingAverage(
0.25, num_updates=1, zero_debias=True)
self._CheckDecay(ema, actual_decay=0.181818, dim=1)
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNumUpdates_Vector(self):
with self.cached_session():
# With num_updates 1, the decay applied is 0.1818
ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1)
self._CheckDecay(ema, actual_decay=0.181818, dim=5)
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNumUpdates_Vector_Debias(self):
with self.cached_session():
# With num_updates 1, the decay applied is 0.1818
ema = moving_averages.ExponentialMovingAverage(
0.25, num_updates=1, zero_debias=True)
self._CheckDecay(ema, actual_decay=0.181818, dim=5)
@test_util.run_v1_only("b/120545219")
def testAverageVariablesWithControlDeps(self):
with self.cached_session() as sess:
v0 = variables.Variable(0, name="v0")
add_to_v0 = v0.assign_add(1)
v1 = variables.Variable([10.0], name="v1")
assign_to_v1 = v1.assign([20.0])
ema = moving_averages.ExponentialMovingAverage(0.25)
with ops.control_dependencies([add_to_v0]):
ema_op = ema.apply([v1])
      # The moving average of v1 should not have any control inputs.
      v1_avg = ema.average(v1)
      self.assertEqual([], v1_avg.initializer.control_inputs)
      self.assertEqual([], v1_avg.value().op.control_inputs)
# We should be able to initialize v1_avg before v0.
self.evaluate(v1_avg.initializer)
self.evaluate(v0.initializer)
self.assertEqual([10.0], self.evaluate(v1_avg))
      # Running ema_op should add to v0 (in addition to updating v1_avg).
self.evaluate(assign_to_v1)
self.evaluate(ema_op)
self.assertEqual(1, self.evaluate(v0))
self.assertEqual([17.5], self.evaluate(v1_avg))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testBasicEager(self):
v0 = variables.Variable(1.0)
v1 = variables.Variable(2.0)
ema = moving_averages.ExponentialMovingAverage(0.25)
op = ema.apply([v0, v1])
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
self.evaluate(v0.assign(2.0))
self.evaluate(v1.assign(4.0))
self.evaluate(ema.apply([v0, v1]))
self.assertAllEqual(self.evaluate(ema.average(v0)), 1.75)
self.assertAllEqual(self.evaluate(ema.average(v1)), 3.5)
def averageVariablesNamesHelper(self, zero_debias):
with self.cached_session():
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(30.0, name="v1")
# Add a non-trainable variable.
v2 = variables.Variable(20.0, name="v2", trainable=False)
tensor2 = v0 + v1
ema = moving_averages.ExponentialMovingAverage(
0.25, zero_debias=zero_debias, name="foo")
self.assertEqual("foo", ema.name)
self.assertEqual("v0/foo", ema.average_name(v0))
self.assertEqual("v1/foo", ema.average_name(v1))
self.assertEqual("add/foo", ema.average_name(tensor2))
ema.apply([v0, v1, tensor2])
vars_to_restore = ema.variables_to_restore()
# vars_to_restore should contain the following:
# {v0/foo : v0,
# v1/foo : v1,
# add/foo : add/foo,
# v2 : v2}
expected_names = [
ema.average_name(v0), ema.average_name(v1), ema.average_name(tensor2),
v2.op.name
]
if zero_debias:
# vars_to_restore should also contain the following:
# {add/foo/biased: add/foo/biased,
# add/foo/local_step: add/foo/local_step}
expected_names += [
ema.average_name(tensor2) + "/biased",
ema.average_name(tensor2) + "/local_step"
]
self.assertEqual(sorted(expected_names), sorted(vars_to_restore.keys()))
self.assertEqual(ema.average(v0).op.name, ema.average_name(v0))
self.assertEqual(ema.average(v1).op.name, ema.average_name(v1))
self.assertEqual(ema.average(tensor2).op.name, ema.average_name(tensor2))
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNames(self):
self.averageVariablesNamesHelper(zero_debias=True)
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNamesNoDebias(self):
self.averageVariablesNamesHelper(zero_debias=False)
def averageVariablesNamesRespectScopeHelper(self, zero_debias):
# See discussion on #2740.
with self.cached_session():
with variable_scope.variable_scope("scope1"):
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(30.0, name="v1")
# Add a non-trainable variable.
v2 = variables.Variable(20.0, name="v2", trainable=False)
tensor2 = v0 + v1
with variable_scope.variable_scope("scope2"):
ema = moving_averages.ExponentialMovingAverage(
0.25, zero_debias=zero_debias, name="foo")
self.assertEqual("scope2/scope1/v0/foo", ema.average_name(v0))
self.assertEqual("scope2/scope1/v1/foo", ema.average_name(v1))
self.assertEqual("scope2/scope1/add/foo", ema.average_name(tensor2))
ema.apply([v0, v1, tensor2])
vars_to_restore = ema.variables_to_restore()
# `vars_to_restore` should contain the following:
# {scope2/scope1/v0/foo : v0,
# scope2/scope1/v1/foo : v1,
# scope2/scope1/add/foo : add/foo,
# scope1/v2 : v2}
expected_names = [
ema.average_name(v0), ema.average_name(v1),
ema.average_name(tensor2), v2.op.name
]
if zero_debias:
# `vars_to_restore` should also contain the following:
# {scope2/scope2/scope1/add/foo/biased: add/foo/biased,
# scope2/scope2/scope1/add/foo/local_step: add/foo/local_step}
sc = "scope2/"
expected_names += [
sc + ema.average_name(tensor2) + "/biased",
sc + ema.average_name(tensor2) + "/local_step"
]
self.assertEqual(sorted(expected_names), sorted(vars_to_restore.keys()))
self.assertEqual(ema.average(v0).op.name, ema.average_name(v0))
self.assertEqual(ema.average(v1).op.name, ema.average_name(v1))
self.assertEqual(
ema.average(tensor2).op.name, ema.average_name(tensor2))
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNamesRespectScope(self):
self.averageVariablesNamesRespectScopeHelper(zero_debias=True)
@test_util.run_v1_only("b/120545219")
def testAverageVariablesNamesRespectScopeNoDebias(self):
self.averageVariablesNamesRespectScopeHelper(zero_debias=False)
@test_util.run_v1_only("b/120545219")
def testSubsetAverageVariablesNames(self):
with self.cached_session():
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(30.0, name="v1")
# Add a non-trainable variable.
v2 = variables.Variable(20.0, name="v2", trainable=False)
tensor2 = v0 + v1
ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
self.assertEqual("v0/foo_avg", ema.average_name(v0))
self.assertEqual("v1/foo_avg", ema.average_name(v1))
self.assertEqual("add/foo_avg", ema.average_name(tensor2))
vars_to_restore = ema.variables_to_restore([v0, tensor2])
# vars_to_restore should contain the following:
# {v0/foo_avg : v0,
# add/foo_avg : add
# v1 : v1,
# v2 : v2}
self.assertEqual(
sorted(vars_to_restore.keys()),
sorted([
ema.average_name(v0), ema.average_name(tensor2), v1.op.name,
v2.op.name
]))
ema.apply([v0, v1, tensor2])
self.assertEqual(ema.average(v0).op.name, ema.average_name(v0))
self.assertEqual(ema.average(v1).op.name, ema.average_name(v1))
self.assertEqual(ema.average(tensor2).op.name, ema.average_name(tensor2))
@test_util.run_v1_only("b/120545219")
def testAverageVariablesDeviceAssignment(self):
with ops.device("/job:dev_v0"):
v0 = variables.Variable(10.0, name="v0")
with ops.device("/job:dev_v1"):
v1 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="v1",
container="",
shared_name="")
v1.set_shape([1])
tensor2 = v0 + v1
ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
with ops.device("/job:default"):
ema.apply([v0, v1, tensor2])
self.assertDeviceEqual("/job:dev_v0", ema.average(v0).device)
self.assertDeviceEqual("/job:dev_v1", ema.average(v1).device)
# However, the colocation property is maintained.
self.assertEqual([b"loc:@v1"], ema.average(v1).op.colocation_groups())
self.assertDeviceEqual("/job:default", ema.average(tensor2).device)
def _ExportAndImportGraph(self, graph):
"""Export and import graph into a new graph."""
meta_graph = saver_lib.export_meta_graph(
graph=graph, collection_list=graph.get_all_collection_keys())
graph_copy = ops.Graph()
with graph_copy.as_default():
_ = saver_lib.import_meta_graph(meta_graph)
return graph_copy
@test_util.run_deprecated_v1
def testImportedGraphVariablesToRestore(self):
g = ops.Graph()
with g.as_default():
variables.Variable(10.0, name="v")
# Export and import the graph into a new graph.
g_copy = self._ExportAndImportGraph(g)
with g_copy.as_default():
ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
vars_to_restore = ema.variables_to_restore()
# There should only be one variable in vars_to_restore. This is important
# to check because when importing from a GraphDef, TF makes duplicate
# python Variable objects referring to the same underlying variable. We
# need to be sure that two variables referring to the same variable don't
# both get added to vars_to_restore.
self.assertEqual(len(vars_to_restore), 1)
self.assertTrue("v/foo_avg" in vars_to_restore)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/moving_averages_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GradientDescent for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.GradientDescentOptimizer"])
class GradientDescentOptimizer(optimizer.Optimizer):
"""Optimizer that implements the gradient descent algorithm.
"""
def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
"""Construct a new gradient descent optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning
rate to use.
use_locking: If True use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "GradientDescent".
@compatibility(eager)
When eager execution is enabled, `learning_rate` can be a callable that
takes no arguments and returns the actual value to use. This can be useful
for changing these values across different invocations of optimizer
functions.
@end_compatibility
"""
super(GradientDescentOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._learning_rate_tensor = None
def _apply_dense(self, grad, var):
return training_ops.apply_gradient_descent(
var,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, handle):
return training_ops.resource_apply_gradient_descent(
handle.handle, math_ops.cast(self._learning_rate_tensor,
grad.dtype.base_dtype),
grad, use_locking=self._use_locking)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
return resource_variable_ops.resource_scatter_add(
handle.handle, indices, -grad * self._learning_rate)
def _apply_sparse_duplicate_indices(self, grad, var):
delta = ops.IndexedSlices(
grad.values *
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.indices, grad.dense_shape)
return var.scatter_sub(delta, use_locking=self._use_locking)
def _prepare(self):
learning_rate = self._call_if_callable(self._learning_rate)
self._learning_rate_tensor = ops.convert_to_tensor(
learning_rate, name="learning_rate")
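# A minimal NumPy sketch, illustrative only and not part of the original
# module, of the updates the code paths above perform: the dense path
# subtracts learning_rate * grad from the whole variable, while the sparse
# paths scatter-subtract learning_rate * grad.values at grad.indices
# (duplicate indices accumulate).
import numpy as np
def _dense_sgd_step(var, grad, learning_rate):
  return var - learning_rate * grad
def _sparse_sgd_step(var, grad_values, grad_indices, learning_rate):
  out = np.array(var, dtype=float)
  # np.add.at accumulates repeated indices, matching the duplicate-index
  # semantics of resource_scatter_add with a negated, scaled gradient.
  np.add.at(out, grad_indices, -learning_rate * np.asarray(grad_values))
  return out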
|
tensorflow-master
|
tensorflow/python/training/gradient_descent.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_ops
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import server_lib
class GrpcServerTest(test.TestCase):
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(GrpcServerTest, self).__init__(methodName)
self._cached_server = server_lib.Server.create_local_server()
def testRunStep(self):
server = self._cached_server
with session.Session(server.target) as sess:
c = constant_op.constant([[2, 1]])
d = constant_op.constant([[1], [2]])
e = math_ops.matmul(c, d)
self.assertAllEqual([[4]], sess.run(e))
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
@test_util.run_v1_only("b/120545219")
def testMultipleSessions(self):
server = self._cached_server
c = constant_op.constant([[2, 1]])
d = constant_op.constant([[1], [2]])
e = math_ops.matmul(c, d)
sess_1 = session.Session(server.target)
sess_2 = session.Session(server.target)
self.assertAllEqual([[4]], sess_1.run(e))
self.assertAllEqual([[4]], sess_2.run(e))
sess_1.close()
sess_2.close()
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
# Verifies various reset failures.
@test_util.run_v1_only("b/120545219")
def testResetFails(self):
# Creates variable with container name.
with ops.container("test0"):
v0 = variables.VariableV1(1.0, name="v0")
# Creates variable with default container.
v1 = variables.VariableV1(2.0, name="v1")
# Verifies resetting the non-existent target returns error.
with self.assertRaises(errors_impl.NotFoundError):
session.Session.reset("nonexistent", ["test0"])
    # Verifies that resetting a target with no server (using a config with a
    # short timeout) times out.
with self.assertRaises(errors_impl.DeadlineExceededError):
session.Session.reset(
"grpc://localhost:0", ["test0"],
config=config_pb2.ConfigProto(operation_timeout_in_ms=5))
# Verifies no containers are reset with non-existent container.
server = self._cached_server
sess = session.Session(server.target)
sess.run(variables.global_variables_initializer())
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
# No container is reset, but the server is reset.
session.Session.reset(server.target, ["test1"])
# Verifies that both variables are still valid.
sess = session.Session(server.target)
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
def _useRPCConfig(self):
"""Return a `tf.compat.v1.ConfigProto` that ensures we use the RPC stack for tests.
This configuration ensures that we continue to exercise the gRPC
stack when testing, rather than using the in-process optimization,
which avoids using gRPC as the transport between a client and
master in the same process.
Returns:
A `tf.compat.v1.ConfigProto`.
"""
return config_pb2.ConfigProto(rpc_options=config_pb2.RPCOptions(
use_rpc_for_inprocess_master=True))
def testLargeConstant(self):
server = self._cached_server
with session.Session(server.target, config=self._useRPCConfig()) as sess:
const_val = np.empty([10000, 3000], dtype=np.float32)
const_val.fill(0.5)
c = constant_op.constant(const_val)
shape_t = array_ops.shape(c)
self.assertAllEqual([10000, 3000], sess.run(shape_t))
def testLargeFetch(self):
server = self._cached_server
with session.Session(server.target, config=self._useRPCConfig()) as sess:
c = array_ops.fill([10000, 3000], 0.5)
expected_val = np.empty([10000, 3000], dtype=np.float32)
expected_val.fill(0.5)
self.assertAllEqual(expected_val, sess.run(c))
def testLargeFeed(self):
server = self._cached_server
with session.Session(server.target, config=self._useRPCConfig()) as sess:
feed_val = np.empty([10000, 3000], dtype=np.float32)
feed_val.fill(0.5)
p = array_ops.placeholder(dtypes.float32, shape=[10000, 3000])
min_t = math_ops.reduce_min(p)
max_t = math_ops.reduce_max(p)
min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})
self.assertEqual(0.5, min_val)
self.assertEqual(0.5, max_val)
@test_util.run_v1_only("b/120545219")
def testCloseCancelsBlockingOperation(self):
server = self._cached_server
sess = session.Session(server.target, config=self._useRPCConfig())
q = data_flow_ops.FIFOQueue(10, [dtypes.float32])
enqueue_op = q.enqueue(37.0)
dequeue_t = q.dequeue()
sess.run(enqueue_op)
sess.run(dequeue_t)
def blocking_dequeue():
with self.assertRaisesRegexp(errors_impl.CancelledError,
"Session::Close"):
sess.run(dequeue_t)
blocking_thread = self.checkedThread(blocking_dequeue)
blocking_thread.start()
time.sleep(0.5)
sess.close()
blocking_thread.join()
def testInteractiveSession(self):
server = self._cached_server
# Session creation will warn (in C++) that the place_pruned_graph option
# is not supported, but it should successfully ignore it.
sess = session.InteractiveSession(server.target)
c = constant_op.constant(42.0)
self.assertEqual(42.0, self.evaluate(c))
sess.close()
def testSetConfiguration(self):
config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(per_process_gpu_memory_fraction=0.1))
# Configure a server using the default local server options.
server = server_lib.Server.create_local_server(config=config, start=False)
self.assertEqual(0.1, server.server_def.default_session_config.gpu_options.
per_process_gpu_memory_fraction)
    # Configure a server using an explicit ServerDef with an
# overridden config.
cluster_def = server_lib.ClusterSpec({
"localhost": ["localhost:0"]
}).as_cluster_def()
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def,
job_name="localhost",
task_index=0,
protocol="grpc")
server = server_lib.Server(server_def, config=config, start=False)
self.assertEqual(0.1, server.server_def.default_session_config.gpu_options.
per_process_gpu_memory_fraction)
def testInvalidHostname(self):
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "port"):
_ = server_lib.Server(
{
"local": ["localhost"]
}, job_name="local", task_index=0)
@test_util.run_v1_only("b/120545219")
def testTimeoutRaisesException(self):
server = self._cached_server
q = data_flow_ops.FIFOQueue(1, [dtypes.float32])
blocking_t = q.dequeue()
with session.Session(server.target) as sess:
with self.assertRaises(errors_impl.DeadlineExceededError):
sess.run(blocking_t, options=config_pb2.RunOptions(timeout_in_ms=1000))
with session.Session(server.target, config=self._useRPCConfig()) as sess:
with self.assertRaises(errors_impl.DeadlineExceededError):
sess.run(blocking_t, options=config_pb2.RunOptions(timeout_in_ms=1000))
def testTwoServersSamePort(self):
# Starting a server with the same target as the cached server should fail.
server = self._cached_server
with self.assertRaises(errors_impl.UnknownError):
_ = server_lib.Server(
{"local_2": [server.target[len("grpc://"):]]})
def testExtendAfterQueueRunners(self):
server = self._cached_server
with session.Session(server.target) as sess:
input_queue = input_ops.input_producer(constant_op.constant(
[0.], dtype=dtypes.float32))
self.assertIsNotNone(input_queue)
var = variables.VariableV1(1., dtype=dtypes.float32, trainable=False,
name="var")
sess.run(variables.global_variables_initializer())
queue_runner_impl.start_queue_runners(sess)
sess.run(var.assign(3.0))
@test_util.run_v1_only("b/120545219")
def testIsolateSessionState(self):
server = self._cached_server
init_value = array_ops.placeholder(dtypes.int32)
v = variables.VariableV1(init_value, validate_shape=False, name="v")
sharing_config = config_pb2.ConfigProto(isolate_session_state=False)
sharing_sess_0 = session.Session(server.target, config=sharing_config)
sharing_sess_1 = session.Session(server.target, config=sharing_config)
isolate_config = config_pb2.ConfigProto(isolate_session_state=True)
isolate_sess_0 = session.Session(server.target, config=isolate_config)
isolate_sess_1 = session.Session(server.target, config=isolate_config)
# Initially all variables are initialized.
for sess in [sharing_sess_0, sharing_sess_1,
isolate_sess_0, isolate_sess_1]:
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(v)
# Shared sessions will see each other's updates, but isolated sessions
# will not.
sharing_sess_0.run(v.initializer, feed_dict={init_value: 86})
self.assertAllEqual(86, sharing_sess_0.run(v))
self.assertAllEqual(86, sharing_sess_1.run(v))
with self.assertRaises(errors_impl.FailedPreconditionError):
isolate_sess_0.run(v)
with self.assertRaises(errors_impl.FailedPreconditionError):
isolate_sess_1.run(v)
# Changing the shape works because `validate_shape` is False.
sharing_sess_1.run(v.initializer, feed_dict={init_value: [86, 99]})
self.assertAllEqual([86, 99], sharing_sess_0.run(v))
self.assertAllEqual([86, 99], sharing_sess_1.run(v))
with self.assertRaises(errors_impl.FailedPreconditionError):
isolate_sess_0.run(v)
with self.assertRaises(errors_impl.FailedPreconditionError):
isolate_sess_1.run(v)
# Initializing in an isolated session will only affect the state in that
# session.
isolate_sess_0.run(v.initializer, feed_dict={init_value: 37})
self.assertAllEqual([86, 99], sharing_sess_0.run(v))
self.assertAllEqual([86, 99], sharing_sess_1.run(v))
self.assertAllEqual(37, isolate_sess_0.run(v))
with self.assertRaises(errors_impl.FailedPreconditionError):
isolate_sess_1.run(v)
# Isolated sessions can have different shapes for the same variable.
isolate_sess_1.run(v.initializer, feed_dict={init_value: [19, 86]})
self.assertAllEqual([86, 99], sharing_sess_0.run(v))
self.assertAllEqual([86, 99], sharing_sess_1.run(v))
self.assertAllEqual(37, isolate_sess_0.run(v))
self.assertAllEqual([19, 86], isolate_sess_1.run(v))
@test_util.run_v1_only("b/120545219")
def testShapeChangingIsolateState(self):
server = self._cached_server
sharing_config = config_pb2.ConfigProto(isolate_session_state=False)
isolate_config = config_pb2.ConfigProto(isolate_session_state=True)
with ops.Graph().as_default():
w_vector = variables.VariableV1([1, 2, 3], name="w")
with session.Session(server.target, config=sharing_config) as sess:
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(w_vector)
sess.run(w_vector.initializer)
self.assertAllEqual([1, 2, 3], sess.run(w_vector))
with ops.Graph().as_default():
w_vector = variables.VariableV1([4, 5, 6], name="w")
with session.Session(server.target, config=sharing_config) as sess:
self.assertAllEqual([1, 2, 3], sess.run(w_vector))
sess.run(w_vector.initializer)
self.assertAllEqual([4, 5, 6], sess.run(w_vector))
with ops.Graph().as_default():
w_scalar = variables.VariableV1(86, name="w")
with session.Session(server.target, config=sharing_config) as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(w_scalar.initializer)
with ops.Graph().as_default():
w_scalar = variables.VariableV1(37, name="w")
with session.Session(server.target, config=isolate_config) as sess:
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(w_scalar)
sess.run(w_scalar.initializer)
self.assertAllEqual(37, sess.run(w_scalar))
class ServerDefTest(test.TestCase):
def testLocalServer(self):
cluster_def = server_lib.ClusterSpec({
"local": ["localhost:2222"]
}).as_cluster_def()
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name="local", task_index=0, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' } }
}
job_name: 'local' task_index: 0 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = server_lib.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoProcesses(self):
cluster_def = server_lib.ClusterSpec({
"local": ["localhost:2222", "localhost:2223"]
}).as_cluster_def()
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name="local", task_index=1, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' }
tasks { key: 1 value: 'localhost:2223' } }
}
job_name: 'local' task_index: 1 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = server_lib.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoJobs(self):
cluster_def = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
}).as_cluster_def()
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name="worker", task_index=2, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
}
job_name: 'worker' task_index: 2 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = server_lib.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testDenseAndSparseJobs(self):
cluster_def = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": {
0: "worker0:2222",
2: "worker2:2222"
}
}).as_cluster_def()
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name="worker", task_index=2, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 2 value: 'worker2:2222' } }
}
job_name: 'worker' task_index: 2 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = server_lib.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
class ClusterSpecTest(test.TestCase):
def testStringConversion(self):
cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:1111"],
"worker": ["worker0:3333", "worker1:4444"]
})
expected_str = (
"ClusterSpec({'ps': ['ps0:1111'], 'worker': ['worker0:3333', "
"'worker1:4444']})")
self.assertEqual(expected_str, str(cluster_spec))
def testProtoDictDefEquivalences(self):
cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
def testProtoDictDefEquivalencesWithZeroWorker(self):
cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": []
})
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' }
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
def testClusterSpecAccessors(self):
original_dict = {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"],
"sparse": {
0: "sparse0:2222",
3: "sparse3:2222"
}
}
cluster_spec = server_lib.ClusterSpec(original_dict)
self.assertEqual(original_dict, cluster_spec.as_dict())
self.assertEqual(2, cluster_spec.num_tasks("ps"))
self.assertEqual(3, cluster_spec.num_tasks("worker"))
self.assertEqual(2, cluster_spec.num_tasks("sparse"))
with self.assertRaises(ValueError):
cluster_spec.num_tasks("unknown")
self.assertEqual("ps0:2222", cluster_spec.task_address("ps", 0))
self.assertEqual("sparse0:2222", cluster_spec.task_address("sparse", 0))
with self.assertRaises(ValueError):
cluster_spec.task_address("unknown", 0)
with self.assertRaises(ValueError):
cluster_spec.task_address("sparse", 2)
self.assertEqual([0, 1], cluster_spec.task_indices("ps"))
self.assertEqual([0, 1, 2], cluster_spec.task_indices("worker"))
self.assertEqual([0, 3], cluster_spec.task_indices("sparse"))
with self.assertRaises(ValueError):
cluster_spec.task_indices("unknown")
# NOTE(mrry): `ClusterSpec.job_tasks()` is not recommended for use
# with sparse jobs.
self.assertEqual(["ps0:2222", "ps1:2222"], cluster_spec.job_tasks("ps"))
self.assertEqual(["worker0:2222", "worker1:2222", "worker2:2222"],
cluster_spec.job_tasks("worker"))
self.assertEqual(["sparse0:2222", None, None, "sparse3:2222"],
cluster_spec.job_tasks("sparse"))
with self.assertRaises(ValueError):
cluster_spec.job_tasks("unknown")
def testEmptyClusterSpecIsFalse(self):
self.assertFalse(server_lib.ClusterSpec({}))
def testNonEmptyClusterSpecIsTrue(self):
self.assertTrue(server_lib.ClusterSpec({"job": ["host:port"]}))
  def testEq(self):
    self.assertEqual(server_lib.ClusterSpec({}), server_lib.ClusterSpec({}))
    self.assertEqual(
        server_lib.ClusterSpec({
            "job": ["host:2222"]
        }),
        server_lib.ClusterSpec({
            "job": ["host:2222"]
        }))
    self.assertEqual(
        server_lib.ClusterSpec({
            "job": {
                0: "host:2222"
            }
        }), server_lib.ClusterSpec({
            "job": ["host:2222"]
        }))
  def testNe(self):
    self.assertNotEqual(
        server_lib.ClusterSpec({}),
        server_lib.ClusterSpec({
            "job": ["host:2223"]
        }))
    self.assertNotEqual(
        server_lib.ClusterSpec({
            "job1": ["host:2222"]
        }),
        server_lib.ClusterSpec({
            "job2": ["host:2222"]
        }))
    self.assertNotEqual(
        server_lib.ClusterSpec({
            "job": ["host:2222"]
        }),
        server_lib.ClusterSpec({
            "job": ["host:2223"]
        }))
    self.assertNotEqual(
        server_lib.ClusterSpec({
            "job": ["host:2222", "host:2223"]
        }),
        server_lib.ClusterSpec({
            "job": ["host:2223", "host:2222"]
        }))
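# An illustrative helper, hypothetical and not part of the original test
# file, showing the normalization the equality tests above rely on: a dense
# task list and a dict keyed by contiguous indices starting at 0 describe the
# same job, which is why ClusterSpec({"job": {0: "host:2222"}}) compares
# equal to ClusterSpec({"job": ["host:2222"]}).
def _job_as_task_dict(tasks):
  if isinstance(tasks, dict):
    return dict(tasks)
  return {index: address for index, address in enumerate(tasks)}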
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/server_lib_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synchronize replicas for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import session_manager
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Please note that the gradients from replicas are averaged instead of summed
# (as in the old sync_replicas_optimizer) so you need to increase the learning
# rate according to the number of replicas. This change is introduced to be
# consistent with how gradients are aggregated (averaged) within a batch in a
# replica.
@tf_export(v1=["train.SyncReplicasOptimizer"])
class SyncReplicasOptimizer(optimizer.Optimizer):
"""Class to synchronize, aggregate gradients and pass them to the optimizer.
  This class is deprecated. For synchronous training, please use [Distribution
Strategies](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).
In a typical asynchronous training environment, it's common to have some
  stale gradients. For example, with N-replica asynchronous training,
gradients will be applied to the variables N times independently. Depending
on each replica's training speed, some gradients might be calculated from
copies of the variable from several steps back (N-1 steps on average). This
optimizer avoids stale gradients by collecting gradients from all replicas,
averaging them, then applying them to the variables in one shot, after
which replicas can fetch the new variables and continue.
The following accumulators/queue are created:
* N `gradient accumulators`, one per variable to train. Gradients are pushed
to them and the chief worker will wait until enough gradients are collected
and then average them before applying to variables. The accumulator will
drop all stale gradients (more details in the accumulator op).
* 1 `token` queue where the optimizer pushes the new global_step value after
all variables are updated.
The following local variable is created:
* `sync_rep_local_step`, one per replica. Compared against the global_step in
each accumulator to check for staleness of the gradients.
The optimizer adds nodes to the graph to collect gradients and pause the
trainers until variables are updated.
For the Parameter Server job:
1. An accumulator is created for each variable, and each replica pushes the
gradients into the accumulators instead of directly applying them to the
variables.
2. Each accumulator averages once enough gradients (replicas_to_aggregate)
have been accumulated.
3. Apply the averaged gradients to the variables.
4. Only after all variables have been updated, increment the global step.
5. Only after step 4, pushes `global_step` in the `token_queue`, once for
each worker replica. The workers can now fetch the global step, use it to
update its local_step variable and start the next batch. Please note that
some workers can consume multiple minibatches, while some may not consume
even one. This is because each worker fetches minibatches as long as
a token exists. If one worker is stuck for some reason and does not
consume a token, another worker can use it.
For the replicas:
1. Start a step: fetch variables and compute gradients.
2. Once the gradients have been computed, push them into gradient
accumulators. Each accumulator will check the staleness and drop the stale.
3. After pushing all the gradients, dequeue an updated value of global_step
from the token queue and record that step to its local_step variable. Note
that this is effectively a barrier.
4. Start the next batch.
### Usage
```python
# Create any optimizer to update the variables, say a simple SGD:
opt = GradientDescentOptimizer(learning_rate=0.1)
# Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
# step the optimizer collects 50 gradients before applying to variables.
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
opt = tf.compat.v1.train.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
total_num_replicas=50)
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
# Now you can call `minimize()` or `compute_gradients()` and
# `apply_gradients()` normally
training_op = opt.minimize(total_loss, global_step=self.global_step)
# You can create the hook which handles initialization and queues.
sync_replicas_hook = opt.make_session_run_hook(is_chief)
```
In the training program, every worker will run the train_op as if not
synchronized.
```python
with training.MonitoredTrainingSession(
master=workers[worker_id].target, is_chief=is_chief,
hooks=[sync_replicas_hook]) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(training_op)
```
  To use SyncReplicasOptimizer with an `Estimator`, you need to pass
  sync_replicas_hook when calling fit.
```python
my_estimator = DNNClassifier(..., optimizer=opt)
my_estimator.fit(..., hooks=[sync_replicas_hook])
```
"""
@deprecation.deprecated(
None,
"The `SyncReplicaOptimizer` class is deprecated. For synchrononous "
"training, please use [Distribution Strategies](https://github.com/"
"tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).",
warn_once=True)
def __init__(self,
opt,
replicas_to_aggregate,
total_num_replicas=None,
variable_averages=None,
variables_to_average=None,
use_locking=False,
name="sync_replicas"):
"""Construct a sync_replicas optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
replicas_to_aggregate: number of replicas to aggregate for each variable
update.
total_num_replicas: Total number of tasks/workers/replicas, could be
different from replicas_to_aggregate.
        If total_num_replicas > replicas_to_aggregate: total_num_replicas is
        replicas_to_aggregate plus the number of backup replicas.
        If total_num_replicas < replicas_to_aggregate: replicas compute
        multiple batches per update to variables.
variable_averages: Optional `ExponentialMovingAverage` object, used to
maintain moving averages for the variables passed in
`variables_to_average`.
variables_to_average: a list of variables that need to be averaged. Only
needed if variable_averages is passed in.
use_locking: If True use locks for update operation.
name: string. Optional name of the returned operation.
"""
if total_num_replicas is None:
total_num_replicas = replicas_to_aggregate
super(SyncReplicasOptimizer, self).__init__(use_locking, name)
logging.info(
"SyncReplicasV2: replicas_to_aggregate=%s; total_num_replicas=%s",
replicas_to_aggregate, total_num_replicas)
self._opt = opt
self._replicas_to_aggregate = replicas_to_aggregate
self._gradients_applied = False
self._variable_averages = variable_averages
self._variables_to_average = variables_to_average
self._total_num_replicas = total_num_replicas
self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
self._global_step = None
self._sync_token_queue = None
# The synchronization op will be executed in a queue runner which should
# only be executed by one of the replicas (usually the chief).
self._chief_queue_runner = None
# Remember which accumulator is on which device to set the initial step in
# the accumulator to be global step. This list contains list of the
# following format: (accumulator, device).
self._accumulator_list = []
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
    This simply wraps the compute_gradients() from the real optimizer. The
    gradients will be aggregated in apply_gradients() so that the user can
    modify the gradients, e.g. clip them with a per-replica global norm, if
    needed. The global norm computed over aggregated gradients can be bad, as
    one replica's huge gradients can hurt the gradients from other replicas.
Args:
*args: Arguments for compute_gradients().
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
"""
return self._opt.compute_gradients(*args, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This contains most of the synchronization implementation and also wraps the
apply_gradients() from the real optimizer.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the Optimizer constructor.
Returns:
train_op: The op to dequeue a token so the replicas can exit this batch
and start the next one. This is executed by each replica.
Raises:
      ValueError: If grads_and_vars is empty.
      ValueError: If global_step is not provided, since staleness cannot be
        checked without it.
"""
if not grads_and_vars:
raise ValueError("Must supply at least one variable")
if global_step is None:
raise ValueError("Global step is required to check staleness")
self._global_step = global_step
train_ops = []
aggregated_grad = []
var_list = []
# local_anchor op will be placed on this worker task by default.
local_anchor = control_flow_ops.no_op()
# Colocating local_step variable prevents it being placed on the PS.
distribution_strategy = distribution_strategy_context.get_strategy()
with distribution_strategy.extended.colocate_vars_with(local_anchor):
self._local_step = variable_scope.variable(
initial_value=0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=global_step.dtype.base_dtype,
name="sync_rep_local_step")
self.local_step_init_op = state_ops.assign(self._local_step, global_step)
chief_init_ops = [self.local_step_init_op]
self.ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
with ops.name_scope(None, self._name):
for grad, var in grads_and_vars:
var_list.append(var)
with ops.device(var.device):
# Dense gradients.
if grad is None:
aggregated_grad.append(None) # pass-through.
continue
elif isinstance(grad, ops.Tensor):
grad_accum = data_flow_ops.ConditionalAccumulator(
grad.dtype,
shape=var.get_shape(),
shared_name=var.name + "/grad_accum")
train_ops.append(grad_accum.apply_grad(
grad, local_step=self._local_step))
aggregated_grad.append(grad_accum.take_grad(
self._replicas_to_aggregate))
else:
if not isinstance(grad, ops.IndexedSlices):
raise ValueError("Unknown grad type!")
grad_accum = data_flow_ops.SparseConditionalAccumulator(
grad.dtype, shape=(), shared_name=var.name + "/grad_accum")
train_ops.append(grad_accum.apply_indexed_slices_grad(
grad, local_step=self._local_step))
aggregated_grad.append(grad_accum.take_indexed_slices_grad(
self._replicas_to_aggregate))
self._accumulator_list.append((grad_accum, var.device))
aggregated_grads_and_vars = zip(aggregated_grad, var_list)
# sync_op will be assigned to the same device as the global step.
with ops.device(global_step.device), ops.name_scope(""):
update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
global_step)
# Create token queue.
with ops.device(global_step.device), ops.name_scope(""):
sync_token_queue = (
data_flow_ops.FIFOQueue(-1,
global_step.dtype.base_dtype,
shapes=(),
name="sync_token_q",
shared_name="sync_token_q"))
self._sync_token_queue = sync_token_queue
# dummy_queue is passed to the queue runner. Don't use the real queues
# because the queue runner doesn't automatically reopen it once it
# closed queues in PS devices.
dummy_queue = (
data_flow_ops.FIFOQueue(1,
types_pb2.DT_INT32,
shapes=(),
name="dummy_queue",
shared_name="dummy_queue"))
with ops.device(global_step.device), ops.name_scope(""):
# Replicas have to wait until they can get a token from the token queue.
with ops.control_dependencies(train_ops):
token = sync_token_queue.dequeue()
train_op = state_ops.assign(self._local_step, token)
with ops.control_dependencies([update_op]):
# Sync_op needs to insert tokens to the token queue at the end of the
# step so the replicas can fetch them to start the next step.
tokens = array_ops.fill([self._tokens_per_step], global_step)
sync_op = sync_token_queue.enqueue_many((tokens,))
if self._variable_averages is not None:
with ops.control_dependencies([sync_op]), ops.name_scope(""):
sync_op = self._variable_averages.apply(
self._variables_to_average)
self._chief_queue_runner = queue_runner.QueueRunner(dummy_queue,
[sync_op])
for accum, dev in self._accumulator_list:
with ops.device(dev):
chief_init_ops.append(
accum.set_global_step(
global_step, name="SetGlobalStep"))
self.chief_init_op = control_flow_ops.group(*(chief_init_ops))
self._gradients_applied = True
return train_op
def get_chief_queue_runner(self):
"""Returns the QueueRunner for the chief to execute.
This includes the operations to synchronize replicas: aggregate gradients,
apply to variables, increment global step, insert tokens to token queue.
    Note that this can only be called after calling apply_gradients() which
    actually creates this queue runner.
Returns:
A `QueueRunner` for chief to execute.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError("Should be called after apply_gradients().")
return self._chief_queue_runner
def get_slot(self, *args, **kwargs):
"""Return a slot named "name" created for "var" by the Optimizer.
This simply wraps the get_slot() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._opt.get_slot(*args, **kwargs)
def variables(self):
"""Fetches a list of optimizer variables in the default graph.
This wraps `variables()` from the actual optimizer. It does not include
the `SyncReplicasOptimizer`'s local step.
Returns:
A list of variables.
"""
return self._opt.variables()
def get_slot_names(self, *args, **kwargs):
"""Return a list of the names of slots created by the `Optimizer`.
This simply wraps the get_slot_names() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
A list of strings.
"""
return self._opt.get_slot_names(*args, **kwargs)
def get_init_tokens_op(self, num_tokens=-1):
"""Returns the op to fill the sync_token_queue with the tokens.
    This is supposed to be executed at the beginning of the chief/sync thread
so that even if the total_num_replicas is less than replicas_to_aggregate,
the model can still proceed as the replicas can compute multiple steps per
variable update. Make sure:
`num_tokens >= replicas_to_aggregate - total_num_replicas`.
Args:
num_tokens: Number of tokens to add to the queue.
Returns:
An op for the chief/sync replica to fill the token queue.
Raises:
ValueError: If this is called before apply_gradients().
      ValueError: If num_tokens is smaller than replicas_to_aggregate -
        total_num_replicas.
"""
if self._gradients_applied is False:
raise ValueError(
"get_init_tokens_op() should be called after apply_gradients().")
tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
if num_tokens == -1:
num_tokens = self._replicas_to_aggregate
elif num_tokens < tokens_needed:
raise ValueError(
"Too few tokens to finish the first step: %d (given) vs %d (needed)" %
(num_tokens, tokens_needed))
if num_tokens > 0:
with ops.device(self._global_step.device), ops.name_scope(""):
tokens = array_ops.fill([num_tokens], self._global_step)
init_tokens = self._sync_token_queue.enqueue_many((tokens,))
else:
init_tokens = control_flow_ops.no_op(name="no_init_tokens")
return init_tokens
def make_session_run_hook(self, is_chief, num_tokens=-1):
"""Creates a hook to handle SyncReplicasHook ops such as initialization."""
return _SyncReplicasOptimizerHook(self, is_chief, num_tokens)
class _SyncReplicasOptimizerHook(session_run_hook.SessionRunHook):
"""A SessionRunHook handles ops related to SyncReplicasOptimizer."""
def __init__(self, sync_optimizer, is_chief, num_tokens):
"""Creates hook to handle SyncReplicasOptimizer initialization ops.
Args:
sync_optimizer: `SyncReplicasOptimizer` which this hook will initialize.
      is_chief: `Bool`, whether this is a chief replica or not.
num_tokens: Number of tokens to add to the queue.
"""
self._sync_optimizer = sync_optimizer
self._is_chief = is_chief
self._num_tokens = num_tokens
def begin(self):
if self._sync_optimizer._gradients_applied is False: # pylint: disable=protected-access
raise ValueError(
"SyncReplicasOptimizer.apply_gradient should be called before using "
"the hook.")
if self._is_chief:
self._local_init_op = self._sync_optimizer.chief_init_op
self._ready_for_local_init_op = (
self._sync_optimizer.ready_for_local_init_op)
self._q_runner = self._sync_optimizer.get_chief_queue_runner()
self._init_tokens_op = self._sync_optimizer.get_init_tokens_op(
self._num_tokens)
else:
self._local_init_op = self._sync_optimizer.local_step_init_op
self._ready_for_local_init_op = (
self._sync_optimizer.ready_for_local_init_op)
self._q_runner = None
self._init_tokens_op = None
def after_create_session(self, session, coord):
"""Runs SyncReplicasOptimizer initialization ops."""
local_init_success, msg = session_manager._ready( # pylint: disable=protected-access
self._ready_for_local_init_op, session,
"Model is not ready for SyncReplicasOptimizer local init.")
if not local_init_success:
raise RuntimeError(
"Init operations did not make model ready for SyncReplicasOptimizer "
"local_init. Init op: %s, error: %s" %
(self._local_init_op.name, msg))
session.run(self._local_init_op)
if self._init_tokens_op is not None:
session.run(self._init_tokens_op)
if self._q_runner is not None:
self._q_runner.create_threads(
session, coord=coord, daemon=True, start=True)
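# A pure-Python sketch, illustrative only and not part of the original
# module, of the token accounting enforced by get_init_tokens_op() above:
# with num_tokens == -1 the queue is seeded with replicas_to_aggregate
# tokens, and any explicit value must cover the shortfall
# replicas_to_aggregate - total_num_replicas.
def _initial_token_count(replicas_to_aggregate, total_num_replicas,
                         num_tokens=-1):
  tokens_needed = replicas_to_aggregate - total_num_replicas
  if num_tokens == -1:
    return replicas_to_aggregate
  if num_tokens < tokens_needed:
    raise ValueError(
        "Too few tokens to finish the first step: %d (given) vs %d (needed)" %
        (num_tokens, tokens_needed))
  return num_tokens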
|
tensorflow-master
|
tensorflow/python/training/sync_replicas_optimizer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SparseJobTest(test.TestCase):
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
@test_util.run_deprecated_v1
def testSparseJob(self):
server = server_lib.Server({"local": {37: "localhost:0"}})
with ops.device("/job:local/task:37"):
a = constant_op.constant(1.0)
with session.Session(server.target) as sess:
self.assertEqual(1.0, self.evaluate(a))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/server_lib_sparse_job_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for creating TensorFlow servers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _make_server_def(server_or_cluster_def, job_name, task_index, protocol,
config):
"""Creates a `tf.train.ServerDef` protocol buffer.
Args:
server_or_cluster_def: A `tf.train.ServerDef` or `tf.train.ClusterDef`
protocol buffer, or a `tf.train.ClusterSpec` object, describing the server
to be defined and/or the cluster of which it is a member.
job_name: (Optional.) Specifies the name of the job of which the server is a
member. Defaults to the value in `server_or_cluster_def`, if specified.
task_index: (Optional.) Specifies the task index of the server in its job.
Defaults to the value in `server_or_cluster_def`, if specified. Otherwise
defaults to 0 if the server's job has only one task.
protocol: (Optional.) Specifies the protocol to be used by the server.
Acceptable values include `"grpc", "grpc+verbs"`. Defaults to the value in
`server_or_cluster_def`, if specified. Otherwise defaults to `"grpc"`.
    config: (Optional.) A `tf.compat.v1.ConfigProto` that specifies default
configuration options for all sessions that run on this server.
Returns:
A `tf.train.ServerDef`.
Raises:
TypeError: If the arguments do not have the appropriate type.
ValueError: If an argument is not specified and cannot be inferred.
"""
server_def = tensorflow_server_pb2.ServerDef()
if isinstance(server_or_cluster_def, tensorflow_server_pb2.ServerDef):
server_def.MergeFrom(server_or_cluster_def)
if job_name is not None:
server_def.job_name = job_name
if task_index is not None:
server_def.task_index = task_index
if protocol is not None:
server_def.protocol = protocol
if config is not None:
server_def.default_session_config.MergeFrom(config)
else:
try:
cluster_spec = ClusterSpec(server_or_cluster_def)
except TypeError:
raise TypeError("Could not convert `server_or_cluster_def` to a "
"`tf.train.ServerDef` or `tf.train.ClusterSpec`.")
if job_name is None:
if len(cluster_spec.jobs) == 1:
job_name = cluster_spec.jobs[0]
else:
raise ValueError("Must specify an explicit `job_name`.")
if task_index is None:
task_indices = cluster_spec.task_indices(job_name)
if len(task_indices) == 1:
task_index = task_indices[0]
else:
raise ValueError("Must specify an explicit `task_index`.")
if protocol is None:
protocol = "grpc"
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_spec.as_cluster_def(),
job_name=job_name,
task_index=task_index,
protocol=protocol)
if config is not None:
server_def.default_session_config.MergeFrom(config)
return server_def
@tf_export("distribute.Server", v1=["distribute.Server", "train.Server"])
@deprecation.deprecated_endpoints("train.Server")
class Server(object):
"""An in-process TensorFlow server, for use in distributed training.
A `tf.distribute.Server` instance encapsulates a set of devices and a
`tf.compat.v1.Session` target that
can participate in distributed training. A server belongs to a
cluster (specified by a `tf.train.ClusterSpec`), and
corresponds to a particular task in a named job. The server can
communicate with any other server in the same cluster.
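  A minimal sketch of bringing up one task of a cluster (the addresses are
  placeholders):
  ```python
  cluster = tf.train.ClusterSpec({"worker": ["localhost:2222",
                                             "localhost:2223"]})
  server = tf.distribute.Server(cluster, job_name="worker", task_index=0)
  server.join()  # Serves "/job:worker/task:0" and blocks forever.
  ```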
"""
def __init__(self,
server_or_cluster_def,
job_name=None,
task_index=None,
protocol=None,
config=None,
start=True):
"""Creates a new server with the given definition.
The `job_name`, `task_index`, and `protocol` arguments are optional, and
override any information provided in `server_or_cluster_def`.
Args:
server_or_cluster_def: A `tf.train.ServerDef` or `tf.train.ClusterDef`
protocol buffer, or a `tf.train.ClusterSpec` object, describing the
server to be created and/or the cluster of which it is a member.
job_name: (Optional.) Specifies the name of the job of which the server is
a member. Defaults to the value in `server_or_cluster_def`, if
specified.
task_index: (Optional.) Specifies the task index of the server in its job.
Defaults to the value in `server_or_cluster_def`, if specified.
Otherwise defaults to 0 if the server's job has only one task.
protocol: (Optional.) Specifies the protocol to be used by the server.
Acceptable values include `"grpc", "grpc+verbs"`. Defaults to the value
in `server_or_cluster_def`, if specified. Otherwise defaults to
`"grpc"`.
      config: (Optional.) A `tf.compat.v1.ConfigProto` that specifies default
configuration options for all sessions that run on this server.
start: (Optional.) Boolean, indicating whether to start the server after
creating it. Defaults to `True`.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow server.
"""
self._server_def = _make_server_def(server_or_cluster_def, job_name,
task_index, protocol, config)
self._server = c_api.TF_NewServer(self._server_def.SerializeToString())
if start:
self.start()
def __del__(self):
try:
c_api.TF_ServerStop(self._server)
# Clean shutdown of servers is not yet implemented, so
# we leak instead of calling c_api.TF_DeleteServer here.
# See:
# https://github.com/tensorflow/tensorflow/blob/0495317a6e9dd4cac577b9d5cf9525e62b571018/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h#L73
except errors.UnimplementedError:
pass
except AttributeError:
# At shutdown, `c_api` may have been garbage collected.
pass
self._server = None
def start(self):
"""Starts this server.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
starting the TensorFlow server.
"""
c_api.TF_ServerStart(self._server)
def join(self):
"""Blocks until the server has shut down.
This method currently blocks forever.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
joining the TensorFlow server.
"""
c_api.TF_ServerJoin(self._server)
@property
def server_def(self):
"""Returns the `tf.train.ServerDef` for this server.
Returns:
A `tf.train.ServerDef` protocol buffer that describes the configuration
of this server.
"""
return self._server_def
@property
def target(self):
"""Returns the target for a `tf.compat.v1.Session` to connect to this server.
To create a
`tf.compat.v1.Session` that
connects to this server, use the following snippet:
```python
server = tf.distribute.Server(...)
with tf.compat.v1.Session(server.target):
# ...
```
Returns:
A string containing a session target for this server.
"""
return c_api.TF_ServerTarget(self._server)
@staticmethod
def create_local_server(config=None, start=True):
"""Creates a new single-process cluster running on the local host.
This method is a convenience wrapper for creating a
`tf.distribute.Server` with a `tf.train.ServerDef` that specifies a
single-process cluster containing a single task in a job called
`"local"`.
Args:
      config: (Optional.) A `tf.compat.v1.ConfigProto` that specifies default
configuration options for all sessions that run on this server.
start: (Optional.) Boolean, indicating whether to start the server after
creating it. Defaults to `True`.
Returns:
A local `tf.distribute.Server`.
"""
# Specifying port 0 means that the OS will choose a free port for the
# server.
return Server({"local": ["localhost:0"]},
protocol="grpc",
config=config,
start=start)
@tf_export("train.ClusterSpec")
class ClusterSpec(object):
"""Represents a cluster as a set of "tasks", organized into "jobs".
A `tf.train.ClusterSpec` represents the set of processes that
participate in a distributed TensorFlow computation. Every
`tf.distribute.Server` is constructed in a particular cluster.
To create a cluster with two jobs and five tasks, you specify the
mapping from job names to lists of network addresses (typically
hostname-port pairs).
```python
cluster = tf.train.ClusterSpec({"worker": ["worker0.example.com:2222",
"worker1.example.com:2222",
"worker2.example.com:2222"],
"ps": ["ps0.example.com:2222",
"ps1.example.com:2222"]})
```
Each job may also be specified as a sparse mapping from task indices
to network addresses. This enables a server to be configured without
needing to know the identity of (for example) all other worker
tasks:
```python
cluster = tf.train.ClusterSpec({"worker": {1: "worker1.example.com:2222"},
"ps": ["ps0.example.com:2222",
"ps1.example.com:2222"]})
```
"""
def __init__(self, cluster):
"""Creates a `ClusterSpec`.
Args:
cluster: A dictionary mapping one or more job names to (i) a list of
network addresses, or (ii) a dictionary mapping integer task indices to
network addresses; or a `tf.train.ClusterDef` protocol buffer.
Raises:
TypeError: If `cluster` is not a dictionary mapping strings to lists
of strings, and not a `tf.train.ClusterDef` protobuf.
"""
if isinstance(cluster, dict):
self._cluster_spec = {}
for job_name, tasks in cluster.items():
if isinstance(tasks, (list, tuple)):
job_tasks = {i: task for i, task in enumerate(tasks)}
elif isinstance(tasks, dict):
job_tasks = {i: task for i, task in tasks.items()}
else:
raise TypeError("The tasks for job %r must be a list or a dictionary "
"from integers to strings." % job_name)
self._cluster_spec[job_name] = job_tasks
self._make_cluster_def()
elif isinstance(cluster, cluster_pb2.ClusterDef):
self._cluster_def = cluster
self._cluster_spec = {}
for job_def in self._cluster_def.job:
self._cluster_spec[job_def.name] = {
i: t for i, t in job_def.tasks.items()
}
elif isinstance(cluster, ClusterSpec):
self._cluster_def = cluster_pb2.ClusterDef()
self._cluster_def.MergeFrom(cluster.as_cluster_def())
self._cluster_spec = {}
for job_def in self._cluster_def.job:
self._cluster_spec[job_def.name] = {
i: t for i, t in job_def.tasks.items()
}
else:
raise TypeError("`cluster` must be a dictionary mapping one or more "
"job names to lists of network addresses, or a "
"`ClusterDef` protocol buffer")
def __nonzero__(self):
return bool(self._cluster_spec)
# Python 3.x
__bool__ = __nonzero__
def __eq__(self, other):
return self._cluster_spec == other
def __ne__(self, other):
return self._cluster_spec != other
def __str__(self):
key_values = self.as_dict()
string_items = [
repr(k) + ": " + repr(key_values[k]) for k in sorted(key_values)
]
return "ClusterSpec({" + ", ".join(string_items) + "})"
def as_dict(self):
"""Returns a dictionary from job names to their tasks.
For each job, if the task index space is dense, the corresponding
value will be a list of network addresses; otherwise it will be a
dictionary mapping (sparse) task indices to the corresponding
addresses.
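    For example (a sketch):
    ```python
    tf.train.ClusterSpec({"ps": ["ps0:2222", "ps1:2222"]}).as_dict()
    # => {'ps': ['ps0:2222', 'ps1:2222']}   (dense indices: a list)
    tf.train.ClusterSpec({"worker": {1: "worker1:2222"}}).as_dict()
    # => {'worker': {1: 'worker1:2222'}}    (sparse indices: a dict)
    ```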
Returns:
A dictionary mapping job names to lists or dictionaries
describing the tasks in those jobs.
"""
ret = {}
for job in self.jobs:
task_indices = self.task_indices(job)
if len(task_indices) == 0:
ret[job] = {}
continue
if max(task_indices) + 1 == len(task_indices):
# Return a list because the task indices are dense. This
# matches the behavior of `as_dict()` before support for
# sparse jobs was added.
ret[job] = self.job_tasks(job)
else:
ret[job] = {i: self.task_address(job, i) for i in task_indices}
return ret
def as_cluster_def(self):
"""Returns a `tf.train.ClusterDef` protocol buffer based on this cluster."""
return self._cluster_def
@property
def jobs(self):
"""Returns a list of job names in this cluster.
Returns:
A list of strings, corresponding to the names of jobs in this cluster.
"""
return list(self._cluster_spec.keys())
def num_tasks(self, job_name):
"""Returns the number of tasks defined in the given job.
Args:
job_name: The string name of a job in this cluster.
Returns:
The number of tasks defined in the given job.
Raises:
ValueError: If `job_name` does not name a job in this cluster.
"""
try:
job = self._cluster_spec[job_name]
except KeyError:
raise ValueError("No such job in cluster: %r" % job_name)
return len(job)
def task_indices(self, job_name):
"""Returns a list of valid task indices in the given job.
Args:
job_name: The string name of a job in this cluster.
Returns:
A list of valid task indices in the given job.
Raises:
      ValueError: If `job_name` does not name a job in this cluster.
"""
try:
job = self._cluster_spec[job_name]
except KeyError:
raise ValueError("No such job in cluster: %r" % job_name)
return list(sorted(job.keys()))
def task_address(self, job_name, task_index):
"""Returns the address of the given task in the given job.
Args:
job_name: The string name of a job in this cluster.
task_index: A non-negative integer.
Returns:
The address of the given task in the given job.
Raises:
ValueError: If `job_name` does not name a job in this cluster,
or no task with index `task_index` is defined in that job.
"""
try:
job = self._cluster_spec[job_name]
except KeyError:
raise ValueError("No such job in cluster: %r" % job_name)
try:
return job[task_index]
except KeyError:
raise ValueError("No task with index %r in job %r" %
(task_index, job_name))
def job_tasks(self, job_name):
"""Returns a mapping from task ID to address in the given job.
NOTE: For backwards compatibility, this method returns a list. If
the given job was defined with a sparse set of task indices, the
length of this list may not reflect the number of tasks defined in
this job. Use the `tf.train.ClusterSpec.num_tasks` method
to find the number of tasks defined in a particular job.
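    For example (a sketch):
    ```python
    spec = tf.train.ClusterSpec({"worker": {1: "worker1.example.com:2222"}})
    spec.job_tasks("worker")  # [None, 'worker1.example.com:2222']
    spec.num_tasks("worker")  # 1
    ```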
Args:
job_name: The string name of a job in this cluster.
Returns:
A list of task addresses, where the index in the list
corresponds to the task index of each task. The list may contain
`None` if the job was defined with a sparse set of task indices.
Raises:
ValueError: If `job_name` does not name a job in this cluster.
"""
try:
job = self._cluster_spec[job_name]
except KeyError:
raise ValueError("No such job in cluster: %r" % job_name)
ret = [None for _ in range(max(job.keys()) + 1)]
for i, task in job.items():
ret[i] = task
return ret
def _make_cluster_def(self):
"""Creates a `tf.train.ClusterDef` based on the given `cluster_spec`.
Raises:
TypeError: If `cluster_spec` is not a dictionary mapping strings to lists
of strings.
"""
self._cluster_def = cluster_pb2.ClusterDef()
# NOTE(mrry): Sort by job_name to produce deterministic protobufs.
for job_name, tasks in sorted(self._cluster_spec.items()):
try:
job_name = compat.as_bytes(job_name)
except TypeError:
raise TypeError("Job name %r must be bytes or unicode" % job_name)
job_def = self._cluster_def.job.add()
job_def.name = job_name
for i, task_address in sorted(tasks.items()):
try:
task_address = compat.as_bytes(task_address)
except TypeError:
raise TypeError("Task address %r must be bytes or unicode" %
task_address)
job_def.tasks[i] = task_address
|
tensorflow-master
|
tensorflow/python/training/server_lib.py
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for basic_session_run_hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import shutil
import tempfile
import time
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.testing.python.framework import fake_summary_writer
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary as summary_lib
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
# Provide a realistic start time for unit tests where we need to mock out
# calls to time.time().
MOCK_START_TIME = 1484695987.209386
class MockCheckpointSaverListener(
basic_session_run_hooks.CheckpointSaverListener):
def __init__(self):
self.begin_count = 0
self.before_save_count = 0
self.after_save_count = 0
self.end_count = 0
self.ask_for_stop = False
def begin(self):
self.begin_count += 1
def before_save(self, session, global_step):
self.before_save_count += 1
def after_save(self, session, global_step):
self.after_save_count += 1
if self.ask_for_stop:
return True
def end(self, session, global_step):
self.end_count += 1
def get_counts(self):
return {
'begin': self.begin_count,
'before_save': self.before_save_count,
'after_save': self.after_save_count,
'end': self.end_count
}
class SecondOrStepTimerTest(test.TestCase):
@test_util.run_deprecated_v1
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.SecondOrStepTimer(every_secs=2.0, every_steps=10)
@test_util.run_deprecated_v1
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.SecondOrStepTimer()
@test.mock.patch.object(time, 'time')
def test_every_secs(self, mock_time):
mock_time.return_value = MOCK_START_TIME
timer = basic_session_run_hooks.SecondOrStepTimer(every_secs=1.0)
self.assertTrue(timer.should_trigger_for_step(1))
timer.update_last_triggered_step(1)
self.assertFalse(timer.should_trigger_for_step(1))
self.assertFalse(timer.should_trigger_for_step(2))
mock_time.return_value += 1.0
self.assertFalse(timer.should_trigger_for_step(1))
self.assertTrue(timer.should_trigger_for_step(2))
def test_every_steps(self):
timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=3)
self.assertTrue(timer.should_trigger_for_step(1))
timer.update_last_triggered_step(1)
self.assertFalse(timer.should_trigger_for_step(1))
self.assertFalse(timer.should_trigger_for_step(2))
self.assertFalse(timer.should_trigger_for_step(3))
self.assertTrue(timer.should_trigger_for_step(4))
def test_update_last_triggered_step(self):
timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=1)
elapsed_secs, elapsed_steps = timer.update_last_triggered_step(1)
self.assertEqual(None, elapsed_secs)
self.assertEqual(None, elapsed_steps)
elapsed_secs, elapsed_steps = timer.update_last_triggered_step(5)
self.assertLess(0, elapsed_secs)
self.assertEqual(4, elapsed_steps)
elapsed_secs, elapsed_steps = timer.update_last_triggered_step(7)
self.assertLess(0, elapsed_secs)
self.assertEqual(2, elapsed_steps)
class StopAtStepTest(test.TestCase):
def test_raise_in_both_last_step_and_num_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.StopAtStepHook(num_steps=10, last_step=20)
def test_stop_based_on_last_step(self):
h = basic_session_run_hooks.StopAtStepHook(last_step=10)
with ops.Graph().as_default():
global_step = variables.get_or_create_global_step()
no_op = control_flow_ops.no_op()
h.begin()
with session_lib.Session() as sess:
mon_sess = monitored_session._HookedSession(sess, [h])
sess.run(state_ops.assign(global_step, 5))
h.after_create_session(sess, None)
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 9))
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 10))
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 11))
mon_sess._should_stop = False
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
def test_stop_based_on_num_step(self):
h = basic_session_run_hooks.StopAtStepHook(num_steps=10)
with ops.Graph().as_default():
global_step = variables.get_or_create_global_step()
no_op = control_flow_ops.no_op()
h.begin()
with session_lib.Session() as sess:
mon_sess = monitored_session._HookedSession(sess, [h])
sess.run(state_ops.assign(global_step, 5))
h.after_create_session(sess, None)
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 13))
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 14))
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 15))
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 16))
mon_sess._should_stop = False
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
def test_stop_based_with_multiple_steps(self):
h = basic_session_run_hooks.StopAtStepHook(num_steps=10)
with ops.Graph().as_default():
global_step = variables.get_or_create_global_step()
no_op = control_flow_ops.no_op()
h.begin()
with session_lib.Session() as sess:
mon_sess = monitored_session._HookedSession(sess, [h])
sess.run(state_ops.assign(global_step, 5))
h.after_create_session(sess, None)
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 15))
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
class LoggingTensorHookTest(test.TestCase):
def setUp(self):
# Mock out logging calls so we can verify whether correct tensors are being
# monitored.
self._actual_log = tf_logging.info
self.logged_message = None
def mock_log(*args, **kwargs):
self.logged_message = args
self._actual_log(*args, **kwargs)
tf_logging.info = mock_log
def tearDown(self):
tf_logging.info = self._actual_log
def test_illegal_args(self):
with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):
basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=0)
with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):
basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=-10)
with self.assertRaisesRegexp(ValueError, 'xactly one of'):
basic_session_run_hooks.LoggingTensorHook(
tensors=['t'], every_n_iter=5, every_n_secs=5)
with self.assertRaisesRegexp(ValueError, 'xactly one of'):
basic_session_run_hooks.LoggingTensorHook(tensors=['t'])
def test_print_at_end_only(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
t = constant_op.constant(42.0, name='foo')
train_op = constant_op.constant(3)
hook = basic_session_run_hooks.LoggingTensorHook(
tensors=[t.name], at_end=True)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook])
self.evaluate(variables_lib.global_variables_initializer())
self.logged_message = ''
for _ in range(3):
mon_sess.run(train_op)
        # assertNotRegexpMatches is not supported by Python 3.1 and later
self.assertEqual(str(self.logged_message).find(t.name), -1)
hook.end(sess)
self.assertRegexpMatches(str(self.logged_message), t.name)
def _validate_print_every_n_steps(self, sess, at_end):
t = constant_op.constant(42.0, name='foo')
train_op = constant_op.constant(3)
hook = basic_session_run_hooks.LoggingTensorHook(
tensors=[t.name], every_n_iter=10, at_end=at_end)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook])
self.evaluate(variables_lib.global_variables_initializer())
mon_sess.run(train_op)
self.assertRegexpMatches(str(self.logged_message), t.name)
for _ in range(3):
self.logged_message = ''
for _ in range(9):
mon_sess.run(train_op)
        # assertNotRegexpMatches is not supported by Python 3.1 and later
self.assertEqual(str(self.logged_message).find(t.name), -1)
mon_sess.run(train_op)
self.assertRegexpMatches(str(self.logged_message), t.name)
# Add additional run to verify proper reset when called multiple times.
self.logged_message = ''
mon_sess.run(train_op)
      # assertNotRegexpMatches is not supported by Python 3.1 and later
self.assertEqual(str(self.logged_message).find(t.name), -1)
self.logged_message = ''
hook.end(sess)
if at_end:
self.assertRegexpMatches(str(self.logged_message), t.name)
else:
      # assertNotRegexpMatches is not supported by Python 3.1 and later
self.assertEqual(str(self.logged_message).find(t.name), -1)
def test_print_every_n_steps(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
self._validate_print_every_n_steps(sess, at_end=False)
# Verify proper reset.
self._validate_print_every_n_steps(sess, at_end=False)
def test_print_every_n_steps_and_end(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
self._validate_print_every_n_steps(sess, at_end=True)
# Verify proper reset.
self._validate_print_every_n_steps(sess, at_end=True)
def test_print_first_step(self):
    # If the hook runs every iteration, the first iteration has None duration.
with ops.Graph().as_default(), session_lib.Session() as sess:
t = constant_op.constant(42.0, name='foo')
train_op = constant_op.constant(3)
hook = basic_session_run_hooks.LoggingTensorHook(
tensors={'foo': t}, every_n_iter=1)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook])
self.evaluate(variables_lib.global_variables_initializer())
mon_sess.run(train_op)
self.assertRegexpMatches(str(self.logged_message), 'foo')
      # In the first run, the elapsed time is None.
self.assertEqual(str(self.logged_message).find('sec'), -1)
def _validate_print_every_n_secs(self, sess, at_end, mock_time):
t = constant_op.constant(42.0, name='foo')
train_op = constant_op.constant(3)
hook = basic_session_run_hooks.LoggingTensorHook(
tensors=[t.name], every_n_secs=1.0, at_end=at_end)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook])
self.evaluate(variables_lib.global_variables_initializer())
mon_sess.run(train_op)
self.assertRegexpMatches(str(self.logged_message), t.name)
    # assertNotRegexpMatches is not supported by Python 3.1 and later
self.logged_message = ''
mon_sess.run(train_op)
self.assertEqual(str(self.logged_message).find(t.name), -1)
mock_time.return_value += 1.0
self.logged_message = ''
mon_sess.run(train_op)
self.assertRegexpMatches(str(self.logged_message), t.name)
self.logged_message = ''
hook.end(sess)
if at_end:
self.assertRegexpMatches(str(self.logged_message), t.name)
else:
      # assertNotRegexpMatches is not supported by Python 3.1 and later
self.assertEqual(str(self.logged_message).find(t.name), -1)
@test.mock.patch.object(time, 'time')
def test_print_every_n_secs(self, mock_time):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_time.return_value = MOCK_START_TIME
self._validate_print_every_n_secs(sess, at_end=False, mock_time=mock_time)
# Verify proper reset.
self._validate_print_every_n_secs(sess, at_end=False, mock_time=mock_time)
@test.mock.patch.object(time, 'time')
def test_print_every_n_secs_and_end(self, mock_time):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_time.return_value = MOCK_START_TIME
self._validate_print_every_n_secs(sess, at_end=True, mock_time=mock_time)
# Verify proper reset.
self._validate_print_every_n_secs(sess, at_end=True, mock_time=mock_time)
def test_print_formatter(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
t = constant_op.constant(42.0, name='foo')
train_op = constant_op.constant(3)
hook = basic_session_run_hooks.LoggingTensorHook(
tensors=[t.name], every_n_iter=10,
formatter=lambda items: 'qqq=%s' % items[t.name])
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook])
self.evaluate(variables_lib.global_variables_initializer())
mon_sess.run(train_op)
self.assertEqual(self.logged_message[0], 'qqq=42.0')
class CheckpointSaverHookTest(test.TestCase):
def setUp(self):
self.model_dir = tempfile.mkdtemp()
self.graph = ops.Graph()
with self.graph.as_default():
self.scaffold = monitored_session.Scaffold()
self.global_step = variables.get_or_create_global_step()
self.train_op = training_util._increment_global_step(1)
def tearDown(self):
shutil.rmtree(self.model_dir, ignore_errors=True)
def test_saves_when_saver_and_scaffold_both_missing(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=1)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_raise_when_saver_and_scaffold_both_present(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, saver=self.scaffold.saver, scaffold=self.scaffold)
@test_util.run_deprecated_v1
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_secs=10, save_steps=20)
@test_util.run_deprecated_v1
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.CheckpointSaverHook(self.model_dir)
def test_save_secs_saves_in_first_step(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_secs=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_secs_calls_listeners_at_begin_and_end(self):
with self.graph.as_default():
listener = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_secs=2,
scaffold=self.scaffold,
listeners=[listener])
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op) # hook runs here
mon_sess.run(self.train_op) # hook won't run here, so it does at end
hook.end(sess) # hook runs here
self.assertEqual({
'begin': 1,
'before_save': 2,
'after_save': 2,
'end': 1
}, listener.get_counts())
def test_listener_with_monitored_session(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
global_step = variables.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
listener = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=1,
scaffold=scaffold,
listeners=[listener])
with monitored_session.SingularMonitoredSession(
hooks=[hook],
scaffold=scaffold,
checkpoint_dir=self.model_dir) as sess:
sess.run(train_op)
sess.run(train_op)
global_step_val = sess.raw_session().run(global_step)
listener_counts = listener.get_counts()
self.assertEqual(2, global_step_val)
self.assertEqual({
'begin': 1,
'before_save': 3,
'after_save': 3,
'end': 1
}, listener_counts)
def test_listener_stops_training_in_after_save(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
variables.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
listener = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=1, scaffold=scaffold, listeners=[listener])
with monitored_session.SingularMonitoredSession(
hooks=[hook], scaffold=scaffold,
checkpoint_dir=self.model_dir) as sess:
sess.run(train_op)
self.assertFalse(sess.should_stop())
sess.run(train_op)
self.assertFalse(sess.should_stop())
listener.ask_for_stop = True
sess.run(train_op)
self.assertTrue(sess.should_stop())
def test_listener_with_default_saver(self):
with ops.Graph().as_default():
global_step = variables.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
listener = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=1,
listeners=[listener])
with monitored_session.SingularMonitoredSession(
hooks=[hook],
checkpoint_dir=self.model_dir) as sess:
sess.run(train_op)
sess.run(train_op)
global_step_val = sess.raw_session().run(global_step)
listener_counts = listener.get_counts()
self.assertEqual(2, global_step_val)
self.assertEqual({
'begin': 1,
'before_save': 3,
'after_save': 3,
'end': 1
}, listener_counts)
with ops.Graph().as_default():
global_step = variables.get_or_create_global_step()
with monitored_session.SingularMonitoredSession(
checkpoint_dir=self.model_dir) as sess2:
global_step_saved_val = sess2.run(global_step)
self.assertEqual(2, global_step_saved_val)
def test_two_listeners_with_default_saver(self):
with ops.Graph().as_default():
global_step = variables.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
listener1 = MockCheckpointSaverListener()
listener2 = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=1,
listeners=[listener1, listener2])
with monitored_session.SingularMonitoredSession(
hooks=[hook],
checkpoint_dir=self.model_dir) as sess:
sess.run(train_op)
sess.run(train_op)
global_step_val = sess.raw_session().run(global_step)
listener1_counts = listener1.get_counts()
listener2_counts = listener2.get_counts()
self.assertEqual(2, global_step_val)
self.assertEqual({
'begin': 1,
'before_save': 3,
'after_save': 3,
'end': 1
}, listener1_counts)
self.assertEqual(listener1_counts, listener2_counts)
with ops.Graph().as_default():
global_step = variables.get_or_create_global_step()
with monitored_session.SingularMonitoredSession(
checkpoint_dir=self.model_dir) as sess2:
global_step_saved_val = sess2.run(global_step)
self.assertEqual(2, global_step_saved_val)
@test.mock.patch.object(time, 'time')
def test_save_secs_saves_periodically(self, mock_time):
with self.graph.as_default():
mock_time.return_value = MOCK_START_TIME
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_secs=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mock_time.return_value = MOCK_START_TIME
mon_sess.run(self.train_op) # Saved.
mock_time.return_value = MOCK_START_TIME + 0.5
mon_sess.run(self.train_op) # Not saved.
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
# Simulate 2.5 seconds of sleep.
mock_time.return_value = MOCK_START_TIME + 2.5
mon_sess.run(self.train_op) # Saved.
mock_time.return_value = MOCK_START_TIME + 2.6
mon_sess.run(self.train_op) # Not saved.
mock_time.return_value = MOCK_START_TIME + 2.7
mon_sess.run(self.train_op) # Not saved.
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
        # Simulate 7.5 more seconds of sleep (10 seconds from start).
mock_time.return_value = MOCK_START_TIME + 10
mon_sess.run(self.train_op) # Saved.
self.assertEqual(6,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
@test.mock.patch.object(time, 'time')
def test_save_secs_calls_listeners_periodically(self, mock_time):
with self.graph.as_default():
mock_time.return_value = MOCK_START_TIME
listener = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_secs=2,
scaffold=self.scaffold,
listeners=[listener])
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mock_time.return_value = MOCK_START_TIME + 0.5
mon_sess.run(self.train_op) # hook runs here
mock_time.return_value = MOCK_START_TIME + 0.5
mon_sess.run(self.train_op)
mock_time.return_value = MOCK_START_TIME + 3.0
mon_sess.run(self.train_op) # hook runs here
mock_time.return_value = MOCK_START_TIME + 3.5
mon_sess.run(self.train_op)
mock_time.return_value = MOCK_START_TIME + 4.0
mon_sess.run(self.train_op)
mock_time.return_value = MOCK_START_TIME + 6.5
mon_sess.run(self.train_op) # hook runs here
mock_time.return_value = MOCK_START_TIME + 7.0
mon_sess.run(self.train_op) # hook won't run here, so it does at end
mock_time.return_value = MOCK_START_TIME + 7.5
hook.end(sess) # hook runs here
self.assertEqual({
'begin': 1,
'before_save': 4,
'after_save': 4,
'end': 1
}, listener.get_counts())
def test_save_steps_saves_in_first_step(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
        # Saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
        # Saved
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_saves_at_end(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_secs=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
hook.end(sess)
self.assertEqual(2,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_summary_writer_defs(self):
fake_summary_writer.FakeSummaryWriter.install()
writer_cache.FileWriterCache.clear()
summary_writer = writer_cache.FileWriterCache.get(self.model_dir)
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
hook.after_create_session(sess, None)
mon_sess.run(self.train_op)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.model_dir,
expected_added_meta_graphs=[
meta_graph.create_meta_graph_def(
graph_def=self.graph.as_graph_def(add_shapes=True),
saver_def=self.scaffold.saver.saver_def)
])
fake_summary_writer.FakeSummaryWriter.uninstall()
def test_save_checkpoint_before_first_train_step(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
mon_sess = monitored_session._HookedSession(sess, [hook])
sess.run(self.scaffold.init_op)
hook.after_create_session(sess, None)
# Verifies that checkpoint is saved at step 0.
self.assertEqual(0,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
# Verifies that no checkpoint is saved after one training step.
mon_sess.run(self.train_op)
self.assertEqual(0,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
# Verifies that checkpoint is saved after save_steps.
mon_sess.run(self.train_op)
self.assertEqual(2,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
class CheckpointSaverHookMultiStepTest(test.TestCase):
def setUp(self):
self.model_dir = tempfile.mkdtemp()
self.graph = ops.Graph()
self.steps_per_run = 5
with self.graph.as_default():
self.scaffold = monitored_session.Scaffold()
self.global_step = variables.get_or_create_global_step()
self.train_op = training_util._increment_global_step(self.steps_per_run)
def tearDown(self):
shutil.rmtree(self.model_dir, ignore_errors=True)
def test_save_steps_saves_in_first_step(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=2*self.steps_per_run,
scaffold=self.scaffold)
hook._set_steps_per_run(self.steps_per_run)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=2*self.steps_per_run,
scaffold=self.scaffold)
hook._set_steps_per_run(self.steps_per_run)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
# Saved (step=5)
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Not saved (step=10)
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Saved (step=15)
self.assertEqual(15,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Not saved (step=20)
self.assertEqual(15,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Saved (step=25)
self.assertEqual(25,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_steps_saves_at_end(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=2*self.steps_per_run,
scaffold=self.scaffold)
hook._set_steps_per_run(self.steps_per_run)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
hook.end(sess)
self.assertEqual(10,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
class ResourceCheckpointSaverHookTest(test.TestCase):
def setUp(self):
self.model_dir = tempfile.mkdtemp()
self.graph = ops.Graph()
with self.graph.as_default():
self.scaffold = monitored_session.Scaffold()
with variable_scope.variable_scope('foo', use_resource=True):
self.global_step = training_util.get_or_create_global_step()
self.train_op = training_util._increment_global_step(1)
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
        # Saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
        # Saved
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
class StepCounterHookTest(test.TestCase):
def setUp(self):
self.log_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.log_dir, ignore_errors=True)
@test.mock.patch.object(time, 'time')
def test_step_counter_every_n_steps(self, mock_time):
mock_time.return_value = MOCK_START_TIME
with ops.Graph().as_default() as g, session_lib.Session() as sess:
variables.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
hook = basic_session_run_hooks.StepCounterHook(
summary_writer=summary_writer, every_n_steps=10)
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
with test.mock.patch.object(tf_logging, 'warning') as mock_log:
for _ in range(30):
mock_time.return_value += 0.01
mon_sess.run(train_op)
# logging.warning should not be called.
self.assertIsNone(mock_log.call_args)
hook.end(sess)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertItemsEqual([11, 21], summary_writer.summaries.keys())
for step in [11, 21]:
summary_value = summary_writer.summaries[step][0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
@test.mock.patch.object(time, 'time')
def test_step_counter_every_n_secs(self, mock_time):
mock_time.return_value = MOCK_START_TIME
with ops.Graph().as_default() as g, session_lib.Session() as sess:
variables.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
hook = basic_session_run_hooks.StepCounterHook(
summary_writer=summary_writer, every_n_steps=None, every_n_secs=0.1)
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(train_op)
mock_time.return_value += 0.2
mon_sess.run(train_op)
mock_time.return_value += 0.2
mon_sess.run(train_op)
hook.end(sess)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertTrue(summary_writer.summaries, 'No summaries were created.')
self.assertItemsEqual([2, 3], summary_writer.summaries.keys())
for summary in summary_writer.summaries.values():
summary_value = summary[0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
def test_global_step_name(self):
with ops.Graph().as_default() as g, session_lib.Session() as sess:
with variable_scope.variable_scope('bar'):
variable_scope.get_variable(
'foo',
initializer=0,
trainable=False,
collections=[
ops.GraphKeys.GLOBAL_STEP, ops.GraphKeys.GLOBAL_VARIABLES
])
train_op = training_util._increment_global_step(1)
summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
hook = basic_session_run_hooks.StepCounterHook(
summary_writer=summary_writer, every_n_steps=1, every_n_secs=None)
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(train_op)
mon_sess.run(train_op)
hook.end(sess)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertTrue(summary_writer.summaries, 'No summaries were created.')
self.assertItemsEqual([2], summary_writer.summaries.keys())
summary_value = summary_writer.summaries[2][0].value[0]
self.assertEqual('bar/foo/sec', summary_value.tag)
def test_log_warning_if_global_step_not_increased(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
variables.get_or_create_global_step()
train_op = training_util._increment_global_step(0) # keep same.
self.evaluate(variables_lib.global_variables_initializer())
hook = basic_session_run_hooks.StepCounterHook(
every_n_steps=1, every_n_secs=None)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(train_op) # Run one step to record global step.
with test.mock.patch.object(tf_logging, 'log_first_n') as mock_log:
for _ in range(30):
mon_sess.run(train_op)
self.assertRegexpMatches(
str(mock_log.call_args),
'global step.*has not been increased')
hook.end(sess)
def _setup_steps_per_run_test(self,
every_n_steps,
steps_per_run,
graph,
sess):
variables.get_or_create_global_step()
self.train_op = training_util._increment_global_step(steps_per_run)
self.summary_writer = fake_summary_writer.FakeSummaryWriter(
self.log_dir, graph)
self.hook = basic_session_run_hooks.StepCounterHook(
summary_writer=self.summary_writer, every_n_steps=every_n_steps)
self.hook._set_steps_per_run(steps_per_run)
self.hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
self.mon_sess = monitored_session._HookedSession(sess, [self.hook])
@test.mock.patch.object(time, 'time')
def test_steps_per_run_less_than_every_n_steps(self, mock_time):
mock_time.return_value = MOCK_START_TIME
with ops.Graph().as_default() as g, session_lib.Session() as sess:
self._setup_steps_per_run_test(10, 5, g, sess)
# Logs at 15, 25
for _ in range(5):
mock_time.return_value += 0.01
self.mon_sess.run(self.train_op)
self.hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertItemsEqual([15, 25], self.summary_writer.summaries.keys())
for step in [15, 25]:
summary_value = self.summary_writer.summaries[step][0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
@test.mock.patch.object(time, 'time')
def test_steps_per_run_equal_every_n_steps(self, mock_time):
mock_time.return_value = MOCK_START_TIME
with ops.Graph().as_default() as g, session_lib.Session() as sess:
self._setup_steps_per_run_test(5, 5, g, sess)
# Logs at 10, 15, 20, 25
for _ in range(5):
mock_time.return_value += 0.01
self.mon_sess.run(self.train_op)
self.hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertItemsEqual([10, 15, 20, 25],
self.summary_writer.summaries.keys())
for step in [10, 15, 20, 25]:
summary_value = self.summary_writer.summaries[step][0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
@test.mock.patch.object(time, 'time')
def test_steps_per_run_greater_than_every_n_steps(self, mock_time):
mock_time.return_value = MOCK_START_TIME
with ops.Graph().as_default() as g, session_lib.Session() as sess:
self._setup_steps_per_run_test(5, 10, g, sess)
# Logs at 20, 30, 40, 50
for _ in range(5):
mock_time.return_value += 0.01
self.mon_sess.run(self.train_op)
self.hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertItemsEqual([20, 30, 40, 50],
self.summary_writer.summaries.keys())
for step in [20, 30, 40, 50]:
summary_value = self.summary_writer.summaries[step][0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
@test_util.run_deprecated_v1
class SummarySaverHookTest(test.TestCase):
def setUp(self):
test.TestCase.setUp(self)
self.log_dir = 'log/dir'
self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir)
var = variables_lib.Variable(0.0)
tensor = state_ops.assign_add(var, 1.0)
tensor2 = tensor * 2
self.summary_op = summary_lib.scalar('my_summary', tensor)
self.summary_op2 = summary_lib.scalar('my_summary2', tensor2)
variables.get_or_create_global_step()
self.train_op = training_util._increment_global_step(1)
def test_raise_when_scaffold_and_summary_op_both_missing(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.SummarySaverHook()
def test_raise_when_scaffold_and_summary_op_both_present(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.SummarySaverHook(
scaffold=monitored_session.Scaffold(), summary_op=self.summary_op)
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.SummarySaverHook(
save_secs=10, save_steps=20, summary_writer=self.summary_writer)
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.SummarySaverHook(
save_secs=None, save_steps=None, summary_writer=self.summary_writer)
def test_save_steps(self):
hook = basic_session_run_hooks.SummarySaverHook(
save_steps=8,
summary_writer=self.summary_writer,
summary_op=self.summary_op)
with self.cached_session() as sess:
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
for _ in range(30):
mon_sess.run(self.train_op)
hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_summaries={
1: {
'my_summary': 1.0
},
9: {
'my_summary': 2.0
},
17: {
'my_summary': 3.0
},
25: {
'my_summary': 4.0
},
})
def test_multiple_summaries(self):
hook = basic_session_run_hooks.SummarySaverHook(
save_steps=8,
summary_writer=self.summary_writer,
summary_op=[self.summary_op, self.summary_op2])
with self.cached_session() as sess:
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
for _ in range(10):
mon_sess.run(self.train_op)
hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_summaries={
1: {
'my_summary': 1.0,
'my_summary2': 2.0
},
9: {
'my_summary': 2.0,
'my_summary2': 4.0
},
})
@test.mock.patch.object(time, 'time')
def test_save_secs_saving_once_every_step(self, mock_time):
mock_time.return_value = MOCK_START_TIME
hook = basic_session_run_hooks.SummarySaverHook(
save_secs=0.5,
summary_writer=self.summary_writer,
summary_op=self.summary_op)
with self.cached_session() as sess:
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
for _ in range(4):
mon_sess.run(self.train_op)
mock_time.return_value += 0.5
hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_summaries={
1: {
'my_summary': 1.0
},
2: {
'my_summary': 2.0
},
3: {
'my_summary': 3.0
},
4: {
'my_summary': 4.0
},
})
@test.mock.patch.object(time, 'time')
def test_save_secs_saving_once_every_three_steps(self, mock_time):
    mock_time.return_value = MOCK_START_TIME
hook = basic_session_run_hooks.SummarySaverHook(
save_secs=9.,
summary_writer=self.summary_writer,
summary_op=self.summary_op)
with self.cached_session() as sess:
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
for _ in range(8):
mon_sess.run(self.train_op)
mock_time.return_value += 3.1
hook.end(sess)
      # 24.8 seconds pass in total (3.1 * 8); the hook saves every 9 seconds,
      # starting from the first step:
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_summaries={
1: {
'my_summary': 1.0
},
4: {
'my_summary': 2.0
},
7: {
'my_summary': 3.0
},
})
class GlobalStepWaiterHookTest(test.TestCase):
def test_not_wait_for_step_zero(self):
with ops.Graph().as_default():
variables.get_or_create_global_step()
hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=0)
hook.begin()
with session_lib.Session() as sess:
# Before run should return without waiting gstep increment.
hook.before_run(
session_run_hook.SessionRunContext(
original_args=None, session=sess))
@test.mock.patch.object(time, 'sleep')
def test_wait_for_step(self, mock_sleep):
with ops.Graph().as_default():
gstep = variables.get_or_create_global_step()
hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=1000)
hook.begin()
with session_lib.Session() as sess:
# Mock out calls to time.sleep() to update the global step.
class Context(object):
counter = 0
def mock_sleep_side_effect(seconds):
del seconds # argument is ignored
Context.counter += 1
if Context.counter == 1:
# The first time sleep() is called, we update the global_step from
# 0 to 500.
sess.run(state_ops.assign(gstep, 500))
elif Context.counter == 2:
# The second time sleep() is called, we update the global_step from
# 500 to 1100.
sess.run(state_ops.assign(gstep, 1100))
else:
raise AssertionError(
'Expected before_run() to terminate after the second call to '
'time.sleep()')
mock_sleep.side_effect = mock_sleep_side_effect
# Run the mocked-out interaction with the hook.
self.evaluate(variables_lib.global_variables_initializer())
run_context = session_run_hook.SessionRunContext(
original_args=None, session=sess)
hook.before_run(run_context)
self.assertEqual(Context.counter, 2)
class FinalOpsHookTest(test.TestCase):
def test_final_ops_is_scalar_tensor(self):
with ops.Graph().as_default():
expected_value = 4
final_ops = constant_op.constant(expected_value)
hook = basic_session_run_hooks.FinalOpsHook(final_ops)
hook.begin()
with session_lib.Session() as session:
hook.end(session)
self.assertEqual(expected_value,
hook.final_ops_values)
def test_final_ops_is_tensor(self):
with ops.Graph().as_default():
expected_values = [1, 6, 3, 5, 2, 4]
final_ops = constant_op.constant(expected_values)
hook = basic_session_run_hooks.FinalOpsHook(final_ops)
hook.begin()
with session_lib.Session() as session:
hook.end(session)
self.assertListEqual(expected_values,
hook.final_ops_values.tolist())
def test_final_ops_triggers_out_of_range_error(self):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.range(1)
iterator = dataset_ops.make_one_shot_iterator(dataset)
read_ops = iterator.get_next()
final_ops = read_ops
hook = basic_session_run_hooks.FinalOpsHook(final_ops)
hook.begin()
with session_lib.Session() as session:
session.run(read_ops)
with test.mock.patch.object(tf_logging, 'warning') as mock_log:
with self.assertRaisesRegexp(errors.OutOfRangeError,
'End of sequence'):
hook.end(session)
self.assertRegexpMatches(
str(mock_log.call_args),
'dependency back to some input source')
def test_final_ops_with_dictionary(self):
with ops.Graph().as_default():
expected_values = [4, -3]
final_ops = array_ops.placeholder(dtype=dtypes.float32)
final_ops_feed_dict = {final_ops: expected_values}
hook = basic_session_run_hooks.FinalOpsHook(
final_ops, final_ops_feed_dict)
hook.begin()
with session_lib.Session() as session:
hook.end(session)
self.assertListEqual(expected_values,
hook.final_ops_values.tolist())
@test_util.run_deprecated_v1
class ResourceSummarySaverHookTest(test.TestCase):
def setUp(self):
test.TestCase.setUp(self)
self.log_dir = 'log/dir'
self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir)
var = variable_scope.get_variable('var', initializer=0.0, use_resource=True)
tensor = state_ops.assign_add(var, 1.0)
self.summary_op = summary_lib.scalar('my_summary', tensor)
with variable_scope.variable_scope('foo', use_resource=True):
variables.create_global_step()
self.train_op = training_util._increment_global_step(1)
def test_save_steps(self):
hook = basic_session_run_hooks.SummarySaverHook(
save_steps=8,
summary_writer=self.summary_writer,
summary_op=self.summary_op)
with self.cached_session() as sess:
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
for _ in range(30):
mon_sess.run(self.train_op)
hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_summaries={
1: {
'my_summary': 1.0
},
9: {
'my_summary': 2.0
},
17: {
'my_summary': 3.0
},
25: {
'my_summary': 4.0
},
})
class FeedFnHookTest(test.TestCase):
def test_feeding_placeholder(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
x = array_ops.placeholder(dtype=dtypes.float32)
y = x + 1
hook = basic_session_run_hooks.FeedFnHook(
feed_fn=lambda: {x: 1.0})
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook])
self.assertEqual(mon_sess.run(y), 2)
class ProfilerHookTest(test.TestCase):
def setUp(self):
super(ProfilerHookTest, self).setUp()
self.output_dir = tempfile.mkdtemp()
self.graph = ops.Graph()
self.filepattern = os.path.join(self.output_dir, 'timeline-*.json')
with self.graph.as_default():
self.global_step = variables.get_or_create_global_step()
self.train_op = state_ops.assign_add(self.global_step, 1)
def tearDown(self):
super(ProfilerHookTest, self).tearDown()
shutil.rmtree(self.output_dir, ignore_errors=True)
def _count_timeline_files(self):
return len(gfile.Glob(self.filepattern))
@test_util.run_deprecated_v1
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.ProfilerHook(save_secs=10, save_steps=20)
@test_util.run_deprecated_v1
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.ProfilerHook(save_secs=None, save_steps=None)
def test_save_secs_does_not_save_in_first_step(self):
with self.graph.as_default():
hook = basic_session_run_hooks.ProfilerHook(
save_secs=2, output_dir=self.output_dir)
with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:
sess.run(self.train_op)
self.assertEqual(0, self._count_timeline_files())
@test.mock.patch.object(time, 'time')
def test_save_secs_saves_periodically(self, mock_time):
# Pick a fixed start time.
with self.graph.as_default():
mock_time.return_value = MOCK_START_TIME
hook = basic_session_run_hooks.ProfilerHook(
save_secs=2, output_dir=self.output_dir)
with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:
sess.run(self.train_op) # Not saved.
self.assertEqual(0, self._count_timeline_files())
# Simulate 2.5 seconds of sleep.
mock_time.return_value = MOCK_START_TIME + 2.5
sess.run(self.train_op) # Saved.
self.assertEqual(1, self._count_timeline_files())
# Pretend some small amount of time has passed.
mock_time.return_value = MOCK_START_TIME + 2.6
sess.run(self.train_op) # Not saved.
# Edge test just before we should save the timeline.
mock_time.return_value = MOCK_START_TIME + 4.4
sess.run(self.train_op) # Not saved.
self.assertEqual(1, self._count_timeline_files())
mock_time.return_value = MOCK_START_TIME + 4.5
sess.run(self.train_op) # Saved.
self.assertEqual(2, self._count_timeline_files())
def test_save_steps_does_not_save_in_first_step(self):
with self.graph.as_default():
hook = basic_session_run_hooks.ProfilerHook(
save_steps=1, output_dir=self.output_dir)
with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:
sess.run(self.train_op) # Not saved.
self.assertEqual(0, self._count_timeline_files())
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
hook = basic_session_run_hooks.ProfilerHook(
save_steps=2, output_dir=self.output_dir)
with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:
self.assertEqual(0, self._count_timeline_files())
sess.run(self.train_op) # Not saved.
self.assertEqual(0, self._count_timeline_files())
sess.run(self.train_op) # Saved.
self.assertEqual(1, self._count_timeline_files())
sess.run(self.train_op) # Not saved.
self.assertEqual(1, self._count_timeline_files())
sess.run(self.train_op) # Saved.
self.assertEqual(2, self._count_timeline_files())
sess.run(self.train_op) # Not saved.
self.assertEqual(2, self._count_timeline_files())
def test_run_metadata_saves(self):
writer_cache.FileWriterCache.clear()
fake_summary_writer.FakeSummaryWriter.install()
fake_writer = writer_cache.FileWriterCache.get(self.output_dir)
with self.graph.as_default():
hook = basic_session_run_hooks.ProfilerHook(
save_steps=1, output_dir=self.output_dir)
with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:
sess.run(self.train_op) # Not saved.
sess.run(self.train_op) # Saved.
self.assertEqual(
list(fake_writer._added_run_metadata.keys()), ['step_2'])
fake_summary_writer.FakeSummaryWriter.uninstall()
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/training/basic_session_run_hooks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as _summary
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import training_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.Supervisor"])
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
This class is deprecated. Please use
`tf.compat.v1.train.MonitoredTrainingSession` instead.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of TensorFlow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
If the program crashes and is restarted, the managed session automatically
  reinitializes variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
  The only change you have to make to the single-program code is to indicate
  whether the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.distribute.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `sv.managed_session()` waits for the Model to have
been initialized before returning a session to the training code. The
non-chief tasks depend on the chief task for initializing the model.
If one of the tasks crashes and restarts, `managed_session()`
checks if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model needs
to be initialized, the chief task takes care of reinitializing it; the other
tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
`tf.train.Server.create_local_server` for
details.
* Specifying `'grpc://hostname:port'` requests a session that uses
the RPC interface to a specific host, and also allows the in-process
    master to access remote TensorFlow workers. Often, it is
appropriate to pass `server.target` (for some `tf.distribute.Server`
    named `server`); see the sketch below.
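  For illustration, a minimal sketch of both cases (`my_train_op` and the
  `tf.distribute.Server` named `server` are assumed to be defined elsewhere):
  ```python
  # '' requests an in-process session that does not use RPC.
  sv = Supervisor(logdir='/tmp/mydir')
  with sv.managed_session('') as sess:
    while not sv.should_stop():
      sess.run(my_train_op)
  # server.target requests a session on that server over the RPC interface.
  with sv.managed_session(server.target) as sess:
    while not sv.should_stop():
      sess.run(my_train_op)
  ```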
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
sv.loop(60, print_loss, (sess, ))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
  either the optional `summary_op` and `saver` passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
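  For example, a minimal sketch of passing a custom `local_init_op` (this one
  merely reproduces the default behavior explicitly):
  ```python
  local_init = tf.group(tf.compat.v1.local_variables_initializer(),
                        tf.compat.v1.tables_initializer())
  sv = Supervisor(logdir='/tmp/mydir', local_init_op=local_init)
  ```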
"""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
@deprecation.deprecated(None,
"Please switch to tf.train.MonitoredTrainingSession")
def __init__(self,
graph=None,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT,
is_chief=True,
init_op=USE_DEFAULT,
init_feed_dict=None,
local_init_op=USE_DEFAULT,
logdir=None,
summary_op=USE_DEFAULT,
saver=USE_DEFAULT,
global_step=USE_DEFAULT,
save_summaries_secs=120,
save_model_secs=600,
recovery_wait_secs=30,
stop_grace_secs=120,
checkpoint_basename="model.ckpt",
session_manager=None,
summary_writer=USE_DEFAULT,
init_fn=None,
local_init_run_options=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
`prepare_or_wait_for_session()` to check if the model is ready to use.
The model is considered ready if it returns an empty array. Defaults to
        the tensor returned from `tf.compat.v1.report_uninitialized_variables()`.
If `None`, the model is not checked for readiness.
ready_for_local_init_op: 1-D string `Tensor`. This tensor is evaluated by
supervisors in `prepare_or_wait_for_session()` to check if the model is
ready to run the local_init_op. The model is considered ready if it
returns an empty array. Defaults to `None`. If `None`, the model is not
checked for readiness before running local_init_op.
is_chief: If True, create a chief supervisor in charge of initializing and
restoring the model. If False, create a supervisor that relies on a
        chief supervisor for initialization and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all global variables. If `None`, no initialization is done
automatically unless you pass a value for `init_fn`, see below.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these are
table initializers and initializers for local variables. If `None`, no
further per supervisor-instance initialization is done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors. The
directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs. Used
by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from summary.merge_all(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
        specified. Defaults to the saver returned by Saver(). If `None`, the
model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
        Defaults to the op named 'global_step' in the graph if it exists, is of
rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model is
ready. Used by supervisors when waiting for a chief supervisor to
initialize or restore the model. Defaults to 30 seconds.
stop_grace_secs: Grace period, in seconds, given to running threads to
stop when `stop()` is called. Defaults to 120 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
summary_writer: `SummaryWriter` to use or `USE_DEFAULT`. Can be `None` to
indicate that no summaries should be written.
init_fn: Optional callable used to initialize the model. Called after the
optional `init_op` is called. The callable must accept one argument,
the session being initialized.
local_init_run_options: RunOptions to be passed as the SessionManager
local_init_run_options parameter.
Returns:
A `Supervisor`.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
`Supervisor`s are not supported when eager execution is enabled.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError("Supervisors are compatible with eager execution.")
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(
ready_op=ready_op, ready_for_local_init_op=ready_for_local_init_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True),
saver_def=self._saver.saver_def if self._saver else None)
self._is_chief = is_chief
self._coord = coordinator.Coordinator()
self._recovery_wait_secs = recovery_wait_secs
self._stop_grace_secs = stop_grace_secs
self._init_fn = init_fn
self._local_init_run_options = local_init_run_options
# Set all attributes related to checkpointing and writing events to None.
# Afterwards, set them appropriately for chief supervisors, as these are
# the only supervisors that can write checkpoints and events.
self._logdir = None
self._save_summaries_secs = None
self._save_model_secs = None
self._save_path = None
self._summary_writer = None
if self._is_chief:
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
if self._logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
if summary_writer is Supervisor.USE_DEFAULT:
if self._logdir:
self._summary_writer = _summary.FileWriter(self._logdir)
else:
self._summary_writer = summary_writer
self._graph_added_to_summary = False
self._init_session_manager(session_manager=session_manager)
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = session_manager_mod.SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op,
ready_for_local_init_op=self._ready_for_local_init_op,
graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs,
local_init_run_options=self._local_init_run_options)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Tensor` to check if the model is initialized. If it's set to
USE_DEFAULT, creates an op that checks all the variables are
initialized.
ready_for_local_init_op: `Tensor` to check if the model is ready to run
local_init_op. If it's set to USE_DEFAULT, creates an op that checks all
the global variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
# ready_for_local_init_op defaults to None for backward compatibility
if ready_for_local_init_op is Supervisor.USE_DEFAULT:
ready_for_local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
self._ready_for_local_init_op = ready_for_local_init_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.global_variables_initializer()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
collection. If the collection is empty, create an op that initializes
all local variables and all tables.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [
variables.local_variables_initializer(),
lookup_ops.tables_initializer()
]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that saves all
the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.global_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initializes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs. If set
to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = _summary.merge_all()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If set to
USE_DEFAULT, creates global_step tensor.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def is_chief(self):
"""Return True if this is a chief supervisor.
Returns:
A bool.
"""
return self._is_chief
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def ready_for_local_init_op(self):
return self._ready_for_local_init_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the chief supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
A timestamp.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
A timestamp.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
assert self._is_chief
if self._logdir:
training_util.write_graph(
self._graph.as_graph_def(add_shapes=True), self._logdir,
"graph.pbtxt")
if self._summary_writer and not self._graph_added_to_summary:
self._summary_writer.add_graph(self._graph)
self._summary_writer.add_meta_graph(self._meta_graph_def)
self._graph_added_to_summary = True
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
    - A StepCounter thread that measures step times (see the example below).
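    Example: a sketch of starting the services by hand when not using
    `managed_session()`; it assumes this supervisor was created with a
    `logdir`:
    ```python
    sess = sv.prepare_or_wait_for_session('', start_standard_services=False)
    threads = sv.start_standard_services(sess)
    # ...run training...
    sv.stop(threads)
    ```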
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
        sv.coord.join(<list of threads>)
Raises:
RuntimeError: If called with a non-chief Supervisor.
      ValueError: If no `logdir` was passed to the constructor, as the
        services need a log directory.
"""
if not self._is_chief:
raise RuntimeError("Only chief supervisor can start standard services. "
"Because only chief supervisors can write events.")
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START), current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
return threads
def prepare_or_wait_for_session(self,
master="",
config=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
    and `start_standard_services` is set to True, also call the session
manager to start the standard services.
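    Example: a minimal sketch of driving training without the
    `managed_session()` context manager (`my_train_op` is assumed to be
    defined on the graph):
    ```python
    sv = Supervisor(logdir='/tmp/mydir')
    sess = sv.prepare_or_wait_for_session('')
    while not sv.should_stop():
      sess.run(my_train_op)
    sv.stop()
    sess.close()
    ```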
Args:
master: name of the TensorFlow master to use. See the
`tf.compat.v1.Session` constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session, which is
passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
        checkpoint before creating the session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by the
# coordinator can run.
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(
master,
init_op=self.init_op,
saver=self.saver,
checkpoint_dir=self._logdir,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config,
init_feed_dict=self._init_feed_dict,
init_fn=self._init_fn)
self._write_graph()
if start_standard_services:
logging.info("Starting standard services.")
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(
master, config=config, max_wait_secs=max_wait_secs)
if start_standard_services:
logging.info("Starting queue runners.")
self.start_queue_runners(sess)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
are already started automatically when you create a session with the
supervisor, so unless you have non-collected queue runners to start
you do not need to call this explicitly.
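    Example: a sketch of starting threads for a queue runner that was not
    added to the `QUEUE_RUNNERS` collection (`my_queue_runner` is
    hypothetical):
    ```python
    threads = sv.start_queue_runners(sess, [my_queue_runner])
    ```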
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Queues are not compatible with eager execution. To ingest data when eager
execution is enabled, use the `tf.data` API.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError("Queues are not compatible with eager execution.")
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(
qr.create_threads(sess, coord=self._coord, daemon=True, start=True))
return threads
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
repeatedly. Otherwise it calls it every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
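    Example: a sketch that calls a hypothetical `print_loss(sess)` every 60
    seconds until a stop is requested:
    ```python
    sv.loop(60, print_loss, args=(sess,))
    ```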
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(
self._coord,
timer_interval_secs,
target=target,
args=args,
kwargs=kwargs)
looper.start()
return looper
def stop(self,
threads=None,
close_summary_writer=True,
ignore_live_threads=False):
"""Stop the services and the coordinator.
This does not close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services, the
threads started for `QueueRunners`, and the threads started by the
`loop()` method. To wait on additional threads, pass the list in this
parameter.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True` if the summary writer was created by the supervisor, `False`
otherwise.
ignore_live_threads: If `True` ignores threads that remain running after a
grace period when joining threads via the coordinator, instead of
raising a RuntimeError.
"""
self._coord.request_stop()
try:
# coord.join() re-raises the first reported exception; the "finally"
# block ensures that we clean up whether or not an exception was
# reported.
self._coord.join(
threads,
stop_grace_period_secs=self._stop_grace_secs,
ignore_live_threads=ignore_live_threads)
finally:
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._graph_added_to_summary = False
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
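    Example: a sketch in which any exception raised by the training step is
    reported to the coordinator (`my_train_op` is assumed to exist):
    ```python
    with sv.stop_on_exception():
      while not sv.should_stop():
        sess.run(my_train_op)
    ```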
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
      global_step: Int. The global step this summary is associated with. If
        `None`, the supervisor will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._summary_writer:
raise RuntimeError("Writing a summary requires a summary writer.")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
"""Returns the global_step from the default graph.
Returns:
The global step `Tensor` or `None`.
"""
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type in ["Variable", "VariableV2"] and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
# pylint: disable=g-doc-return-or-yield,broad-except
@contextlib.contextmanager
def managed_session(self,
master="",
config=None,
start_standard_services=True,
close_summary_writer=True):
"""Returns a context manager for a managed session.
This context manager creates and automatically recovers a session. It
optionally starts the standard services that handle checkpoints and
summaries. It monitors exceptions raised from the `with` block or from the
services and stops the supervisor as needed.
The context manager is typically used as follows:
```python
def train():
sv = tf.compat.v1.train.Supervisor(...)
with sv.managed_session(<master>) as sess:
for step in xrange(..):
if sv.should_stop():
break
sess.run(<my training op>)
...do other things needed at each training step...
```
An exception raised from the `with` block or one of the service threads is
raised again when the block exits. This is done after stopping all threads
and closing the session. For example, an `AbortedError` exception, raised
in case of preemption of one of the workers in a distributed model, is
raised again when the block exits.
If you want to retry the training loop in case of preemption you can do it
as follows:
```python
def main(...):
      while True:
try:
train()
        except tf.errors.AbortedError:
pass
```
As a special case, exceptions used for control flow, such as
`OutOfRangeError` which reports that input queues are exhausted, are not
raised again from the `with` block: they indicate a clean termination of
the training loop and are considered normal termination.
Args:
master: name of the TensorFlow master to use. See the
`tf.compat.v1.Session` constructor for how this is interpreted.
config: Optional `ConfigProto` proto used to configure the session. Passed
as-is to create the session.
start_standard_services: Whether to start the standard services, such as
checkpoint, summary and step counter.
close_summary_writer: Whether to close the summary writer when closing the
session. Defaults to True.
Returns:
A context manager that yields a `Session` restored from the latest
      checkpoint or initialized from scratch if no checkpoint exists. The
session is closed when the `with` block exits.
"""
try:
sess = self.prepare_or_wait_for_session(
master=master,
config=config,
start_standard_services=start_standard_services)
yield sess
except Exception as e:
self.request_stop(e)
finally:
try:
# Request all the threads to stop and wait for them to do so. Any
# exception raised by the threads is raised again from stop().
# Passing stop_grace_period_secs is for blocked enqueue/dequeue
# threads which are not checking for `should_stop()`. They
# will be stopped when we close the session further down.
self.stop(close_summary_writer=close_summary_writer)
finally:
# Close the session to finish up all pending calls. We do not care
# about exceptions raised when closing. This takes care of
# blocked enqueue/dequeue calls.
try:
sess.close()
except Exception:
# Silently ignore exceptions raised by close().
pass
# pylint: enable=g-doc-return-or-yield,broad-except
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run(
[self._sv.summary_op, self._sv.global_step])
else:
summary_strs = self._sess.run(self._sv.summary_op)
global_step = None
if self._sv.summary_writer:
logging.info("Recording summary at step %s.", global_step)
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess, step_counter=None):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
      step_counter: A `Tensor` holding the step counter. By default, it uses
sv.global_step.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
step_counter = sv.global_step if step_counter is None else step_counter
self._step_counter = step_counter
self._summary_tag = "%s/sec" % self._step_counter.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(self._sess, self._step_counter)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._step_counter)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
    # Report the number of steps done per second.
if elapsed_time > 0.:
steps_per_sec = added_steps / elapsed_time
else:
steps_per_sec = float("inf")
summary = Summary(value=[
Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)
])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10, self._summary_tag,
steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
logging.info("Saving checkpoint to path %s", self._sv.save_path)
self._sv.saver.save(
self._sess, self._sv.save_path, global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
|
tensorflow-master
|
tensorflow/python/training/supervisor.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import os
import random
import time
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_module
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import base as trackable_base
from tensorflow.python.training.tracking import tracking as trackable_tracking
from tensorflow.python.training.tracking import util as trackable_utils
from tensorflow.python.util import compat
class SaverTest(test.TestCase):
def basicSaveRestore(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
with self.session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
# Initialize all variables
if not context.executing_eagerly():
self.evaluate([variables.global_variables_initializer(), v2_init])
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"v2": v2.saveable
}, restore_sequentially=True)
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Assert that the variables are not initialized.
if not context.executing_eagerly():
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(self.evaluate(v2.keys())))
self.assertEqual(0, len(self.evaluate(v2.values())))
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.session(graph=ops_lib.Graph()) as sess:
v0_2 = variable_op(1000.0, name="v0")
v1_2 = variable_op(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2_2.insert("k1000", 3000.0)
# Check that the parameter nodes have been initialized.
if not context.executing_eagerly():
init_all_op = [variables.global_variables_initializer(), v2_init]
self.evaluate(init_all_op)
        # TODO(xpan): Why doesn't _mutable_hash_table_v2 create an empty
        # table in eager mode, as it claims?
self.assertEqual(b"k1000", self.evaluate(v2_2.keys()))
self.assertEqual(3000.0, self.evaluate(v2_2.values()))
self.assertEqual(1000.0, self.evaluate(v0_2))
self.assertEqual(2000.0, self.evaluate(v1_2))
# Restore the values saved earlier in the parameter nodes.
save2 = saver_module.Saver({"v0": v0_2, "v1": v1_2, "v2": v2_2.saveable})
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0_2))
self.assertEqual(20.0, self.evaluate(v1_2))
self.assertEqual(b"k1", self.evaluate(v2_2.keys()))
self.assertEqual(30.0, self.evaluate(v2_2.values()))
def testBasic(self):
self.basicSaveRestore(variables.Variable)
@test_util.run_in_graph_and_eager_modes
def testResourceBasic(self):
self.basicSaveRestore(resource_variable_ops.ResourceVariable)
@test_util.run_deprecated_v1
def testResourceColocation(self):
partitioner = partitioned_variables.fixed_size_partitioner(num_shards=2)
with ops_lib.device("/job:ps/device:GPU:0"):
v = variable_scope.get_variable("v0",
shape=[10, 2],
partitioner=partitioner,
use_resource=True)
saver_module.Saver({"v0": v}).build()
save_op = None
for op in ops_lib.get_default_graph().get_operations():
if op.type == "SaveV2":
save_op = op
break
assert save_op is not None
for save_inp in save_op.inputs[3:]:
# Input to SaveV2 op is placed on CPU of the same device as the Variable.
self.assertEqual("/job:ps/device:CPU:0", save_inp.device)
def testResourceVariableReadOpsAddedDeterministically(self):
graph_defs = []
num_graphs = 10
for _ in range(num_graphs):
with ops_lib.Graph().as_default() as g:
for i in range(20):
resource_variable_ops.ResourceVariable(i, name="var%s" % i)
saver_module.Saver()
graph_defs.append(g.as_graph_def())
for i in range(num_graphs - 1):
self.assertEqual(graph_defs[i], graph_defs[i + 1])
def testEagerBasic(self):
with context.eager_mode():
ckpt_prefix = os.path.join(self.get_temp_dir(), "ckpt")
v1 = resource_variable_ops.ResourceVariable(3.14, name="v1")
v2 = resource_variable_ops.ResourceVariable([1, 2], name="v2")
save = saver_module.Saver([v1, v2])
save.save(None, ckpt_prefix)
v1.assign(0.0)
v2.assign([0, 0])
self.assertNear(0.0, self.evaluate(v1), 1e-5)
self.assertAllEqual([0, 0], self.evaluate(v2))
save.restore(None, ckpt_prefix)
self.assertNear(3.14, self.evaluate(v1), 1e-5)
self.assertAllEqual([1, 2], self.evaluate(v2))
def testEagerGraphCompatibility(self):
# Save from graph mode and restore from eager mode.
graph_ckpt_prefix = os.path.join(self.get_temp_dir(), "graph_ckpt")
with context.graph_mode():
with self.session(graph=ops_lib.Graph()) as sess:
# Create a graph model and save the checkpoint.
w1 = resource_variable_ops.ResourceVariable(1.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(2.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
self.evaluate(variables.global_variables_initializer())
graph_saver.save(sess, graph_ckpt_prefix)
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w1 = resource_variable_ops.ResourceVariable(0.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(0.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
graph_saver.restore(None, graph_ckpt_prefix)
self.assertAllEqual(self.evaluate(w1), 1.0)
self.assertAllEqual(self.evaluate(w2), 2.0)
# Save from eager mode and restore from graph mode.
eager_ckpt_prefix = os.path.join(self.get_temp_dir(), "eager_ckpt")
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w3 = resource_variable_ops.ResourceVariable(3.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(4.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
graph_saver.save(None, eager_ckpt_prefix)
with context.graph_mode():
with self.session(graph=ops_lib.Graph()) as sess:
w3 = resource_variable_ops.ResourceVariable(0.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(0.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
self.evaluate(variables.global_variables_initializer())
graph_saver.restore(sess, eager_ckpt_prefix)
self.assertAllEqual(w3.eval(), 3.0)
self.assertAllEqual(w4.eval(), 4.0)
@test_util.run_in_graph_and_eager_modes
def testResourceSaveRestoreCachingDevice(self):
save_path = os.path.join(self.get_temp_dir(), "resource_cache")
with self.session(graph=ops_lib.Graph()) as sess:
v = resource_variable_ops.ResourceVariable([1], caching_device="/cpu:0",
name="v")
if context.executing_eagerly():
sess = None
else:
self.evaluate(variables.global_variables_initializer())
save = saver_module.Saver([v])
save.save(sess, save_path)
save2 = saver_module.Saver([v])
save2.restore(sess, save_path)
      self.assertEqual(self.evaluate(v), [1])
def testNoAdditionalOpsAddedBySaverForResourceVariablesOutsideSaveScope(self):
with ops_lib.Graph().as_default() as g:
v = resource_variable_ops.ResourceVariable(1.0, name="v")
with ops_lib.name_scope("saver1"):
saver_module.Saver()
with ops_lib.name_scope("saver2"):
saver_module.Saver({"name": v})
ops_in_saver1_scope_but_not_save_scope = [
op for op in g.get_operations()
if (op.name.startswith("saver1/") and
not op.name.startswith("saver1/save/"))]
self.assertEqual(ops_in_saver1_scope_but_not_save_scope, [])
ops_in_saver2_scope_but_not_save_scope = [
op for op in g.get_operations()
if (op.name.startswith("saver2/") and
not op.name.startswith("saver2/save/"))]
self.assertEqual(ops_in_saver2_scope_but_not_save_scope, [])
@test_util.run_deprecated_v1
def testSaveCopyRestoreWithSaveRelativePaths(self):
"""Save, copy checkpoint dir and restore from copied dir.
This only works for save_relative_paths=True.
"""
save_dir1 = os.path.join(self.get_temp_dir(), "save_dir1")
os.mkdir(save_dir1)
save_path1 = os.path.join(save_dir1, "save_copy_restore")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver(
var_list={
"v0": v0,
"v1": v1,
"v2": v2.saveable},
restore_sequentially=True,
save_relative_paths=True)
init_all_op = [variables.global_variables_initializer(), v2_init]
with self.cached_session() as sess:
# Initialize all variables
self.evaluate(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path1)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path1, val)
self.assertEqual(
checkpoint_management.latest_checkpoint(save_dir1), save_path1)
save_dir2 = os.path.join(self.get_temp_dir(), "save_dir2")
os.renames(save_dir1, save_dir2)
save_path2 = os.path.join(save_dir2, "save_copy_restore")
self.assertEqual(
checkpoint_management.latest_checkpoint(save_dir2), save_path2)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.cached_session() as sess:
v0 = variables.VariableV1(-1.0, name="v0")
v1 = variables.VariableV1(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
# Assert that the variables are not initialized.
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(self.evaluate(v2.keys())))
self.assertEqual(0, len(self.evaluate(v2.values())))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path2)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
@test_util.run_deprecated_v1
def testFilenameTensor(self):
v0 = variables.VariableV1(0, name="v0")
filename = b"somerandomfilename"
save = saver_module.Saver({"v0": v0}, filename=filename)
with self.cached_session() as sess:
tensor = sess.graph.get_tensor_by_name(
save.saver_def.filename_tensor_name)
self.assertEqual(self.evaluate(tensor), filename)
def testInvalidPath(self):
v0 = variables.VariableV1(0, name="v0")
for ver in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
with self.cached_session() as sess:
save = saver_module.Saver({"v0": v0}, write_version=ver)
with self.assertRaisesRegexp(
ValueError, "The passed save_path is not a valid checkpoint:"):
save.restore(sess, "invalid path")
@test_util.run_v1_only("b/120545219")
def testInt64(self):
save_path = os.path.join(self.get_temp_dir(), "int64")
with self.cached_session() as sess:
      # Build a graph with 1 node, and Save and Restore nodes for it.
v = variables.VariableV1(np.int64(15), name="v")
save = saver_module.Saver({"v": v}, restore_sequentially=True)
self.evaluate(variables.global_variables_initializer())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.cached_session() as sess:
v = variables.VariableV1(np.int64(-1), name="v")
save = saver_module.Saver({"v": v})
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v" in e.message):
self.evaluate(v)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(np.int64(15), self.evaluate(v))
def testSomeErrors(self):
with ops_lib.Graph().as_default():
v0 = variables.VariableV1([10.0], name="v0")
v1 = variables.VariableV1([20.0], name="v1")
v2 = variables.VariableV1([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
      # By default the name used for "v2" will be "v1", which raises an error.
with self.assertRaisesRegexp(ValueError, "same name: v1"):
saver_module.Saver([v0, v1, v2])
# The names are different and will work.
saver_module.Saver({"vee1": v1, "other": [v2]})
# Partitioned variables also cause name conflicts.
p_v1 = variable_scope.get_variable(
"p_v1",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2 = variable_scope.get_variable(
"p_v2",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2._name = "p_v1"
with self.assertRaisesRegexp(ValueError, "same name: p_v1"):
saver_module.Saver([p_v1, p_v2])
def testSameName(self):
with ops_lib.Graph().as_default():
v0 = variables.VariableV1([10.0], name="v0")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saving one variable under two names raises an error.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v0"):
saver_module.Saver({"v0": v0, "v0too": v0})
# Ditto for custom saveables.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v2"):
saver_module.Saver({"v2": v2.saveable, "v2too": v2.saveable})
# Verify non-duplicate names work.
saver_module.Saver({"v0": v0, "v2": v2.saveable})
@test_util.run_v1_only("b/120545219")
def testBasicsWithListOfVariables(self):
save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
with self.session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver([v0, v1, v2.saveable])
self.evaluate(variables.global_variables_initializer())
v2_init.run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the variables
# have not been initialized either.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variables.VariableV1(-1.0, name="v0")
v1 = variables.VariableV1(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver([v0, v1, v2.saveable])
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v0" in e.message):
self.evaluate(v0)
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
self.evaluate(v1)
self.assertEqual(0, len(self.evaluate(v2.keys())))
self.assertEqual(0, len(self.evaluate(v2.values())))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.session(graph=ops_lib.Graph()) as sess:
v0_2 = variables.VariableV1(1000.0, name="v0")
v1_2 = variables.VariableV1(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
save2 = saver_module.Saver([v0_2, v1_2, v2_2.saveable])
v2_2.insert("k1000", 3000.0).run()
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, self.evaluate(v0_2))
self.assertEqual(2000.0, self.evaluate(v1_2))
self.assertEqual(b"k1000", self.evaluate(v2_2.keys()))
self.assertEqual(3000.0, self.evaluate(v2_2.values()))
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0_2))
self.assertEqual(20.0, self.evaluate(v1_2))
self.assertEqual(b"k1", self.evaluate(v2_2.keys()))
self.assertEqual(30.0, self.evaluate(v2_2.values()))
def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
with self.session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(var_value, name=var_name)
save = saver_module.Saver({var_name: var})
if not context.executing_eagerly():
self.evaluate(var.initializer)
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
with self.session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(other_value, name=var_name)
save = saver_module.Saver({var_name: var})
save.restore(sess, save_path)
self.assertAllClose(var_value, self.evaluate(var))
def testCacheRereadsFile(self):
save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
# Save and reload one Variable named "var1" in the same file.
# The cached readers should know to re-read the file.
self._SaveAndLoad("var1", 1.1, 2.2, save_path)
@test_util.run_deprecated_v1
def testAllowEmpty(self):
save_path = os.path.join(self.get_temp_dir(), "allow_empty")
with self.cached_session() as sess:
_ = constant_op.constant(1)
save = saver_module.Saver(allow_empty=True)
val = save.save(sess, save_path)
self.assertIsNone(val)
with self.cached_session() as sess:
save = saver_module.Saver(allow_empty=True)
save.restore(sess, save_path)
def testGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.VariableV1(123.45)
save = saver_module.Saver({"v0": v0_1})
self.evaluate(variables.global_variables_initializer())
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.VariableV1(543.21)
save = saver_module.Saver({"v0": v0_2})
self.evaluate(variables.global_variables_initializer())
def testSharedServerOnGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.VariableV1(123.45)
save = saver_module.Saver({"v0": v0_1}, sharded=True, allow_empty=True)
self.evaluate(variables.global_variables_initializer())
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.VariableV1(543.21)
save = saver_module.Saver({"v0": v0_2}, sharded=True, allow_empty=True)
self.evaluate(variables.global_variables_initializer())
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(1.0)
twos = variables.VariableV1([2.0, 2.0, 2.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
v2.insert("k1", 3.0).run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(0.0)
twos = variables.VariableV1([0.0, 0.0, 0.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, self.evaluate(one))
self.assertAllClose([2.0, 2.0, 2.0], self.evaluate(twos))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(3.0, self.evaluate(v2.values()))
def testVarListShouldBeEmptyInDeferredBuild(self):
with ops_lib.Graph().as_default():
v = variables.VariableV1(1.0)
with self.assertRaisesRegexp(ValueError, "defer_build"):
saver_module.Saver([v], defer_build=True)
def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild(self):
save_path = os.path.join(self.get_temp_dir(), "error_deferred_build")
with ops_lib.Graph().as_default(), session.Session() as sess:
variables.VariableV1(1.0)
saver = saver_module.Saver(defer_build=True)
with self.assertRaisesRegexp(RuntimeError, "build"):
saver.save(sess, save_path)
def testDeferredBuild(self):
save_path = os.path.join(self.get_temp_dir(), "deferred_build")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(1.0)
save = saver_module.Saver(defer_build=True)
      # If build were not deferred, the saver could not save `twos`.
twos = variables.VariableV1([2.0, 2.0, 2.0])
init = variables.global_variables_initializer()
save.build()
init.run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(0.0)
twos = variables.VariableV1([0.0, 0.0, 0.0])
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, self.evaluate(one))
self.assertAllClose([2.0, 2.0, 2.0], self.evaluate(twos))
@test_util.run_v1_only("b/120545219")
def testReshape(self):
save_path = os.path.join(self.get_temp_dir(), "variables_reshape")
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.VariableV1([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
save.save(sess, save_path)
# Error when restoring with default reshape=False
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.VariableV1([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Assign requires shapes of both tensors to match."):
save.restore(sess, save_path)
# Restored to new shape with reshape=True
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.VariableV1([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver(reshape=True)
save.restore(sess, save_path)
self.assertAllClose([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
self.evaluate(var))
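  # A minimal sketch (pure NumPy, illustrative only) of what reshape=True
  # permits above: the saved [2, 3] values are poured into the new [3, 2]
  # variable in row-major order, so only the total element count must match.
  def _reshape_semantics_sketch(self):
    saved = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    return saved.reshape([3, 2])  # -> [[1, 2], [3, 4], [5, 6]]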
@test_util.run_in_graph_and_eager_modes
def testSaveWithGlobalStep(self, pad_step_number=False):
save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
global_step_int = 5
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
with self.session(graph=ops_lib.Graph()):
var = resource_variable_ops.ResourceVariable(1.0, name="var0")
save = saver_module.Saver(
{
var._shared_name: var
}, pad_step_number=pad_step_number)
if context.executing_eagerly():
sess = None
else:
self.evaluate(var.initializer)
sess = ops_lib.get_default_session()
if use_tensor:
global_step = constant_op.constant(global_step_int)
val = save.save(sess, save_path, global_step=global_step)
else:
val = save.save(sess, save_path, global_step=global_step_int)
if pad_step_number:
expected_save_path = "%s-%s" % (save_path,
"{:08d}".format(global_step_int))
else:
expected_save_path = "%s-%d" % (save_path, global_step_int)
self.assertEqual(expected_save_path, val)
def testSaveWithGlobalStepWithPadding(self):
self.testSaveWithGlobalStep(pad_step_number=True)
def testSaveToNonexistingPath(self):
file_io.write_string_to_file(
os.path.join(self.get_temp_dir(), "actually_a_file"), "")
paths = [
os.path.join(self.get_temp_dir(), "nonexisting_dir/path"),
os.path.join(self.get_temp_dir(), "other_nonexisting_dir/path1/path2"),
os.path.join(self.get_temp_dir(), "actually_a_file/path"),
]
for save_path in paths:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
      # In the case where the parent directory doesn't exist, whether the save
      # succeeds or fails is implementation-dependent. Therefore we allow both
      # cases.
try:
with self.cached_session() as sess:
# Initialize all variables
self.evaluate(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Save the graph.
save.save(sess, save_path)
with self.cached_session() as sess:
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
except ValueError as exc:
error_msg_template = "Parent directory of {} doesn't exist, can't save."
self.assertEqual(error_msg_template.format(save_path), str(exc))
def testSaveToURI(self):
# ParseURI functions don't work on Windows yet.
# TODO(jhseu): Remove this check when it works.
if os.name == "nt":
self.skipTest("Local URI support doesn't work on Windows")
save_path = "file://" + os.path.join(self.get_temp_dir(), "uri")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
with self.cached_session() as sess:
# Initialize all variables
self.evaluate(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
save.save(sess, save_path)
def testSaveRestoreAndValidateVariableDtype(self):
for variable_op in [
variables.Variable, resource_variable_ops.ResourceVariable
]:
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
# Build the first session.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(10.0, name="v0", dtype=dtypes.float32)
if not context.executing_eagerly():
self.evaluate([variables.global_variables_initializer()])
save = saver_module.Saver({"v0": v0})
save.save(sess, save_path)
# Start a second session.
with self.session(graph=ops_lib.Graph()) as sess:
v0_wrong_dtype = variable_op(1, name="v0", dtype=dtypes.int32)
# Restore the saved value with different dtype
# in the parameter nodes.
save = saver_module.Saver({"v0": v0_wrong_dtype})
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"original dtype"):
save.restore(sess, save_path)
# Test restoring large tensors (triggers a thread pool)
def testRestoreLargeTensors(self):
save_dir = self.get_temp_dir()
def _model():
small_v = [variable_scope.get_variable(
"small%d" % i, shape=[10, 2], use_resource=True) for i in range(5)]
large_v = [variable_scope.get_variable(
"large%d" % i, shape=[32000, 1000], use_resource=True)
for i in range(3)]
return small_v + large_v
save_graph = ops_lib.Graph()
with save_graph.as_default(), self.session(graph=save_graph) as sess:
orig_vars = _model()
self.evaluate(variables.global_variables_initializer())
save = saver_module.Saver(max_to_keep=1)
self.evaluate(variables.global_variables_initializer())
save.save(sess, save_dir)
orig_vals = self.evaluate(orig_vars)
restore_graph = ops_lib.Graph()
with restore_graph.as_default(), self.session(
graph=restore_graph) as sess:
restored_vars = _model()
save = saver_module.Saver(max_to_keep=1)
save.restore(sess, save_dir)
restored_vals = self.evaluate(restored_vars)
for orig, restored in zip(orig_vals, restored_vals):
self.assertAllEqual(orig, restored)
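# A minimal, self-contained sketch (assuming only the module-level imports
# already used in this file) of the round trip the tests above exercise:
# save under a name map in one graph, then rebuild and restore in another.
def _basic_round_trip_sketch(tmp_dir):
  prefix = os.path.join(tmp_dir, "sketch_ckpt")
  with ops_lib.Graph().as_default(), session.Session() as sess:
    v = variables.VariableV1(1.0, name="v")
    save = saver_module.Saver({"v": v})
    sess.run(variables.global_variables_initializer())
    save.save(sess, prefix)
  with ops_lib.Graph().as_default(), session.Session() as sess:
    v = variables.VariableV1(0.0, name="v")
    save = saver_module.Saver({"v": v})
    # restore() assigns the saved values directly; no initializer is needed.
    save.restore(sess, prefix)
    return sess.run(v)  # == 1.0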
class SaveRestoreShardedTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
# Build a graph with 2 parameter nodes on different devices.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(10, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.VariableV1(20, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k1", 30.0).run()
t1.insert("k2", 40.0).run()
val = save.save(sess, save_path)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(save_path + "-?????-of-00002", val)
else:
self.assertEqual(save_path, val)
meta_graph_filename = checkpoint_management.meta_graph_filename(val)
self.assertEqual(save_path + ".meta", meta_graph_filename)
if save._write_version is saver_pb2.SaverDef.V1:
# Restore different ops from shard 0 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
save = saver_module.Saver(
{
"v0": v0,
"t0": t0.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k11", 33.0).run()
self.assertEqual(111, self.evaluate(v0))
self.assertEqual(b"k11", self.evaluate(t0.keys()))
self.assertEqual(33.0, self.evaluate(t0.values()))
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, self.evaluate(v0))
self.assertEqual(b"k1", self.evaluate(t0.keys()))
self.assertEqual(30.0, self.evaluate(t0.values()))
# Restore different ops from shard 1 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = variables.VariableV1(222)
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v1": v1,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t1.insert("k22", 44.0).run()
self.assertEqual(222, self.evaluate(v1))
self.assertEqual(b"k22", self.evaluate(t1.keys()))
self.assertEqual(44.0, self.evaluate(t1.values()))
save.restore(sess, save_path + "-00001-of-00002")
self.assertEqual(20, self.evaluate(v1))
self.assertEqual(b"k2", self.evaluate(t1.keys()))
self.assertEqual(40.0, self.evaluate(t1.values()))
# Now try a restore with the sharded filename.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.VariableV1(222, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k11", 33.0).run()
t1.insert("k22", 44.0).run()
self.assertEqual(111, self.evaluate(v0))
self.assertEqual(222, self.evaluate(v1))
self.assertEqual(b"k11", self.evaluate(t0.keys()))
self.assertEqual(33.0, self.evaluate(t0.values()))
self.assertEqual(b"k22", self.evaluate(t1.keys()))
self.assertEqual(44.0, self.evaluate(t1.values()))
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
if save._write_version is saver_pb2.SaverDef.V1:
save.restore(sess, save_path + "-?????-of-?????")
else:
save.restore(sess, save_path)
self.assertEqual(10, self.evaluate(v0))
self.assertEqual(20, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(t0.keys()))
self.assertEqual(30.0, self.evaluate(t0.values()))
self.assertEqual(b"k2", self.evaluate(t1.keys()))
self.assertEqual(40.0, self.evaluate(t1.values()))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(
checkpoint_management.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics-?????-of-00002"))
else:
self.assertEqual(
checkpoint_management.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics"))
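  # A minimal sketch (illustrative only) of the V1 sharded filename convention
  # asserted above: each shard is written as "<prefix>-<shard>-of-<num_shards>"
  # with five-digit zero padding, which the "?????" glob returned by save()
  # matches.
  def _sharded_filename_sketch(self, prefix, shard, num_shards):
    return "%s-%05d-of-%05d" % (prefix, shard, num_shards)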
@test_util.run_deprecated_v1
def testSaverDef(self):
with self.cached_session():
v0 = variables.VariableV1(123, name="v0")
save = saver_module.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
self.assertTrue(sd.sharded)
def _testPartitionedVariables(self, use_resource):
var_full_shape = [10, 3]
    # Allows the save/restore mechanism to work with different slicings.
var_name = "my_var"
saved_dir = self._get_test_dir("partitioned_variables")
saved_path = os.path.join(saved_dir, "ckpt")
call_saver_with_dict = False # updated by test loop below
def _save(partitioner=None):
with self.session(graph=ops_lib.Graph()) as sess:
# Calls .eval() to return the ndarray that makes up the full variable.
rnd = random_ops.random_uniform(var_full_shape).eval()
if partitioner:
vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=rnd,
partitioner=partitioner,
use_resource=use_resource)
]
else:
if use_resource:
vs = [resource_variable_ops.ResourceVariable(rnd, name=var_name)]
else:
vs = [variables.VariableV1(rnd, name=var_name)]
self.evaluate(variables.global_variables_initializer())
if call_saver_with_dict:
saver = saver_module.Saver({var_name: vs[0]})
else:
saver = saver_module.Saver(vs)
actual_path = saver.save(sess, saved_path)
self.assertEqual(saved_path, actual_path)
return rnd
def _restore(partitioner=None):
with self.session(graph=ops_lib.Graph()) as sess:
if partitioner:
new_vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=array_ops.zeros(var_full_shape),
partitioner=partitioner)
]
else:
new_vs = [
variables.VariableV1(
array_ops.zeros(
shape=var_full_shape), # != original contents.
name=var_name)
]
self.evaluate(variables.global_variables_initializer())
if call_saver_with_dict:
saver = saver_module.Saver({
var_name: new_vs[0]
})
else:
saver = saver_module.Saver(new_vs)
saver.restore(sess, saved_path)
if partitioner:
return new_vs[0].as_tensor().eval()
else:
return new_vs[0].eval()
for call_saver_with_dict in {False, True}:
# Save PartitionedVariable and restore into full variable.
saved_full = _save(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Restores into the same number of partitions.
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
self.assertAllEqual(saved_full, restored_full)
# Restores into a different number of partitions.
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=3))
self.assertAllEqual(saved_full, restored_full)
      # Now save a full variable and restore it into a PartitionedVariable.
saved_full = _save()
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=3))
self.assertAllEqual(saved_full, restored_full)
@test_util.run_deprecated_v1
def testPartitionedVariable(self):
self._testPartitionedVariables(use_resource=False)
@test_util.run_deprecated_v1
def testPartitionedResourceVariable(self):
self._testPartitionedVariables(use_resource=True)
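  # A minimal sketch (assuming the variable_scope/partitioned_variables
  # imports used above) of how fixed_size_partitioner splits the [10, 3]
  # variable these tests exercise: two shards of shape [5, 3] along axis 0.
  def _fixed_size_partitioner_sketch(self):
    with ops_lib.Graph().as_default():
      v = variable_scope.get_variable(
          "sketch_var",
          shape=[10, 3],
          partitioner=partitioned_variables.fixed_size_partitioner(
              num_shards=2))
      return [p.get_shape().as_list() for p in v]  # [[5, 3], [5, 3]]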
class SaveRestoreShardedTestV2(SaveRestoreShardedTest):
_WRITE_VERSION = saver_pb2.SaverDef.V2
class MaxToKeepTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def assertCheckpointState(self, model_checkpoint_path,
all_model_checkpoint_paths, save_dir):
checkpoint_state = checkpoint_management.get_checkpoint_state(save_dir)
self.assertEqual(checkpoint_state.model_checkpoint_path,
model_checkpoint_path)
self.assertEqual(checkpoint_state.all_model_checkpoint_paths,
all_model_checkpoint_paths)
def testMaxToKeepEager(self):
with context.eager_mode():
save_dir = self._get_test_dir("max_to_keep_eager")
v = variable_scope.variable(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=2)
self.evaluate(variables.global_variables_initializer())
if not context.executing_eagerly():
self.assertEqual([], save.last_checkpoints)
s1 = save.save(None, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s1],
save_dir=save_dir)
s2 = save.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s1, s2],
save_dir=save_dir)
s3 = save.save(None, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s2, s3],
save_dir=save_dir)
# Create a second helper, identical to the first.
save2 = saver_module.Saver({"v": v}, max_to_keep=2)
save2.set_last_checkpoints(save.last_checkpoints)
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(None, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
s2 = save2.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
# Deleted by the first helper.
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
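  # A minimal sketch (pure Python, illustrative only) of the rotation rule the
  # assertions above encode: re-saving an existing path moves it to the end of
  # the list, and once the list exceeds max_to_keep the oldest entry is
  # dropped (and its files deleted on disk).
  def _rotation_rule_sketch(self, last_checkpoints, new_path, max_to_keep):
    kept = [p for p in last_checkpoints if p != new_path]
    kept.append(new_path)
    return kept[-max_to_keep:]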
@test_util.run_deprecated_v1
def testNonSharded(self):
save_dir = self._get_test_dir("max_to_keep_non_sharded")
with self.cached_session() as sess:
v = variables.VariableV1(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=2)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s1],
save_dir=save_dir)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s1, s2],
save_dir=save_dir)
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s2, s3],
save_dir=save_dir)
# Create a second helper, identical to the first.
save2 = saver_module.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
# Create a third helper, with the same configuration but no knowledge of
# previous checkpoints.
save3 = saver_module.Saver(saver_def=save.as_saver_def())
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the second helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save2.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
# Deleted by the first helper.
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save2.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save2.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the third helper.
# Adding s2 again (but helper is unaware of previous s2)
s2 = save3.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s2], save3.last_checkpoints)
# Created by the first helper.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
# Deleted by the first helper.
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
# Even though the file for s1 exists, this saver isn't aware of it, which
# is why it doesn't end up in the checkpoint state.
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s2],
save_dir=save_dir)
# Adding s1 (s3 should not be deleted because helper is unaware of it)
s1 = save3.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save3.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
def testSharded(self):
save_dir = self._get_test_dir("max_to_keep_sharded")
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = variables.VariableV1(222, name="v1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, sharded=True, max_to_keep=2)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s2)))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertEqual(0, len(gfile.Glob(s1 + "*")))
self.assertFalse(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s2)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s3)))
else:
self.assertEqual(4, len(gfile.Glob(s3 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s3)))
def testNoMaxToKeep(self):
save_dir = self._get_test_dir("no_max_to_keep")
save_dir2 = self._get_test_dir("max_to_keep_0")
with self.cached_session() as sess:
v = variables.VariableV1(10.0, name="v")
self.evaluate(variables.global_variables_initializer())
# Test max_to_keep being None.
save = saver_module.Saver({"v": v}, max_to_keep=None)
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
# Test max_to_keep being 0.
save2 = saver_module.Saver({"v": v}, max_to_keep=0)
self.assertEqual([], save2.last_checkpoints)
s1 = save2.save(sess, os.path.join(save_dir2, "s1"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
s2 = save2.save(sess, os.path.join(save_dir2, "s2"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
def testNoMetaGraph(self):
save_dir = self._get_test_dir("no_meta_graph")
with self.cached_session() as sess:
v = variables.VariableV1(10.0, name="v")
save = saver_module.Saver({"v": v})
self.evaluate(variables.global_variables_initializer())
s1 = save.save(sess, os.path.join(save_dir, "s1"), write_meta_graph=False)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertFalse(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
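# A minimal sketch (illustrative only, for an unsharded prefix) of the
# meta-graph naming rule the tests above rely on: the exported MetaGraphDef
# sits next to the checkpoint prefix with a ".meta" suffix, so passing
# write_meta_graph=False simply leaves that file absent.
def _meta_graph_filename_sketch(checkpoint_prefix):
  return checkpoint_prefix + ".meta"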
class RecoverLastCheckpointsTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def assertCheckpointState(self, model_checkpoint_path,
all_model_checkpoint_paths, save_dir):
checkpoint_state = checkpoint_management.get_checkpoint_state(save_dir)
self.assertEqual(checkpoint_state.model_checkpoint_path,
model_checkpoint_path)
self.assertEqual(checkpoint_state.all_model_checkpoint_paths,
all_model_checkpoint_paths)
def test_recover_last_checkpoints(self):
with context.eager_mode():
save_dir = self._get_test_dir("recover_last_checkpoints")
v = variable_scope.variable(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=10)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([], save.last_checkpoints)
s1 = save.save(None, os.path.join(save_dir, "ckpt-1"))
s2 = save.save(None, os.path.join(save_dir, "ckpt-2"))
s3 = save.save(None, os.path.join(save_dir, "ckpt-3"))
self.assertEqual([s1, s2, s3], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s1, s2, s3],
save_dir=save_dir)
# Create another saver and recover last checkpoints.
save2 = saver_module.Saver({"v": v}, max_to_keep=10)
self.assertEqual([], save2.last_checkpoints)
save2.recover_last_checkpoints([s1, s2, s3])
self.assertEqual([s1, s2, s3], save2.last_checkpoints)
# Remove a checkpoint and check that last checkpoints are
# restored correctly.
for fname in gfile.Glob("{}*".format(s1)):
gfile.Remove(fname)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
# Create another saver and recover last checkpoints. The removed
# checkpoint would be correctly omitted.
save3 = saver_module.Saver({"v": v}, max_to_keep=10)
self.assertEqual([], save3.last_checkpoints)
save3.recover_last_checkpoints([s1, s2, s3])
self.assertEqual([s2, s3], save3.last_checkpoints)
s4 = save3.save(None, os.path.join(save_dir, "ckpt-4"))
self.assertCheckpointState(
model_checkpoint_path=s4,
all_model_checkpoint_paths=[s2, s3, s4],
save_dir=save_dir)
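# A minimal sketch (using this file's checkpoint_management import) of the
# recovery behavior asserted above: recover_last_checkpoints keeps only the
# paths whose files still exist, so a checkpoint deleted out-of-band silently
# drops out of the rotation list.
def _recover_existing_sketch(candidate_paths):
  return [p for p in candidate_paths
          if checkpoint_management.checkpoint_exists(p)]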
class KeepCheckpointEveryNHoursTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
@test_util.run_in_graph_and_eager_modes
@test.mock.patch.object(saver_module, "time")
def testNonSharded(self, mock_time):
save_dir = self._get_test_dir("keep_checkpoint_every_n_hours")
with self.cached_session() as sess:
v = variable_scope.variable([10.0], name="v")
# Run the initializer NOW to avoid the 0.5s overhead of the first Run()
# call, which throws the test timing off in fastbuild mode.
self.evaluate(variables.global_variables_initializer())
# Create a saver that will keep the last 2 checkpoints plus one every 0.7
# seconds.
start_time = time.time()
mock_time.time.return_value = start_time
save = saver_module.Saver(
{
"v": v
}, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600)
self.assertEqual([], save.last_checkpoints)
      # Wait till 1 second has elapsed so s1 will be old enough to keep.
# sleep may return early, don't trust it.
mock_time.time.return_value = start_time + 1.0
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
      # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save()
      # would normally delete s1, because max_to_keep is 2. However, s1 is
      # older than 0.7s, so we must keep it.
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
      # s1 should still be here; we are not checking now to reduce timing
      # variance in the test.
      # We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
      # call to Save() will delete s2, because max_to_keep is 2 and we already
      # kept the old s1. s2 is very close in time to s1, so it gets deleted.
s4 = save.save(sess, os.path.join(save_dir, "s4"))
self.assertEqual([s3, s4], save.last_checkpoints)
# Check that s1 is still here, but s2 is gone.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertFalse(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(checkpoint_management.checkpoint_exists(s4))
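# A minimal sketch (pure Python, times in seconds) of the decision the mocked
# clock above exercises: a checkpoint about to be rotated out is preserved on
# disk when it was saved more than keep_checkpoint_every_n_hours after the
# last preserved one, after which the preservation timestamp advances to it.
def _should_preserve_sketch(candidate_time, last_kept_time, every_n_hours):
  return candidate_time - last_kept_time > every_n_hours * 3600.0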
class SaveRestoreWithVariableNameMap(test.TestCase):
def _testNonReshape(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "non_reshape")
with self.session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Save the initialized values in the file at "save_path"
# Use a variable name map to set the saved tensor names
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Verify that the original names are not in the Saved file
save = saver_module.Saver({"v0": v0, "v1": v1})
with self.assertRaisesOpError("not found in checkpoint"):
save.restore(sess, save_path)
# Verify that the mapped names are present in the Saved file and can be
# Restored using remapped names.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
if not context.executing_eagerly():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
if not context.executing_eagerly():
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Add a prefix to the node names in the current graph and Restore using
# remapped names.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="restore_prefix/v0")
v1 = variable_op(-1.0, name="restore_prefix/v1")
if not context.executing_eagerly():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
@test_util.run_in_graph_and_eager_modes
def testNonReshapeResourceVariable(self):
self._testNonReshape(resource_variable_ops.ResourceVariable)
def testNonReshapeVariable(self):
self._testNonReshape(variables.Variable)
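# A minimal sketch (assuming this file's imports) of the name-map contract the
# tests above exercise: the dict keys are the names recorded in the
# checkpoint, independent of the variables' graph names, so the same map must
# be supplied again at restore time.
def _name_map_round_trip_sketch(tmp_dir):
  prefix = os.path.join(tmp_dir, "name_map_ckpt")
  with ops_lib.Graph().as_default(), session.Session() as sess:
    v = variables.VariableV1(7.0, name="graph_name")
    save = saver_module.Saver({"ckpt_name": v})
    sess.run(variables.global_variables_initializer())
    save.save(sess, prefix)  # Stored under "ckpt_name", not "graph_name".
  with ops_lib.Graph().as_default(), session.Session() as sess:
    v = variables.VariableV1(0.0, name="some_other_graph_name")
    saver_module.Saver({"ckpt_name": v}).restore(sess, prefix)
    return sess.run(v)  # == 7.0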
class MetaGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
@test_util.run_v1_only("b/120545219")
def testAddCollectionDef(self):
test_dir = self._get_test_dir("good_collection")
filename = os.path.join(test_dir, "metafile")
with self.cached_session():
# Creates a graph.
v0 = variables.VariableV1(1.0, name="v0")
control_flow_ops.cond(
math_ops.less(v0, 10), lambda: math_ops.add(v0, 1),
lambda: math_ops.subtract(v0, 1))
control_flow_ops.while_loop(lambda i: math_ops.less(i, 10),
lambda i: math_ops.add(i, 1), [v0])
var = variables.VariableV1(constant_op.constant(0, dtype=dtypes.int64))
count_up_to = var.count_up_to(3)
input_queue = data_flow_ops.FIFOQueue(
30, dtypes.float32, shared_name="collection_queue")
qr = queue_runner_impl.QueueRunner(input_queue, [count_up_to])
variables.global_variables_initializer()
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Adds a set of collections.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("float_collection", 3.5)
ops_lib.add_to_collection("string_collection", "hello")
ops_lib.add_to_collection("variable_collection", v0)
# Add QueueRunners.
queue_runner_impl.add_queue_runner(qr)
# Adds user_defined proto in three formats: string, bytes and Any.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph(filename)
self.assertTrue(meta_graph_def.HasField("saver_def"))
self.assertTrue(meta_graph_def.HasField("graph_def"))
self.assertTrue(meta_graph_def.HasField("meta_info_def"))
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
"")
collection_def = meta_graph_def.collection_def
self.assertEqual(len(collection_def), 12)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(
self, meta_graph_def, new_meta_graph_def)
def testAddCollectionDefFails(self):
with self.cached_session():
# Creates a graph.
v0 = variables.VariableV1(10.0, name="v0")
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Generates MetaGraphDef.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
      # Verifies that a collection with an unsupported key will not be added.
ops_lib.add_to_collection(save, 3)
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
      # Verifies that a collection whose item type does not match the expected
      # type will not be added.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("int_collection", 3.5)
save._add_collection_def(meta_graph_def, "int_collection")
self.assertEqual(len(meta_graph_def.collection_def), 0)
def _testMultiSaverCollectionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.VariableV1([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.VariableV1(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
self.evaluate(variables.global_variables_initializer())
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph()
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
      # Verifies that there is a saver_def in meta_graph_def0 and
      # meta_graph_def1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def0.
collection_def = meta_graph_def0.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
def _testMultiSaverCollectionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Imports from meta_graph.
saver_module.import_meta_graph(filename)
# Retrieves SAVERS collection. Verifies there are 2 entries.
savers = ops_lib.get_collection("savers")
self.assertEqual(2, len(savers))
# Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
new_saver0 = savers[0]
new_saver0.restore(sess, saver0_ckpt)
v0 = sess.graph.get_tensor_by_name("v0:0")
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertAllEqual([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
self.evaluate(v0))
self.assertEqual([3, 2], v0.get_shape())
self.assertEqual([], v1.get_shape())
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
self.evaluate(v1)
# Retrieves saver1. Verifies that new_saver1 can restore v1.
new_saver1 = savers[1]
new_saver1.restore(sess, saver1_ckpt)
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertEqual(11.0, self.evaluate(v1))
@test_util.run_v1_only("b/120545219")
def testMultiSaverCollection(self):
test_dir = self._get_test_dir("saver_collection")
self._testMultiSaverCollectionSave(test_dir)
self._testMultiSaverCollectionRestore(test_dir)
@test_util.run_v1_only("b/120545219")
def testClearExtraneousSavers(self):
test_dir = self._get_test_dir("clear_extraneous_savers")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.VariableV1([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.VariableV1(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
self.evaluate(variables.global_variables_initializer())
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph(clear_extraneous_savers=True)
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
      # Verifies that there is a saver_def in meta_graph_def0 and
      # meta_graph_def1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def1.
collection_def = meta_graph_def1.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there is 1 entry in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(1, len(savers.value))
# Verifies that saver0 graph nodes are omitted from the saver1 export
self.assertEqual(33, len(meta_graph_def0.graph_def.node))
self.assertEqual(21, len(meta_graph_def1.graph_def.node))
@test_util.run_deprecated_v1
def testBinaryAndTextFormat(self):
test_dir = self._get_test_dir("binary_and_text")
filename = os.path.join(test_dir, "metafile")
with self.session(graph=ops_lib.Graph()):
# Creates a graph.
variables.VariableV1(10.0, name="v0")
# Exports the graph as binary format.
saver_module.export_meta_graph(filename, as_text=False)
with self.session(graph=ops_lib.Graph()):
# Imports the binary format graph.
saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(saver)
# Exports the graph as text format.
saver.export_meta_graph(filename, as_text=True)
with self.session(graph=ops_lib.Graph()):
# Imports the text format graph.
saver_module.import_meta_graph(filename)
# Writes wrong contents to the file.
graph_io.write_graph(saver.as_saver_def(),
os.path.dirname(filename),
os.path.basename(filename))
with self.session(graph=ops_lib.Graph()):
# Import should fail.
      with self.assertRaisesWithPredicateMatch(
          IOError, lambda e: "Cannot parse file" in str(e)):
        saver_module.import_meta_graph(filename)
      # Deletes the file.
      gfile.Remove(filename)
      with self.assertRaisesWithPredicateMatch(
          IOError, lambda e: "does not exist" in str(e)):
saver_module.import_meta_graph(filename)
@test_util.run_v1_only("b/120545219")
def testSliceVariable(self):
test_dir = self._get_test_dir("slice_saver")
filename = os.path.join(test_dir, "metafile")
with self.cached_session():
v1 = variables.VariableV1([20.0], name="v1")
v2 = variables.VariableV1([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
      # The names are different, so this will work.
slice_saver = saver_module.Saver({"first": v1, "second": v2})
self.evaluate(variables.global_variables_initializer())
# Exports to meta_graph
meta_graph_def = slice_saver.export_meta_graph(filename)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(new_saver)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(self, meta_graph_def,
new_meta_graph_def)
def _testGraphExtensionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(1.2, dtypes.float32, shape=[100, 28])
with ops_lib.name_scope("hidden1"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
      # The use of control_flow_ops.cond here is purely for adding test
      # coverage for the save and restore of control flow contexts (which
      # doesn't make any sense here from a machine learning perspective).
      # Typically, the biases would be a simple Variable without the
      # condition.
biases = variables.VariableV1(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights) + biases)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
      # The use of control_flow_ops.while_loop here is purely for adding
      # test coverage for the save and restore of control flow contexts
      # (which doesn't make any sense here from a machine learning
      # perspective). Typically, the biases would be a simple Variable
      # without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(
loop_cond, loop_body,
[constant_op.constant(0),
variables.VariableV1(array_ops.zeros([32]))])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.VariableV1(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
init_all_op = variables.global_variables_initializer()
with self.cached_session() as sess:
# Initializes all the variables.
self.evaluate(init_all_op)
      # Runs the graph to compute the logits.
self.evaluate(logits)
# Creates a saver.
saver0 = saver_module.Saver()
saver0.save(sess, saver0_ckpt)
# Generates MetaGraphDef.
saver0.export_meta_graph(filename)
def _testGraphExtensionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
# Adds loss and train.
labels = constant_op.constant(0, dtypes.int32, shape=[100], name="labels")
batch_size = array_ops.size(labels)
labels = array_ops.expand_dims(labels, 1)
indices = array_ops.expand_dims(math_ops.range(0, batch_size), 1)
concated = array_ops.concat([indices, labels], 1)
onehot_labels = sparse_ops.sparse_to_dense(
concated, array_ops.stack([batch_size, 10]), 1.0, 0.0)
logits = ops_lib.get_collection("logits")[0]
cross_entropy = nn_ops.softmax_cross_entropy_with_logits(
labels=onehot_labels, logits=logits, name="xentropy")
loss = math_ops.reduce_mean(cross_entropy, name="xentropy_mean")
summary.scalar("loss", loss)
# Creates the gradient descent optimizer with the given learning rate.
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
# Runs train_op.
train_op = optimizer.minimize(loss)
ops_lib.add_to_collection("train_op", train_op)
# Runs train_op.
self.evaluate(train_op)
# Generates MetaGraphDef.
saver_module.export_meta_graph(train_filename)
def _testRestoreFromTrainGraphWithControlContext(self, test_dir):
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(train_filename)
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
train_op = ops_lib.get_collection("train_op")[0]
self.evaluate(train_op)
@test_util.run_deprecated_v1
def testGraphExtension(self):
test_dir = self._get_test_dir("graph_extension")
self._testGraphExtensionSave(test_dir)
self._testGraphExtensionRestore(test_dir)
self._testRestoreFromTrainGraphWithControlContext(test_dir)
def _testGradientSerDes(self, graph_fn):
"""Tests that gradients can be computed after exporting and importing.
Builds a graph, exports it, and verifies that it can be imported and the
gradient can be built and run correctly.
Args:
      graph_fn: A function that takes a single float Tensor as input and
        returns a single Tensor.
"""
test_dir = self._get_test_dir("nested_control_flow")
filename = os.path.join(test_dir, "metafile")
saver_ckpt = os.path.join(test_dir, "saver.ckpt")
# Create while loop using `outer_body_fn`.
with ops_lib.Graph().as_default():
var = variables.VariableV1(0.0)
var_name = var.name
output = graph_fn(var)
output_name = output.name
init_op = variables.global_variables_initializer()
# Generate a MetaGraphDef containing the while loop.
with session.Session() as sess:
self.evaluate(init_op)
self.evaluate(output)
saver = saver_module.Saver()
saver.save(sess, saver_ckpt)
saver.export_meta_graph(filename)
# Build and run the gradients of the while loop. We use this below to
# verify that the gradients are correct with an imported MetaGraphDef.
grad = gradients_impl.gradients([output], [var])
# Turn off constant folding to avoid breaking testNestedControlFlowSerDes.
# It appears that a missing control dependency in the gradient graph
# causes the fetch node to not be triggered.
no_constfold_config = config_pb2.ConfigProto()
no_constfold_config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
with session.Session(config=no_constfold_config) as sess:
self.evaluate(init_op)
expected_grad_value = self.evaluate(grad)
# Restore the MetaGraphDef into a new Graph.
with ops_lib.Graph().as_default():
with session.Session() as sess:
saver = saver_module.import_meta_graph(filename)
saver.restore(sess, saver_ckpt)
# Make sure we can still build gradients and get the same result.
var = ops_lib.get_default_graph().get_tensor_by_name(var_name)
output = ops_lib.get_default_graph().get_tensor_by_name(output_name)
grad = gradients_impl.gradients([output], [var])
init_op = variables.global_variables_initializer()
with session.Session(config=no_constfold_config) as sess:
self.evaluate(init_op)
actual_grad_value = self.evaluate(grad)
self.assertEqual(expected_grad_value, actual_grad_value)
def _testWhileLoopAndGradientSerDes(self, outer_body_fn):
# Build a while loop with `outer_body_fn`, export it, and verify that it can
# be imported and the gradient can be built and run correctly.
# pylint: disable=g-long-lambda
return self._testGradientSerDes(
lambda x: control_flow_ops.while_loop(
lambda i, y: i < 5, outer_body_fn, [0, x])[1])
# pylint: enable=g-long-lambda
def testNestedWhileLoopsSerDes(self):
# Test two simple nested while loops.
def body(i, x):
_, r = control_flow_ops.while_loop(lambda j, y: j < 3,
lambda j, y: (j + 1, y + x),
[0, 0.0])
return i + 1, x + r
self._testWhileLoopAndGradientSerDes(body)
def testNestedControlFlowSerDes(self):
# Test while loop in a cond in a while loop.
# pylint: disable=g-long-lambda
def body(i, x):
cond_result = control_flow_ops.cond(
i > 0,
lambda: control_flow_ops.while_loop(
lambda j, y: j < 3,
lambda j, y: (j + 1, y + x),
[0, 0.0])[1],
lambda: x)
return i + 1, cond_result
# pylint: enable=g-long-lambda
self._testWhileLoopAndGradientSerDes(body)
def testNestedCondsSerDes(self):
# Test conds in a cond.
# pylint: disable=g-long-lambda
self._testGradientSerDes(lambda x: control_flow_ops.cond(
x > 0,
lambda: control_flow_ops.cond(x > 3,
lambda: array_ops.identity(x),
lambda: math_ops.multiply(x, 2.0)),
lambda: control_flow_ops.cond(x < -3,
lambda: constant_op.constant(1.0),
lambda: math_ops.multiply(x, -1.0))))
# pylint: enable=g-long-lambda
@test_util.run_v1_only("b/120545219")
def testStrippedOpListDef(self):
with self.cached_session():
# Creates a graph.
v0 = variables.VariableV1(0.0)
var = variables.VariableV1(10.0)
math_ops.add(v0, var)
@function.Defun(dtypes.float32)
def minus_one(x):
return x - 1
minus_one(array_ops.identity(v0))
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer()
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp",
"PlaceholderWithDefault", "RestoreV2", "SaveSlices", "Sub",
"VariableV2"
])
else:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp",
"PlaceholderWithDefault", "RestoreV2", "SaveV2", "Sub", "VariableV2"
])
# Test calling stripped_op_list_for_graph directly
op_list = meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def)
self.assertEqual(ops, [o.name for o in op_list.op])
for o in op_list.op:
self.assertEqual(o.summary, "")
self.assertEqual(o.description, "")
@test_util.run_deprecated_v1
def testStripDefaultValuedAttrs(self):
"""Verifies that default valued attrs are stripped, unless disabled."""
# With strip_default_attrs enabled, attributes "T" (float32) and "Tout"
# (complex64) in the "Complex" op must be removed.
with self.cached_session():
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
variables.global_variables_initializer()
meta_graph_def = save.export_meta_graph(strip_default_attrs=True)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
# With strip_default_attrs disabled, attributes "T" (float32) and "Tout"
# (complex64) in the "Complex" op must *not* be removed, even if they map
# to their defaults.
with self.session(graph=ops_lib.Graph()):
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
variables.global_variables_initializer()
meta_graph_def = save.export_meta_graph(strip_default_attrs=False)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
@test_util.run_deprecated_v1
def testImportIntoNamescope(self):
    # Test that we can import a meta graph into a name scope.
test_dir = self._get_test_dir("import_into_namescope")
filename = os.path.join(test_dir, "ckpt")
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
with session.Session() as sess:
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name="logits")
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit, name="cost")
adam.AdamOptimizer().minimize(cost, name="optimize")
saver = saver_module.Saver()
self.evaluate(variables.global_variables_initializer())
saver.save(sess, filename)
graph = ops_lib.Graph()
with session.Session(graph=graph) as sess:
new_saver = saver_module.import_meta_graph(
filename + ".meta", graph=graph, import_scope="new_model")
new_saver.restore(sess, filename)
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testImportIntoNamescopeWithoutVariables(self):
# Save a simple graph that contains no variables into a checkpoint.
test_dir = self._get_test_dir("no_vars_graph")
filename = os.path.join(test_dir, "ckpt")
graph_1 = ops_lib.Graph()
with session.Session(graph=graph_1) as sess:
constant_op.constant([1, 2, 3], name="x")
constant_op.constant([1, 2, 3], name="y")
saver = saver_module.Saver(allow_empty=True)
saver.save(sess, filename)
# Create a fresh graph.
graph_2 = ops_lib.Graph()
with session.Session(graph=graph_2) as sess:
# Restore the above checkpoint under scope "subgraph_1".
new_saver_1 = saver_module.import_meta_graph(
filename + ".meta", graph=graph_2, import_scope="subgraph_1")
# There are no variables to restore, so import_meta_graph should not
# return a Saver.
self.assertIsNone(new_saver_1)
# Create a variable in graph_2 under scope "my_scope".
variables.VariableV1(array_ops.zeros([10]), name="my_scope/my_var")
self.evaluate(variables.global_variables_initializer())
# Restore the checkpoint into a different scope "subgraph_2".
new_saver_2 = saver_module.import_meta_graph(
filename + ".meta", graph=graph_2, import_scope="subgraph_2")
# Because the variable does not live in scope "subgraph_2",
# import_meta_graph should not attempt to restore the variable. So,
# import_meta_graph still won't return a Saver instance.
self.assertIsNone(new_saver_2)
# However, if we restore the checkpoint under scope "my_scope",
# import_meta_graph will detect the variable and return a Saver for
# restoring it. This should happen even when the variable does not
# originate from graph_1.
new_saver_3 = saver_module.import_meta_graph(
filename + ".meta", graph=graph_2, import_scope="my_scope")
self.assertIsInstance(new_saver_3, saver_module.Saver)
@test_util.run_deprecated_v1
def testImportIntoImplicitNamescope(self):
    # Test that we can import a meta graph into an implicit name scope.
test_dir = self._get_test_dir("import_into_namescope")
filename = os.path.join(test_dir, "ckpt")
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
with session.Session() as sess:
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name="logits")
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit, name="cost")
adam.AdamOptimizer().minimize(cost, name="optimize")
saver = saver_module.Saver()
self.evaluate(variables.global_variables_initializer())
saver.save(sess, filename)
graph = ops_lib.Graph()
with session.Session(graph=graph) as sess:
with ops_lib.name_scope("new_model"):
new_saver = saver_module.import_meta_graph(
filename + ".meta", graph=graph)
new_saver.restore(sess, filename)
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnImport(self):
    # Test that we can import a graph without its devices and run it
    # successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph()
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=False, import_scope="new_model")
# Device refers to GPU, which is not available here.
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(variables.global_variables_initializer())
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=True, import_scope="new_model")
self.evaluate(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnExport(self):
    # Test that we can export a graph without its devices and run it
    # successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph(clear_devices=True)
graph_io.write_graph(meta_graph_def, self.get_temp_dir(),
"meta_graph.pbtxt")
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
self.evaluate(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testPreserveDatasetAndFunctions(self):
with ops_lib.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
_ = array_ops.identity(next_element, name="output")
# Generate three MetaGraphDef protos using different code paths.
meta_graph_def_simple = saver_module.export_meta_graph()
meta_graph_def_devices_cleared = saver_module.export_meta_graph(
clear_devices=True)
meta_graph_def_from_graph_def = saver_module.export_meta_graph(
clear_devices=True, graph_def=g.as_graph_def())
for meta_graph_def in [meta_graph_def_simple,
meta_graph_def_devices_cleared,
meta_graph_def_from_graph_def]:
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
self.evaluate(variables.global_variables_initializer())
for i in range(10):
self.assertEqual(i * i, sess.run("new_model/output:0"))
with self.assertRaises(errors.OutOfRangeError):
sess.run("new_model/output:0")
class CheckpointReaderTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
@test_util.run_deprecated_v1
def testDebugString(self):
# Builds a graph.
v0 = variables.VariableV1(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
v1 = variables.VariableV1(
[[[1], [2]], [[3], [4]], [[5], [6]]], dtype=dtypes.float32, name="v1")
init_all_op = variables.global_variables_initializer()
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, write_version=self._WRITE_VERSION)
save_path = os.path.join(self.get_temp_dir(),
"ckpt_for_debug_string" + str(self._WRITE_VERSION))
with self.cached_session() as sess:
self.evaluate(init_all_op)
# Saves a checkpoint.
save.save(sess, save_path)
# Creates a reader.
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
# Verifies that the tensors exist.
self.assertTrue(reader.has_tensor("v0"))
self.assertTrue(reader.has_tensor("v1"))
debug_string = reader.debug_string()
# Verifies that debug string contains the right strings.
self.assertTrue(compat.as_bytes("v0 (DT_FLOAT) [2,3]") in debug_string)
self.assertTrue(compat.as_bytes("v1 (DT_FLOAT) [3,2,1]") in debug_string)
# Verifies get_variable_to_shape_map() returns the correct information.
var_map = reader.get_variable_to_shape_map()
self.assertEqual([2, 3], var_map["v0"])
self.assertEqual([3, 2, 1], var_map["v1"])
# Verifies get_tensor() returns the tensor value.
v0_tensor = reader.get_tensor("v0")
v1_tensor = reader.get_tensor("v1")
self.assertAllEqual(v0.eval(), v0_tensor)
self.assertAllEqual(v1.eval(), v1_tensor)
# Verifies get_tensor() fails for non-existent tensors.
with self.assertRaisesRegexp(errors.NotFoundError,
"v3 not found in checkpoint"):
reader.get_tensor("v3")
def testNonexistentPath(self):
with self.assertRaisesRegexp(errors.NotFoundError,
"Unsuccessful TensorSliceReader"):
pywrap_tensorflow.NewCheckpointReader("non-existent")
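# A minimal standalone sketch of the reader API exercised in the test above
# (the `ckpt_path` below is hypothetical and assumes a checkpoint was already
# written there):
#
#   reader = pywrap_tensorflow.NewCheckpointReader(ckpt_path)
#   if reader.has_tensor("v0"):
#     shape = reader.get_variable_to_shape_map()["v0"]  # e.g. [2, 3]
#     value = reader.get_tensor("v0")                   # a numpy ndarray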
class CheckpointReaderForV2Test(CheckpointReaderTest):
_WRITE_VERSION = saver_pb2.SaverDef.V2
class WriteGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testWriteGraph(self):
test_dir = self._get_test_dir("write_graph_dir")
variables.VariableV1(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph(),
os.path.join(test_dir, "l1"), "graph.pbtxt")
truth = os.path.join(test_dir, "l1", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
def testRecursiveCreate(self):
test_dir = self._get_test_dir("deep_dir")
variables.VariableV1(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph().as_graph_def(),
os.path.join(test_dir, "l1", "l2", "l3"),
"graph.pbtxt")
truth = os.path.join(test_dir, "l1", "l2", "l3", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
class ScopedGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def _testScopedSave(self, test_dir, exported_filename, ckpt_filename):
graph = ops_lib.Graph()
with graph.as_default():
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with ops_lib.name_scope("hidden1"):
weights1 = variables.VariableV1(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
        # The use of control_flow_ops.cond here is purely for adding test
        # coverage for the save and restore of control flow contexts (which
        # doesn't make any sense here from a machine learning perspective).
        # Typically, the biases would be a simple Variable without the
        # condition.
biases1 = variables.VariableV1(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights2 = variables.VariableV1(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely for adding
        # test coverage for the save and restore of control flow contexts
        # (which doesn't make any sense here from a machine learning
        # perspective). Typically, the biases would be a simple Variable
        # without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases2):
biases2 += constant_op.constant(0.1, shape=[32])
return it + 1, biases2
_, biases2 = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.VariableV1(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights3 = variables.VariableV1(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases3 = variables.VariableV1(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights3) + biases3
ops_lib.add_to_collection("logits", logits)
      # Adds a user-defined proto in three formats: string, bytes and Any.
      # The Any proto should just pass through.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
_, var_list = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filename),
graph=ops_lib.get_default_graph(),
export_scope="hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
with self.session(graph=graph) as sess:
self.evaluate(variables.global_variables_initializer())
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.save(sess, os.path.join(test_dir, ckpt_filename), write_state=False)
def _testScopedRestore(self, test_dir, exported_filename,
new_exported_filename, ckpt_filename):
graph = ops_lib.Graph()
# Create all the missing inputs.
with graph.as_default():
new_image = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filename),
graph=graph,
input_map={"$unbound_inputs_images": new_image},
import_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
hidden1 = graph.as_graph_element("new_hidden1/Relu:0")
weights1 = graph.as_graph_element("new_hidden1/weights:0")
biases1 = graph.as_graph_element("new_hidden1/biases:0")
with graph.as_default():
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely for adding
        # test coverage for the save and restore of control flow contexts
        # (which doesn't make any sense here from a machine learning
        # perspective). Typically, the biases would be a simple Variable
        # without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.VariableV1(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.VariableV1(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
# The rest of the variables.
rest_variables = list(
set(variables.global_variables()) - set(var_list.keys()))
init_rest_op = variables.variables_initializer(rest_variables)
with self.session(graph=graph) as sess:
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.restore(sess, os.path.join(test_dir, ckpt_filename))
# Verify that we have restored weights1 and biases1.
self.evaluate([weights1, biases1])
# Initialize the rest of the variables and run logits.
self.evaluate(init_rest_op)
self.evaluate(logits)
# Verifies that we can save the subgraph under "hidden1" and restore it
# into "new_hidden1" in the new graph.
@test_util.run_deprecated_v1
def testScopedSaveAndRestore(self):
test_dir = self._get_test_dir("scoped_export_import")
ckpt_filename = "ckpt"
self._testScopedSave(test_dir, "exported_hidden1.pbtxt", ckpt_filename)
self._testScopedRestore(test_dir, "exported_hidden1.pbtxt",
"exported_new_hidden1.pbtxt", ckpt_filename)
# Verifies that we can copy the subgraph under "hidden1" and copy it
# to different name scope in the same graph or different graph.
@test_util.run_deprecated_v1
def testCopyScopedGraph(self):
test_dir = self._get_test_dir("scoped_copy")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.VariableV1(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.VariableV1([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
    # Run the graph and save a scoped checkpoint.
with self.session(graph=graph1) as sess:
self.evaluate(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
    # Verifies that copying to the same graph with the same scope name fails.
with graph1.as_default():
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "need to be different" in str(e)):
meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden1")
# Verifies copy to the same graph.
with graph1.as_default():
var_list_2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden2")
with self.session(graph=graph1) as sess:
saver1 = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver1.restore(sess, saver0_ckpt)
saver2 = saver_module.Saver(var_list=var_list_2, max_to_keep=1)
saver2.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("hidden1/relu:0"))
self.assertAllClose(expected, sess.run("hidden2/relu:0"))
    # Verifies copy to a different graph.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
@test_util.run_deprecated_v1
def testExportGraphDefWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.VariableV1(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.VariableV1([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
    # Run the graph and save a scoped checkpoint.
with self.session(graph=graph1) as sess:
self.evaluate(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
graph_def=graph1.as_graph_def(), export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies that we can run successfully after restoring.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
@test_util.run_deprecated_v1
def testSerializeSaverWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
saver2_ckpt = os.path.join(test_dir, "saver2.ckpt")
graph = ops_lib.Graph()
with graph.as_default():
with ops_lib.name_scope("hidden1"):
variable1 = variables.VariableV1([1.0], name="variable1")
saver1 = saver_module.Saver(var_list=[variable1])
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver1)
with ops_lib.name_scope("hidden2"):
variable2 = variables.VariableV1([2.0], name="variable2")
saver2 = saver_module.Saver(var_list=[variable2], name="hidden2/")
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver2)
with self.session(graph=graph) as sess:
self.evaluate(variables.global_variables_initializer())
saver1.save(sess, saver1_ckpt, write_state=False)
saver2.save(sess, saver2_ckpt, write_state=False)
graph1 = ops_lib.Graph()
var_dict1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph,
to_graph=graph1)
self.assertEqual(1, len(var_dict1))
saver_list1 = graph1.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list1))
with self.session(graph=graph1) as sess:
saver_list1[0].restore(sess, saver1_ckpt)
self.assertEqual(1.0, self.evaluate(var_dict1["variable1:0"]))
graph2 = ops_lib.Graph()
var_dict2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden2",
to_scope="new_hidden2",
from_graph=graph,
to_graph=graph2)
self.assertEqual(1, len(var_dict2))
saver_list2 = graph2.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list2))
with self.session(graph=graph2) as sess:
saver_list2[0].restore(sess, saver2_ckpt)
self.assertEqual(2.0, self.evaluate(var_dict2["variable2:0"]))
class _OwnsAVariableSimple(trackable_base.Trackable):
"""A Trackable object which can be saved using a tf.train.Saver."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
def _gather_saveables_for_checkpoint(self):
return {trackable_base.VARIABLE_VALUE_KEY: self.non_dep_variable}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class _MirroringSaveable(
saver_module.BaseSaverBuilder.ResourceVariableSaveable):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
super(_MirroringSaveable, self).__init__(
self._primary_variable, "", name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(trackable_base.Trackable):
"""A Trackable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {trackable_base.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class NonLayerTrackable(trackable_tracking.AutoTrackable):
def __init__(self):
super(NonLayerTrackable, self).__init__()
self.a_variable = trackable_utils.add_variable(
self, name="a_variable", shape=[])
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Trackables which aren't Layers.
self._non_layer = NonLayerTrackable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class TrackableCompatibilityTests(test.TestCase):
# TODO(allenl): Track down python3 reference cycles in these tests.
@test_util.run_in_graph_and_eager_modes
def testNotSaveableButIsTrackable(self):
v = _OwnsAVariableSimple()
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
for saver in (saver_module.Saver(var_list=[v]),
saver_module.Saver(var_list={"v": v})):
with self.cached_session() as sess:
self.evaluate(v.non_dep_variable.assign(42.))
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
for saver in (saver_module.Saver(var_list=[v]),
saver_module.Saver(var_list={"v": v})):
with self.cached_session() as sess:
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
def testSingleTensorEvaluation(self):
class _CountingSaveable(saver_module.BaseSaverBuilder.SaveableObject):
def __init__(self, name):
self.eval_count = 0
def _tensor():
self.eval_count += 1
return constant_op.constant([1.])
dummy_op = constant_op.constant([2.])
super(_CountingSaveable, self).__init__(
dummy_op,
[saver_module.BaseSaverBuilder.SaveSpec(
_tensor, "", name, dtype=dummy_op.dtype,
device=dummy_op.device)],
name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
pass
with context.eager_mode():
v = _CountingSaveable("foo")
saver = saver_module.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
with self.cached_session() as sess:
save_path = saver.save(sess, prefix)
self.assertEqual(1, v.eval_count)
saver.restore(sess, save_path)
self.assertEqual(1, v.eval_count)
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
train_op = optimizer.minimize(
functools.partial(model, input_value),
global_step=optimizer_step)
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
return root_trackable
def _set_sentinels(self, root_trackable):
self.evaluate(root_trackable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m")
.assign([102.]))
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.))
def _check_sentinels(self, root_trackable):
self.assertAllEqual(
[1.], self.evaluate(root_trackable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m")))
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
def testVariableNotFoundErrorRaised(self):
# Restore does some tricky exception handling to figure out if it should
# load an object-based checkpoint. Tests that the exception handling isn't
# too broad.
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
a = resource_variable_ops.ResourceVariable(1., name="a")
b = resource_variable_ops.ResourceVariable(1., name="b")
a_saver = saver_module.Saver([a])
b_saver = saver_module.Saver([b])
with self.cached_session() as sess:
self.evaluate(a.initializer)
save_path = a_saver.save(sess=sess, save_path=checkpoint_prefix)
with self.assertRaisesRegexp(
errors.NotFoundError, "Key b not found in checkpoint"):
b_saver.restore(sess=sess, save_path=save_path)
with self.assertRaises(errors.NotFoundError) as cs:
b_saver.restore(sess=sess, save_path=save_path)
# Make sure we don't have a confusing "During handling of the above
# exception" block in Python 3.
self.assertNotIn("NewCheckpointReader", cs.exception.message)
@test_util.run_v1_only("b/120545219")
def testGraphChangedForRestoreErrorRaised(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with ops_lib.Graph().as_default() as g:
a = variables.VariableV1(1., name="a")
a_saver = saver_module.Saver([a])
with self.session(graph=g) as sess:
self.evaluate(a.initializer)
save_path = a_saver.save(sess=sess, save_path=checkpoint_prefix)
with ops_lib.Graph().as_default() as g:
a = variables.VariableV1([1.], name="a")
a_saver = saver_module.Saver([a])
with self.session(graph=g) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"a mismatch between the current graph and the graph"):
a_saver.restore(sess=sess, save_path=save_path)
def testLoadFromObjectBasedGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = ops_lib.Graph()
with save_graph.as_default(), self.session(graph=save_graph) as sess:
root = self._initialized_model()
object_saver = trackable_utils.Checkpoint(root=root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
# An incompatible object-based checkpoint to check error messages
var = resource_variable_ops.ResourceVariable(1., name="a")
self.evaluate(var.initializer)
second_saver = trackable_utils.Checkpoint(v=var)
second_path = second_saver.save(file_prefix=os.path.join(
checkpoint_directory, "second"))
restore_graph = ops_lib.Graph()
with restore_graph.as_default(), self.session(
graph=restore_graph) as sess:
root = self._initialized_model()
self._set_sentinels(root)
saver = saver_module.Saver()
saver.restore(sess=sess, save_path=save_path)
self._check_sentinels(root)
before_second_restore_ops = restore_graph.get_operations()
# Test that multiple restores do not pollute the graph
saver.restore(sess=sess, save_path=save_path)
self.assertEqual(before_second_restore_ops,
restore_graph.get_operations())
with self.assertRaisesRegexp(errors.NotFoundError,
"Could not find some variables"):
saver.restore(sess=sess, save_path=second_path)
def testLoadFromObjectBasedEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = ops_lib.Graph()
with save_graph.as_default(), self.session(graph=save_graph):
root = self._initialized_model()
object_saver = trackable_utils.Checkpoint(root=root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
saver = saver_module.Saver(
root.model.variables + root.optimizer.variables())
saver.restore(sess=None, save_path=save_path)
self._check_sentinels(root)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/saver_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to warm-start TF.Learn Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_ops
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.VocabInfo"])
class VocabInfo(
collections.namedtuple("VocabInfo", [
"new_vocab",
"new_vocab_size",
"num_oov_buckets",
"old_vocab",
"old_vocab_size",
"backup_initializer",
"axis",
])):
"""Vocabulary information for warm-starting.
See `tf.estimator.WarmStartSettings` for examples of using
VocabInfo to warm-start.
Args:
new_vocab: [Required] A path to the new vocabulary file (used with the model
to be trained).
    new_vocab_size: [Required] An integer indicating how many entries of the
      new vocabulary will be used in training.
num_oov_buckets: [Required] An integer indicating how many OOV buckets are
associated with the vocabulary.
old_vocab: [Required] A path to the old vocabulary file (used with the
checkpoint to be warm-started from).
old_vocab_size: [Optional] An integer indicating how many entries of the old
vocabulary were used in the creation of the checkpoint. If not provided,
the entire old vocabulary will be used.
backup_initializer: [Optional] A variable initializer used for variables
corresponding to new vocabulary entries and OOV. If not provided, these
entries will be zero-initialized.
axis: [Optional] Denotes what axis the vocabulary corresponds to. The
default, 0, corresponds to the most common use case (embeddings or
linear weights for binary classification / regression). An axis of 1
could be used for warm-starting output layers with class vocabularies.
Returns:
A `VocabInfo` which represents the vocabulary information for warm-starting.
Raises:
    ValueError: `axis` is neither 0 nor 1.
Example Usage:
```python
embeddings_vocab_info = tf.VocabInfo(
new_vocab='embeddings_vocab',
new_vocab_size=100,
num_oov_buckets=1,
old_vocab='pretrained_embeddings_vocab',
old_vocab_size=10000,
backup_initializer=tf.compat.v1.truncated_normal_initializer(
mean=0.0, stddev=(1 / math.sqrt(embedding_dim))),
axis=0)
softmax_output_layer_kernel_vocab_info = tf.VocabInfo(
new_vocab='class_vocab',
new_vocab_size=5,
num_oov_buckets=0, # No OOV for classes.
old_vocab='old_class_vocab',
old_vocab_size=8,
backup_initializer=tf.compat.v1.glorot_uniform_initializer(),
axis=1)
softmax_output_layer_bias_vocab_info = tf.VocabInfo(
new_vocab='class_vocab',
new_vocab_size=5,
num_oov_buckets=0, # No OOV for classes.
old_vocab='old_class_vocab',
old_vocab_size=8,
backup_initializer=tf.compat.v1.zeros_initializer(),
axis=0)
  # Currently, only axis=0 and axis=1 are supported.
```
"""
def __new__(cls,
new_vocab,
new_vocab_size,
num_oov_buckets,
old_vocab,
old_vocab_size=-1,
backup_initializer=None,
axis=0):
if axis != 0 and axis != 1:
raise ValueError("The only supported values for the axis argument are 0 "
"and 1. Provided axis: {}".format(axis))
return super(VocabInfo, cls).__new__(
cls,
new_vocab,
new_vocab_size,
num_oov_buckets,
old_vocab,
old_vocab_size,
backup_initializer,
axis,
)
def _infer_var_name(var):
"""Returns name of the `var`.
Args:
var: A list. The list can contain either of the following:
(i) A single `Variable`
(ii) A single `ResourceVariable`
(iii) Multiple `Variable` objects which must be slices of the same larger
variable.
(iv) A single `PartitionedVariable`
Returns:
    Name of the `var`.
"""
name_to_var_dict = saveable_object_util.op_list_to_dict(var)
if len(name_to_var_dict) > 1:
raise TypeError("`var` = %s passed as arg violates the constraints. "
"name_to_var_dict = %s" % (var, name_to_var_dict))
return list(name_to_var_dict.keys())[0]
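# A hedged example of the helper above (the variable name is hypothetical):
#
#   v = variable_scope.get_variable("dense/kernel", shape=[10, 1])
#   _infer_var_name([v])  # -> "dense/kernel"
#
# For a PartitionedVariable, or a list of slices of one larger variable,
# `op_list_to_dict` collapses the slices to their single shared name, so the
# call still returns exactly one name.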
def _get_var_info(var, prev_tensor_name=None):
"""Helper method for standarizing Variable and naming.
Args:
var: Current graph's variable that needs to be warm-started (initialized).
Can be either of the following: (i) `Variable` (ii) `ResourceVariable`
(iii) list of `Variable`: The list must contain slices of the same larger
variable. (iv) `PartitionedVariable`
    prev_tensor_name: Name of the tensor to look up in the provided
      `prev_ckpt`. If None, we look up the tensor with the same name as the
      given `var`.
Returns:
A tuple of the Tensor name and var.
"""
if checkpoint_utils._is_variable(var): # pylint: disable=protected-access
current_var_name = _infer_var_name([var])
elif (isinstance(var, list) and
all(checkpoint_utils._is_variable(v) for v in var)): # pylint: disable=protected-access
current_var_name = _infer_var_name(var)
elif isinstance(var, variables_lib.PartitionedVariable):
current_var_name = _infer_var_name([var])
var = var._get_variable_list() # pylint: disable=protected-access
else:
raise TypeError(
"var MUST be one of the following: a Variable, list of Variable or "
"PartitionedVariable, but is {}".format(type(var)))
if not prev_tensor_name:
# Assume tensor name remains the same.
prev_tensor_name = current_var_name
return prev_tensor_name, var
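# Sketch of `_get_var_info` (hypothetical names), assuming the checkpoint
# stored the weights under a different tensor name:
#
#   prev_name, var_or_slices = _get_var_info(v, prev_tensor_name="old/kernel")
#   # prev_name == "old/kernel"; with prev_tensor_name=None, it falls back to
#   # the name inferred from `v` itself.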
# pylint: disable=protected-access
# Accesses protected members of tf.Variable to reset the variable's internal
# state.
def _warm_start_var_with_vocab(var,
current_vocab_path,
current_vocab_size,
prev_ckpt,
prev_vocab_path,
previous_vocab_size=-1,
current_oov_buckets=0,
prev_tensor_name=None,
initializer=None,
axis=0):
"""Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`.
Use this method when the `var` is backed by vocabulary. This method stitches
the given `var` such that values corresponding to individual features in the
vocabulary remain consistent irrespective of changing order of the features
between old and new vocabularies.
Args:
var: Current graph's variable that needs to be warm-started (initialized).
Can be either of the following:
(i) `Variable`
(ii) `ResourceVariable`
(iii) list of `Variable`: The list must contain slices of the same larger
variable.
(iv) `PartitionedVariable`
current_vocab_path: Path to the vocab file used for the given `var`.
current_vocab_size: An `int` specifying the number of entries in the current
vocab.
    prev_ckpt: A string specifying the directory with checkpoint file(s) or
      path to checkpoint. The given checkpoint must have a tensor with name
      `prev_tensor_name` (if not None) or a tensor with the same name as the
      given `var`.
prev_vocab_path: Path to the vocab file used for the tensor in `prev_ckpt`.
previous_vocab_size: If provided, will constrain previous vocab to the first
`previous_vocab_size` entries. -1 means use the entire previous vocab.
current_oov_buckets: An `int` specifying the number of out-of-vocabulary
buckets used for given `var`.
    prev_tensor_name: Name of the tensor to look up in the provided
      `prev_ckpt`. If None, we look up the tensor with the same name as the
      given `var`.
initializer: Variable initializer to be used for missing entries. If None,
missing entries will be zero-initialized.
axis: Axis of the variable that the provided vocabulary corresponds to.
Raises:
ValueError: If required args are not provided.
"""
if not (current_vocab_path and current_vocab_size and prev_ckpt and
prev_vocab_path):
raise ValueError("Invalid args: Must provide all of [current_vocab_path, "
"current_vocab_size, prev_ckpt, prev_vocab_path}.")
if checkpoint_utils._is_variable(var):
var = [var]
elif (isinstance(var, list) and
all(checkpoint_utils._is_variable(v) for v in var)):
var = var
elif isinstance(var, variables_lib.PartitionedVariable):
var = var._get_variable_list()
else:
raise TypeError(
"var MUST be one of the following: a Variable, list of Variable or "
"PartitionedVariable, but is {}".format(type(var)))
if not prev_tensor_name:
# Assume tensor name remains the same.
prev_tensor_name = _infer_var_name(var)
total_v_first_axis = sum(v.get_shape().as_list()[0] for v in var)
for v in var:
v_shape = v.get_shape().as_list()
slice_info = v._get_save_slice_info()
partition_info = None
if slice_info:
partition_info = variable_scope._PartitionInfo(
full_shape=slice_info.full_shape, var_offset=slice_info.var_offset)
if axis == 0:
new_row_vocab_size = current_vocab_size
new_col_vocab_size = v_shape[1]
old_row_vocab_size = previous_vocab_size
old_row_vocab_file = prev_vocab_path
new_row_vocab_file = current_vocab_path
old_col_vocab_file = None
new_col_vocab_file = None
num_row_oov_buckets = current_oov_buckets
num_col_oov_buckets = 0
elif axis == 1:
# Note that we must compute this value across all partitions, whereas
# in the axis = 0 case, we can simply use v_shape[1] because we don't
# allow partitioning across axis = 1.
new_row_vocab_size = total_v_first_axis
new_col_vocab_size = current_vocab_size
old_row_vocab_size = -1
old_row_vocab_file = None
new_row_vocab_file = None
old_col_vocab_file = prev_vocab_path
new_col_vocab_file = current_vocab_path
num_row_oov_buckets = 0
num_col_oov_buckets = current_oov_buckets
else:
raise ValueError("The only supported values for the axis argument are 0 "
"and 1. Provided axis: {}".format(axis))
init = checkpoint_ops._load_and_remap_matrix_initializer(
ckpt_path=checkpoint_utils._get_checkpoint_filename(prev_ckpt),
old_tensor_name=prev_tensor_name,
new_row_vocab_size=new_row_vocab_size,
new_col_vocab_size=new_col_vocab_size,
old_row_vocab_size=old_row_vocab_size,
old_row_vocab_file=old_row_vocab_file,
new_row_vocab_file=new_row_vocab_file,
old_col_vocab_file=old_col_vocab_file,
new_col_vocab_file=new_col_vocab_file,
num_row_oov_buckets=num_row_oov_buckets,
num_col_oov_buckets=num_col_oov_buckets,
initializer=initializer)
new_init_val = ops.convert_to_tensor(
init(shape=v_shape, partition_info=partition_info))
v._initializer_op = state_ops.assign(v, new_init_val)
# pylint: enable=protected-access
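# A hedged usage sketch of `_warm_start_var_with_vocab` (all paths below are
# hypothetical). With axis=0, this remaps the rows of an embedding variable so
# that entries present in both the old and new vocab files keep their
# checkpointed values, while new and OOV rows fall back to `initializer`:
#
#   _warm_start_var_with_vocab(
#       embedding_var,
#       current_vocab_path="/tmp/new_vocab.txt",
#       current_vocab_size=100,
#       prev_ckpt="/tmp/model_dir",
#       prev_vocab_path="/tmp/old_vocab.txt",
#       current_oov_buckets=1)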
def _get_grouped_variables(vars_to_warm_start):
"""Collects and groups (possibly partitioned) variables into a dictionary.
The variables can be provided explicitly through vars_to_warm_start, or they
are retrieved from collections (see below).
Args:
vars_to_warm_start: One of the following:
- A regular expression (string) that captures which variables to
warm-start (see tf.compat.v1.get_collection). This expression will
only consider variables in the TRAINABLE_VARIABLES collection.
      - A list of strings, each representing a full variable name to warm-start.
        These will be looked up in the GLOBAL_VARIABLES collection.
- A list of Variables to warm-start.
- `None`, in which case all variables in TRAINABLE_VARIABLES will be used.
Returns:
A dictionary mapping variable names (strings) to lists of Variables.
Raises:
ValueError: If vars_to_warm_start is not a string, `None`, a list of
`Variables`, or a list of strings.
"""
if isinstance(vars_to_warm_start, str) or vars_to_warm_start is None:
# Both vars_to_warm_start = '.*' and vars_to_warm_start = None will match
# everything (in TRAINABLE_VARIABLES) here.
logging.info("Warm-starting variables only in TRAINABLE_VARIABLES.")
list_of_vars = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES, scope=vars_to_warm_start)
elif isinstance(vars_to_warm_start, list):
if all(isinstance(v, str) for v in vars_to_warm_start):
list_of_vars = []
for v in vars_to_warm_start:
list_of_vars += ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES, scope=v)
elif all(checkpoint_utils._is_variable(v) for v in vars_to_warm_start): # pylint: disable=protected-access
list_of_vars = vars_to_warm_start
else:
raise ValueError("If `vars_to_warm_start` is a list, it must be all "
"`Variable` or all `str`. Given types are {}".format(
[type(v) for v in vars_to_warm_start]))
else:
raise ValueError("`vars_to_warm_start must be a `list` or `str`. Given "
"type is {}".format(type(vars_to_warm_start)))
# We have to deal with partitioned variables, since get_collection flattens
# out the list.
grouped_variables = {}
for v in list_of_vars:
if not isinstance(v, list):
var_name = _infer_var_name([v])
else:
var_name = _infer_var_name(v)
grouped_variables.setdefault(var_name, []).append(v)
return grouped_variables
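# For example (hypothetical names), a variable partitioned into
# "layer/kernel/part_0" and "layer/kernel/part_1" is grouped under the single
# key "layer/kernel", while an unpartitioned "layer/bias" maps to a
# one-element list.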
def _get_object_checkpoint_renames(path, variable_names):
"""Returns a dictionary mapping variable names to checkpoint keys.
  The warm-starting utility expects variable names to match the names in the
  checkpoint. For object-based checkpoints, the variable names differ from
  the names in the checkpoint, so this function is used to obtain the map
  from variable names to checkpoint keys.
Args:
path: path to checkpoint directory or file.
variable_names: list of variable names to load from the checkpoint.
Returns:
If the checkpoint is object-based, this function returns a map from variable
names to their corresponding checkpoint keys.
If the checkpoint is name-based, this returns an empty dict.
Raises:
ValueError: If the object-based checkpoint is missing variables.
"""
fname = checkpoint_utils._get_checkpoint_filename(path) # pylint: disable=protected-access
try:
names_to_keys = saver_lib.object_graph_key_mapping(fname)
except errors.NotFoundError:
# If an error is raised from `object_graph_key_mapping`, then the
# checkpoint is name-based. There are no renames, so return an empty dict.
return {}
missing_names = set(variable_names) - set(names_to_keys.keys())
if missing_names:
raise ValueError(
"Attempting to warm-start from an object-based checkpoint, but found "
"that the checkpoint did not contain values for all variables. The "
"following variables were missing: {}"
.format(missing_names))
return {name: names_to_keys[name] for name in variable_names}
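# For reference: object-based checkpoints store tensors under object-graph
# keys, so the returned mapping looks roughly like (hypothetical name):
#   {"dense/kernel": "dense/kernel/.ATTRIBUTES/VARIABLE_VALUE"}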
@tf_export(v1=["train.warm_start"])
def warm_start(ckpt_to_initialize_from,
vars_to_warm_start=".*",
var_name_to_vocab_info=None,
var_name_to_prev_var_name=None):
"""Warm-starts a model using the given settings.
If you are using a tf.estimator.Estimator, this will automatically be called
during training.
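  Example (an illustrative sketch; the checkpoint path and variable names
  below are hypothetical):

  ```python
  # Warm-start everything in TRAINABLE_VARIABLES from a previous run,
  # renaming one variable whose name changed between the two models.
  tf.compat.v1.train.warm_start(
      ckpt_to_initialize_from="/tmp/prev_model",
      vars_to_warm_start=".*",
      var_name_to_prev_var_name={"new_dense/kernel": "old_dense/kernel"})
  ```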
Args:
ckpt_to_initialize_from: [Required] A string specifying the directory with
checkpoint file(s) or path to checkpoint from which to warm-start the
model parameters.
vars_to_warm_start: [Optional] One of the following:
- A regular expression (string) that captures which variables to
warm-start (see tf.compat.v1.get_collection). This expression will only
consider variables in the TRAINABLE_VARIABLES collection -- if you need
      to warm-start non-TRAINABLE vars (such as optimizer accumulators or
batch norm statistics), please use the below option.
- A list of strings, each a regex scope provided to
tf.compat.v1.get_collection with GLOBAL_VARIABLES (please see
tf.compat.v1.get_collection). For backwards compatibility reasons,
this is separate from the single-string argument type.
- A list of Variables to warm-start. If you do not have access to the
`Variable` objects at the call site, please use the above option.
- `None`, in which case only TRAINABLE variables specified in
`var_name_to_vocab_info` will be warm-started.
Defaults to `'.*'`, which warm-starts all variables in the
TRAINABLE_VARIABLES collection. Note that this excludes variables such
as accumulators and moving statistics from batch norm.
var_name_to_vocab_info: [Optional] Dict of variable names (strings) to
`tf.estimator.VocabInfo`. The variable names should be "full" variables,
not the names of the partitions. If not explicitly provided, the variable
is assumed to have no (changes to) vocabulary.
var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to
name of the previously-trained variable in `ckpt_to_initialize_from`. If
      not explicitly provided, the name of the variable is assumed to be the
      same between the previous checkpoint and the current model. Note that
      this has no effect on the set of variables that is warm-started, and
      only controls name mapping (use `vars_to_warm_start` for controlling
      what variables to warm-start).
Raises:
ValueError: If the WarmStartSettings contains prev_var_name or VocabInfo
configuration for variable names that are not used. This is to ensure
a stronger check for variable configuration than relying on users to
examine the logs.
"""
logging.info("Warm-starting from: {}".format(ckpt_to_initialize_from))
grouped_variables = _get_grouped_variables(vars_to_warm_start)
if var_name_to_vocab_info is None:
var_name_to_vocab_info = {}
if not var_name_to_prev_var_name:
# Detect whether the checkpoint is object-based, in which case the
# var_name_to_prev_var_name dictionary should map variable names to
# checkpoint keys. If the user has specified var_name_to_prev_var_name, we
# do not override it.
var_name_to_prev_var_name = _get_object_checkpoint_renames(
ckpt_to_initialize_from, grouped_variables.keys())
warmstarted_count = 0
# Keep track of which var_names in var_name_to_prev_var_name and
# var_name_to_vocab_info have been used. Err on the safer side by throwing an
# exception if any are unused by the end of the loop. It is easy to misname
# a variable during this configuration, in which case without this check, we
# would fail to warm-start silently.
prev_var_name_used = set()
vocab_info_used = set()
# Group the vocabless vars into one call to init_from_checkpoint.
vocabless_vars = {}
for var_name, variable in six.iteritems(grouped_variables):
prev_var_name = var_name_to_prev_var_name.get(var_name)
if prev_var_name:
prev_var_name_used.add(var_name)
vocab_info = var_name_to_vocab_info.get(var_name)
if vocab_info:
vocab_info_used.add(var_name)
warmstarted_count += 1
logging.debug(
"Warm-starting variable: {}; current_vocab: {} current_vocab_size: {}"
" prev_vocab: {} prev_vocab_size: {} current_oov: {} prev_tensor: {}"
" initializer: {}".format(
var_name, vocab_info.new_vocab, vocab_info.new_vocab_size,
vocab_info.old_vocab, (vocab_info.old_vocab_size if
vocab_info.old_vocab_size > 0 else "All"),
vocab_info.num_oov_buckets, prev_var_name or "Unchanged",
vocab_info.backup_initializer or "zero-initialized"))
_warm_start_var_with_vocab(
variable,
current_vocab_path=vocab_info.new_vocab,
current_vocab_size=vocab_info.new_vocab_size,
prev_ckpt=ckpt_to_initialize_from,
prev_vocab_path=vocab_info.old_vocab,
previous_vocab_size=vocab_info.old_vocab_size,
current_oov_buckets=vocab_info.num_oov_buckets,
prev_tensor_name=prev_var_name,
initializer=vocab_info.backup_initializer,
axis=vocab_info.axis)
else:
# For the special value of vars_to_warm_start = None,
# we only warm-start variables with explicitly specified vocabularies.
if vars_to_warm_start:
warmstarted_count += 1
logging.debug("Warm-starting variable: {}; prev_var_name: {}".format(
var_name, prev_var_name or "Unchanged"))
# Because we use a default empty list in grouped_variables, single
# unpartitioned variables will be lists here, which we rectify in order
# for init_from_checkpoint logic to work correctly.
if len(variable) == 1:
variable = variable[0]
prev_tensor_name, var = _get_var_info(variable, prev_var_name)
vocabless_vars[prev_tensor_name] = var
checkpoint_utils.init_from_checkpoint(ckpt_to_initialize_from, vocabless_vars)
prev_var_name_not_used = set(
var_name_to_prev_var_name.keys()) - prev_var_name_used
vocab_info_not_used = set(var_name_to_vocab_info.keys()) - vocab_info_used
logging.info("Warm-started %d variables.", warmstarted_count)
if prev_var_name_not_used:
raise ValueError(
"You provided the following variables in "
"var_name_to_prev_var_name that were not used: "
"{0}. Perhaps you misspelled them? Here is the list of viable "
"variable names: {1}".format(prev_var_name_not_used,
grouped_variables.keys()))
if vocab_info_not_used:
raise ValueError(
"You provided the following variables in "
"var_name_to_vocab_info that were not used: {0}. "
" Perhaps you misspelled them? Here is the list of viable variable "
"names: {1}".format(vocab_info_not_used, grouped_variables.keys()))
|
tensorflow-master
|
tensorflow/python/training/warm_starting_util.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
class SaverLargePartitionedVariableTest(test.TestCase):
# Need to do this in a separate test because of the amount of memory needed
# to run this test.
def testLargePartitionedVariables(self):
save_path = os.path.join(self.get_temp_dir(), "large_variable")
var_name = "my_var"
    # Save a large partitioned variable.
with session.Session("", graph=ops.Graph()) as sess:
with ops.device("/cpu:0"):
# Create a partitioned variable which is larger than int32 size but
# split into smaller sized variables.
init = lambda shape, dtype, partition_info: constant_op.constant(
True, dtype, shape)
partitioned_var = list(variable_scope.get_variable(
var_name,
shape=[1 << 31],
partitioner=partitioned_variables.fixed_size_partitioner(4),
initializer=init,
dtype=dtypes.bool))
variables.global_variables_initializer().run()
save = saver.Saver(partitioned_var)
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/saver_large_partitioned_variable_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.learning.training_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework.test_util import TensorFlowTestCase
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import training_ops
class TrainingOpsTest(TensorFlowTestCase):
def _toType(self, dtype):
if dtype == np.float16:
return dtypes.float16
elif dtype == np.float32:
return dtypes.float32
elif dtype == np.float64:
return dtypes.float64
elif dtype == np.int32:
return dtypes.int32
elif dtype == np.int64:
return dtypes.int64
else:
      assert False, "Unsupported dtype: {}".format(dtype)
def _testTypes(self, x, alpha, delta, use_gpu=None):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_sgd = training_ops.apply_gradient_descent(var, alpha, delta)
out = self.evaluate(apply_sgd)
self.assertShapeEqual(out, apply_sgd)
self.assertAllCloseAccordingToType(x - alpha * delta, out)
@test_util.run_v1_only("b/120545219")
def testApplyGradientDescent(self):
for (dtype, use_gpu) in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
x = np.arange(100).astype(dtype)
alpha = np.array(2.0).astype(dtype)
delta = np.arange(100).astype(dtype)
self._testTypes(x, alpha, delta, use_gpu)
def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)
out = self.evaluate(apply_adagrad)
self.assertShapeEqual(out, apply_adagrad)
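      # Expected Adagrad update: accum += grad**2; var -= lr * grad /
      # sqrt(accum). The two assertions below verify this element-wise.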
self.assertAllCloseAccordingToType(x - lr * grad * (y + grad * grad)**
(-0.5), out)
self.assertAllCloseAccordingToType(y + grad * grad, self.evaluate(accum))
def _testTypesForFtrl(self,
x,
y,
z,
lr,
grad,
use_gpu=None,
l1=0.0,
l2=0.0,
lr_power=-0.5):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
linear = variables.VariableV1(z)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_ftrl = training_ops.apply_ftrl(var, accum, linear, grad, lr, l1, l2,
lr_power)
out = self.evaluate(apply_ftrl)
self.assertShapeEqual(out, apply_ftrl)
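      # Reference FTRL-Proximal update (McMahan et al., 2013), mirrored in
      # numpy below: accum_update = accum + grad**2; `linear` accumulates the
      # gradient minus a weight-rescaling term; the closed-form weight is
      # nonzero only where |linear| exceeds the l1 threshold.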
accum_update = y + grad * grad
linear_update = z + grad - (accum_update**(-lr_power) - y**
(-lr_power)) / lr * x
quadratic = 1.0 / (accum_update**(lr_power) * lr) + 2 * l2
expected_out = np.array([(
np.sign(linear_update[i]) * l1 - linear_update[i]) / (quadratic[i]) if
np.abs(linear_update[i]) > l1 else 0.0
for i in range(linear_update.size)])
self.assertAllCloseAccordingToType(accum_update, self.evaluate(accum))
if x.dtype == np.float16:
# The calculations here really are not very precise in float16.
self.assertAllClose(
linear_update, self.evaluate(linear), rtol=2e-2, atol=2e-2)
self.assertAllClose(expected_out, out, rtol=2e-2, atol=2e-2)
elif x.dtype == np.float32:
        # The calculations here are not sufficiently precise in float32.
self.assertAllClose(
linear_update, self.evaluate(linear), rtol=1e-5, atol=1e-5)
self.assertAllClose(expected_out, out, rtol=1e-5, atol=1e-5)
else:
self.assertAllClose(linear_update, self.evaluate(linear))
self.assertAllClose(expected_out, out)
@test_util.run_v1_only("b/120545219")
def testApplyAdagrad(self):
for (dtype, use_gpu) in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
x = np.arange(100).astype(dtype)
y = np.arange(1, 101).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForAdagrad(x, y, lr, grad, use_gpu)
@test_util.run_v1_only("b/120545219")
def testApplyFtrl(self):
for dtype in [np.float16, np.float32, np.float64]:
x = np.arange(100).astype(dtype)
y = np.arange(1, 101).astype(dtype)
z = np.arange(102, 202).astype(dtype)
lr = np.array(2.0).astype(dtype)
l1 = np.array(3.0).astype(dtype)
l2 = np.array(4.0).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForFtrl(x, y, z, lr, grad, use_gpu=False, l1=l1, l2=l2)
def _testTypesForSparseAdagrad(self, x, y, lr, grad, indices):
self.setUp()
with self.session(use_gpu=False):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
sparse_apply_adagrad = training_ops.sparse_apply_adagrad(
var, accum, lr, grad,
constant_op.constant(indices, self._toType(indices.dtype)))
out = self.evaluate(sparse_apply_adagrad)
self.assertShapeEqual(out, sparse_apply_adagrad)
for (i, index) in enumerate(indices):
self.assertAllCloseAccordingToType(
x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i])**(-0.5),
self.evaluate(var)[index])
self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],
self.evaluate(accum)[index])
def _testTypesForSparseFtrl(self,
x,
y,
z,
lr,
grad,
indices,
l1=0.0,
l2=0.0,
lr_power=-0.5):
self.setUp()
with self.session(use_gpu=False):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
linear = variables.VariableV1(z)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
sparse_apply_ftrl = training_ops.sparse_apply_ftrl(
var,
accum,
linear,
grad,
constant_op.constant(indices, self._toType(indices.dtype)),
lr,
l1,
l2,
lr_power=lr_power)
out = self.evaluate(sparse_apply_ftrl)
self.assertShapeEqual(out, sparse_apply_ftrl)
for (i, index) in enumerate(indices):
self.assertAllCloseAccordingToType(
x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i])**
(lr_power),
self.evaluate(var)[index])
self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],
self.evaluate(accum)[index])
@test_util.run_v1_only("b/120545219")
def testSparseApplyAdagrad(self):
for (dtype, index_type) in itertools.product(
[np.float16, np.float32, np.float64], [np.int32, np.int64]):
x_val = [np.arange(10), np.arange(10, 20), np.arange(20, 30)]
y_val = [np.arange(1, 11), np.arange(11, 21), np.arange(21, 31)]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [np.arange(10), np.arange(10)]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseAdagrad(x, y, lr, grad, indices)
@test_util.run_v1_only("b/120545219")
def testSparseApplyAdagradDim1(self):
for (dtype, index_type) in itertools.product(
[np.float16, np.float32, np.float64], [np.int32, np.int64]):
x_val = [[1.0], [2.0], [3.0]]
y_val = [[4.0], [5.0], [6.0]]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [[1.5], [2.5]]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseAdagrad(x, y, lr, grad, indices)
@test_util.run_v1_only("b/120545219")
def testSparseApplyFtrlDim1(self):
for (dtype, index_type) in itertools.product(
[np.float16, np.float32, np.float64], [np.int32, np.int64]):
x_val = [[0.0], [0.0], [0.0]]
y_val = [[4.0], [5.0], [6.0]]
z_val = [[0.0], [0.0], [0.0]]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
z = np.array(z_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [[1.5], [2.5]]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseFtrl(x, y, z, lr, grad, indices)
@test_util.run_v1_only("b/120545219")
def testApplyAdam(self):
for dtype, use_gpu in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
var = np.arange(100).astype(dtype)
m = np.arange(1, 101).astype(dtype)
v = np.arange(101, 201).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForAdam(var, m, v, grad, use_gpu)
def _testTypesForAdam(self, var, m, v, grad, use_gpu):
self.setUp()
with self.session(use_gpu=use_gpu):
var_t = variables.VariableV1(var)
m_t = variables.VariableV1(m)
v_t = variables.VariableV1(v)
t = 1
beta1 = np.array(0.9, dtype=var.dtype)
beta2 = np.array(0.999, dtype=var.dtype)
beta1_power = beta1**t
beta2_power = beta2**t
lr = np.array(0.001, dtype=var.dtype)
epsilon = np.array(1e-8, dtype=var.dtype)
beta1_t = constant_op.constant(beta1, self._toType(var.dtype), [])
beta2_t = constant_op.constant(beta2, self._toType(var.dtype), [])
beta1_power_t = variables.VariableV1(beta1_power)
beta2_power_t = variables.VariableV1(beta2_power)
lr_t = constant_op.constant(lr, self._toType(var.dtype), [])
epsilon_t = constant_op.constant(epsilon, self._toType(var.dtype), [])
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(var, self.evaluate(var_t))
new_var, _, _ = self._adamUpdateNumpy(var, grad, t, m, v, lr, beta1,
beta2, epsilon)
apply_adam = training_ops.apply_adam(var_t, m_t, v_t, beta1_power_t,
beta2_power_t, lr_t, beta1_t,
beta2_t, epsilon_t, grad)
out = self.evaluate(apply_adam)
self.assertShapeEqual(out, apply_adam)
self.assertAllCloseAccordingToType(new_var, out)
def _adamUpdateNumpy(self, param, g_t, t, m, v, alpha, beta1, beta2, epsilon):
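    # Adam update (Kingma & Ba, 2015) with the bias correction folded into
    # the step size:
    #   m_t = beta1 * m + (1 - beta1) * g_t
    #   v_t = beta2 * v + (1 - beta2) * g_t**2
    #   alpha_t = alpha * sqrt(1 - beta2**t) / (1 - beta1**t)
    #   param_t = param - alpha_t * m_t / (sqrt(v_t) + epsilon)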
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/training/training_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpoints tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.tracking import util as trackable_utils
def _create_checkpoints(sess, checkpoint_dir):
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
checkpoint_state_name = "checkpoint"
v1 = variable_scope.get_variable("var1", [1, 10])
v2 = variable_scope.get_variable("var2", [10, 10])
v3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
v4 = variable_scope.get_variable("var4", [9, 9])
sess.run(variables.global_variables_initializer())
v1_value, v2_value, v3_value, v4_value = sess.run([v1, v2, v3, v4])
saver = saver_lib.Saver()
saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
return v1_value, v2_value, v3_value, v4_value
def _create_partition_checkpoints(sess, checkpoint_dir):
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
checkpoint_state_name = "checkpoint"
with variable_scope.variable_scope("scope"):
v1 = variable_scope.get_variable(
name="var1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
sess.run(variables.global_variables_initializer())
v1_value = sess.run(v1._get_variable_list())
saver = saver_lib.Saver()
saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
return v1_value
class CheckpointsTest(test.TestCase):
def testNoCheckpoints(self):
checkpoint_dir = self.get_temp_dir() + "/no_checkpoints"
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), [])
def testNoTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var5"), [])
def testGetTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), v1)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var2"), v2)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var3"), v3)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "useful_scope/var4"), v4)
def testGetAllVariables(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_create_checkpoints(session, checkpoint_dir)
self.assertEqual(
checkpoint_utils.list_variables(checkpoint_dir),
[("useful_scope/var4", [9, 9]), ("var1", [1, 10]), ("var2", [10, 10]),
("var3", [100, 100])])
def testInitFromCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("my1", [1, 10])
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable("my2", [10, 10])
with variable_scope.variable_scope("other_useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
my3 = variable_scope.get_variable("my3", [100, 100])
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"var1": "some_scope/my1",
"useful_scope/": "some_scope/some_other_scope/other_useful_scope/",
})
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"var2": "some_scope/some_other_scope/my2",
"var3": my3,
})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
        # Check that the restored values were not embedded as constants in
        # the graph.
self.assertLess(len(str(session.graph.as_graph_def())), 29000)
def testInitialValueComesFromCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope(
"some_scope", initializer=init_ops.zeros_initializer()):
my1 = variable_scope.get_variable("my1", [1, 10])
before = my1.initialized_value()
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {"var1": my1})
after = my1.initialized_value()
self.assertAllEqual(session.run(before), [[0.0] * 10])
self.assertAllEqual(session.run(after), v1)
session.run(variables.global_variables_initializer())
self.assertAllEqual(session.run(my1), v1)
self.assertAllEqual(session.run(my1.initialized_value()), v1)
self.assertAllClose(session.run(before), v1)
self.assertAllClose(session.run(after), v1)
with self.assertRaises(AssertionError):
self.assertAllClose(v1, [[0.0] * 10])
def testInitWithScopeDoesNotCaptureSuffixes(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, v4 = _create_checkpoints(session, checkpoint_dir)
with ops.Graph().as_default() as g:
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
with variable_scope.variable_scope("useful_scope_1"):
my5_init = [[1.0, 2.0], [3.0, 4.0]]
my5 = variable_scope.get_variable("var5", initializer=my5_init)
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope/": "useful_scope/"})
with self.session(graph=g) as session:
session.run(variables.global_variables_initializer())
self.assertAllEqual(my4.eval(session), v4)
self.assertAllEqual(my5.eval(session), my5_init)
def testRestoreRunsOnSameDevice(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_create_checkpoints(session, checkpoint_dir)
with ops.Graph().as_default():
with ops.device("/job:ps"):
with variable_scope.variable_scope("useful_scope"):
variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope/": "useful_scope/"})
def testInitFromRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("var1", [1, 10])
my2 = variable_scope.get_variable("var2", [10, 10])
my3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"/": "some_scope/",})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
def testInitToRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
my1 = variable_scope.get_variable("var1", [1, 10])
my2 = variable_scope.get_variable("var2", [10, 10])
my3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"/": "/",})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
def testInitFromPartitionVar(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1 = _create_partition_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.zeros_initializer(),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
# Create another variable with different partitions than the variable in
# the checkpoint.
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable(
name="var1",
shape=[100, 100],
initializer=init_ops.zeros_initializer(),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=16 << 10))
my2_var_list = my2._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"scope/var1": "some_scope/my1",
"scope/": "some_other_scope/"})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
my2_values = session.run(my2_var_list)
        # Verify we created a different number of partitions.
        self.assertNotEqual(len(my2_values), len(v1))
        # Verify the values were correctly initialized in spite of different
# partitions.
full_my2_values = np.concatenate(my2_values, axis=0)
full_v1_values = np.concatenate(v1, axis=0)
self.assertAllEqual(full_my2_values, full_v1_values)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"scope/var1": my1_var_list,})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
def testInitFromCheckpointMissing(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
_ = variable_scope.get_variable("my1", [10, 10])
_ = variable_scope.get_variable(
"my2", [1, 10],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer())
# No directory.
with self.assertRaises(errors_impl.OpError):
checkpoint_utils.init_from_checkpoint("no_dir",
{"var1": "some_scope/my1"})
# No variable in checkpoint.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"no_var": "some_scope/my1"})
# No variable in the graph.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var3": "some_scope/no_var"})
# Shape mismatch.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var1": "some_scope/my1"})
          # Variables 'my1' and 'my2' are missing in the given checkpoint scope.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(
checkpoint_dir, {"useful_scope/": "some_scope/"})
# Mapping is not to scope name.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope": "some_scope/"})
def testNoAdditionalReadOpsForResourceVariables(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
my1 = resource_variable_ops.ResourceVariable([[0.0] * 10], name="my1")
with ops.name_scope("init_from_checkpoint"):
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {"var1": my1})
# Basic sanity checks:
session.run(variables.global_variables_initializer())
self.assertAllEqual(session.run(my1), v1)
ops_in_init_from_checkpoint_scope = [
op for op in g.get_operations()
if (op.name.startswith("init_from_checkpoint/") and
not op.name.startswith("init_from_checkpoint/checkpoint_initializer"
) and
op.type != "AssignVariableOp" and
op.type != "Identity")
]
self.assertEqual(ops_in_init_from_checkpoint_scope, [])
class CheckpointIteratorTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testReturnsEmptyIfNoCheckpointsFound(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "no_checkpoints_found")
num_found = 0
for _ in checkpoint_utils.checkpoints_iterator(checkpoint_dir, timeout=0):
num_found += 1
self.assertEqual(num_found, 0)
@test_util.run_in_graph_and_eager_modes
def testReturnsSingleCheckpointIfOneCheckpointFound(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "one_checkpoint_found")
if not gfile.Exists(checkpoint_dir):
gfile.MakeDirs(checkpoint_dir)
save_path = os.path.join(checkpoint_dir, "model.ckpt")
a = resource_variable_ops.ResourceVariable(5)
self.evaluate(a.initializer)
checkpoint = trackable_utils.Checkpoint(a=a)
checkpoint.save(file_prefix=save_path)
num_found = 0
for _ in checkpoint_utils.checkpoints_iterator(checkpoint_dir, timeout=0):
num_found += 1
self.assertEqual(num_found, 1)
@test_util.run_v1_only("Tests v1-style checkpoint sharding")
def testReturnsSingleCheckpointIfOneShardedCheckpoint(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
"one_checkpoint_found_sharded")
if not gfile.Exists(checkpoint_dir):
gfile.MakeDirs(checkpoint_dir)
global_step = variables.Variable(0, name="v0")
# This will result in 3 different checkpoint shard files.
with ops.device("/cpu:0"):
variables.Variable(10, name="v1")
with ops.device("/cpu:1"):
variables.Variable(20, name="v2")
saver = saver_lib.Saver(sharded=True)
with session_lib.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as session:
session.run(variables.global_variables_initializer())
save_path = os.path.join(checkpoint_dir, "model.ckpt")
saver.save(session, save_path, global_step=global_step)
num_found = 0
for _ in checkpoint_utils.checkpoints_iterator(checkpoint_dir, timeout=0):
num_found += 1
self.assertEqual(num_found, 1)
@test_util.run_in_graph_and_eager_modes
def testTimeoutFn(self):
timeout_fn_calls = [0]
def timeout_fn():
timeout_fn_calls[0] += 1
return timeout_fn_calls[0] > 3
results = list(
checkpoint_utils.checkpoints_iterator(
"/non-existent-dir", timeout=0.1, timeout_fn=timeout_fn))
self.assertEqual([], results)
self.assertEqual(4, timeout_fn_calls[0])
@test_util.run_all_in_graph_and_eager_modes
class WaitForNewCheckpointTest(test.TestCase):
def testReturnsNoneAfterTimeout(self):
start = time.time()
ret = checkpoint_utils.wait_for_new_checkpoint(
"/non-existent-dir", "foo", timeout=1.0, seconds_to_sleep=0.5)
end = time.time()
self.assertIsNone(ret)
    # We've waited at least one sleep interval (0.5 seconds).
self.assertGreater(end, start + 0.5)
# The timeout kicked in.
self.assertLess(end, start + 1.1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/checkpoint_utils_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensorboard_logging provides logging that is also written to the events file.
Any messages logged via this module will be logged both via the platform logging
mechanism and to the SummaryWriter set via `set_summary_writer`. This is useful
for logging messages that you might want to be visible from inside TensorBoard
or that should be permanently associated with the training session.
You can use this just like the logging module:
>>> tensorboard_logging.set_summary_writer(summary_writer)
>>> tensorboard_logging.info("my %s", "message")
>>> tensorboard_logging.log(tensorboard_logging.WARN, "something")
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.core.util import event_pb2
from tensorflow.python.platform import tf_logging as logging
DEBUG = 'DEBUG'
INFO = 'INFO'
WARN = 'WARN'
ERROR = 'ERROR'
FATAL = 'FATAL'
# Messages with levels below this verbosity will not be logged.
_verbosity = WARN
# A value meaning 'not set yet' so we can use None to mean 'user actively told
# us they don't want a SummaryWriter'.
_sentinel_summary_writer = object()
# The SummaryWriter instance to use when logging, or None to not log, or
# _sentinel_summary_writer to indicate that the user hasn't called
# set_summary_writer yet.
_summary_writer = _sentinel_summary_writer
# Map from the tensorboard_logging logging enum values to the proto's enum
# values.
_LEVEL_PROTO_MAP = {
DEBUG: event_pb2.LogMessage.DEBUGGING,
INFO: event_pb2.LogMessage.INFO,
WARN: event_pb2.LogMessage.WARN,
ERROR: event_pb2.LogMessage.ERROR,
FATAL: event_pb2.LogMessage.FATAL,
}
# Map from the tensorboard_logging module levels to the logging module levels.
_PLATFORM_LOGGING_LEVEL_MAP = {
DEBUG: logging.DEBUG,
INFO: logging.INFO,
WARN: logging.WARN,
ERROR: logging.ERROR,
FATAL: logging.FATAL
}
def get_verbosity():
return _verbosity
def set_verbosity(verbosity):
_check_verbosity(verbosity)
global _verbosity
_verbosity = verbosity
def _check_verbosity(verbosity):
if verbosity not in _LEVEL_PROTO_MAP:
raise ValueError('Level %s is not a valid tensorboard_logging level' %
verbosity)
def set_summary_writer(summary_writer):
"""Sets the summary writer that events will be logged to.
Calling any logging methods inside this module without calling this method
will fail. If you don't want to log, call `set_summary_writer(None)`.
Args:
summary_writer: Either a SummaryWriter or None. None will cause messages not
to be logged to any SummaryWriter, but they will still be passed to the
platform logging module.
"""
global _summary_writer
_summary_writer = summary_writer
def _clear_summary_writer():
"""Makes all subsequent log invocations error.
This is only used for testing. If you want to disable TensorBoard logging,
call `set_summary_writer(None)` instead.
"""
global _summary_writer
_summary_writer = _sentinel_summary_writer
def log(level, message, *args):
"""Conditionally logs `message % args` at the level `level`.
Note that tensorboard_logging verbosity and logging verbosity are separate;
the message will always be passed through to the logging module regardless of
whether it passes the tensorboard_logging verbosity check.
Args:
level: The verbosity level to use. Must be one of
tensorboard_logging.{DEBUG, INFO, WARN, ERROR, FATAL}.
message: The message template to use.
*args: Arguments to interpolate to the message template, if any.
Raises:
ValueError: If `level` is not a valid logging level.
RuntimeError: If the `SummaryWriter` to use has not been set.
"""
if _summary_writer is _sentinel_summary_writer:
raise RuntimeError('Must call set_summary_writer before doing any '
'logging from tensorboard_logging')
_check_verbosity(level)
proto_level = _LEVEL_PROTO_MAP[level]
if proto_level >= _LEVEL_PROTO_MAP[_verbosity]:
log_message = event_pb2.LogMessage(level=proto_level,
message=message % args)
event = event_pb2.Event(wall_time=time.time(), log_message=log_message)
if _summary_writer:
_summary_writer.add_event(event)
logging.log(_PLATFORM_LOGGING_LEVEL_MAP[level], message, *args)
def debug(message, *args):
log(DEBUG, message, *args)
def info(message, *args):
log(INFO, message, *args)
def warn(message, *args):
log(WARN, message, *args)
def error(message, *args):
log(ERROR, message, *args)
def fatal(message, *args):
log(FATAL, message, *args)
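# Illustrative sketch (not part of this module): raising the verbosity floor
# suppresses lower-level events from the events file, while every message is
# still forwarded to the platform logger:
#
#   tensorboard_logging.set_summary_writer(summary_writer)
#   tensorboard_logging.set_verbosity(tensorboard_logging.ERROR)
#   tensorboard_logging.warn("skipped by TensorBoard, platform-logged anyway")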
|
tensorflow-master
|
tensorflow/python/training/tensorboard_logging.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for evaluation and summarization of metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
def _get_or_create_eval_step():
"""Gets or creates the eval step `Tensor`.
Returns:
A `Tensor` representing a counter for the evaluation step.
Raises:
ValueError: If multiple `Tensors` have been added to the
`tf.GraphKeys.EVAL_STEP` collection.
"""
graph = ops.get_default_graph()
eval_steps = graph.get_collection(ops.GraphKeys.EVAL_STEP)
if len(eval_steps) == 1:
return eval_steps[0]
elif len(eval_steps) > 1:
raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')
else:
counter = variable_scope.get_variable(
'eval_step',
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.EVAL_STEP])
return counter
def _get_latest_eval_step_value(update_ops):
"""Gets the eval step `Tensor` value after running `update_ops`.
Args:
update_ops: A list of `Tensors` or a dictionary of names to `Tensors`, which
are run before reading the eval step value.
Returns:
A `Tensor` representing the value for the evaluation step.
"""
if isinstance(update_ops, dict):
update_ops = list(update_ops.values())
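  # Routing the read through an identity op under a control dependency
  # guarantees the returned value reflects the eval step *after* `update_ops`
  # have run, rather than a stale cached read.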
with ops.control_dependencies(update_ops):
return array_ops.identity(_get_or_create_eval_step().read_value())
class _MultiStepStopAfterNEvalsHook(session_run_hook.SessionRunHook):
"""Run hook used by the evaluation routines to run the `eval_ops` N times."""
def __init__(self, num_evals, steps_per_run=1):
"""Constructs the run hook.
Args:
      num_evals: The number of evaluations to run for. If set to None, will
iterate the dataset until all inputs are exhausted.
steps_per_run: Number of steps executed per run call.
"""
self._num_evals = num_evals
self._evals_completed = None
self._steps_per_run_initial_value = steps_per_run
def _set_evals_completed_tensor(self, updated_eval_step):
self._evals_completed = updated_eval_step
def begin(self):
self._steps_per_run_variable = \
basic_session_run_hooks.get_or_create_steps_per_run_variable()
def after_create_session(self, session, coord):
# Update number of steps to run in the first run call
if self._num_evals is None:
steps = self._steps_per_run_initial_value
else:
steps = min(self._steps_per_run_initial_value, self._num_evals)
self._steps_per_run_variable.load(steps, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(
{'evals_completed': self._evals_completed})
def after_run(self, run_context, run_values):
evals_completed = run_values.results['evals_completed']
# Update number of steps to run in the next iteration
if self._num_evals is None:
steps = self._steps_per_run_initial_value
else:
steps = min(self._num_evals - evals_completed,
self._steps_per_run_initial_value)
self._steps_per_run_variable.load(steps, session=run_context.session)
if self._num_evals is None:
logging.info('Evaluation [%d]', evals_completed)
else:
logging.info('Evaluation [%d/%d]', evals_completed, self._num_evals)
if self._num_evals is not None and evals_completed >= self._num_evals:
run_context.request_stop()
class _StopAfterNEvalsHook(session_run_hook.SessionRunHook):
"""Run hook used by the evaluation routines to run the `eval_ops` N times."""
def __init__(self, num_evals, log_progress=True):
"""Constructs the run hook.
Args:
      num_evals: The number of evaluations to run for. If set to None, will
iterate the dataset until all inputs are exhausted.
log_progress: Whether to log evaluation progress, defaults to True.
"""
# The number of evals to run for.
self._num_evals = num_evals
self._evals_completed = None
self._log_progress = log_progress
# Reduce logging frequency if there are 20 or more evaluations.
self._log_frequency = (1 if (num_evals is None or num_evals < 20) else
math.floor(num_evals / 10.))
def _set_evals_completed_tensor(self, updated_eval_step):
self._evals_completed = updated_eval_step
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(
{'evals_completed': self._evals_completed})
def after_run(self, run_context, run_values):
evals_completed = run_values.results['evals_completed']
if self._log_progress:
if self._num_evals is None:
logging.info('Evaluation [%d]', evals_completed)
else:
if ((evals_completed % self._log_frequency) == 0 or
(self._num_evals == evals_completed)):
logging.info('Evaluation [%d/%d]', evals_completed, self._num_evals)
if self._num_evals is not None and evals_completed >= self._num_evals:
run_context.request_stop()
def _evaluate_once(checkpoint_path,
master='',
scaffold=None,
eval_ops=None,
feed_dict=None,
final_ops=None,
final_ops_feed_dict=None,
hooks=None,
config=None):
"""Evaluates the model at the given checkpoint path.
During a single evaluation, the `eval_ops` is run until the session is
interrupted or requested to finish. This is typically requested via a
`tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running
the requested number of times.
Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of
`Tensors` or a dictionary from names to `Tensors`. The `final_ops` is
evaluated a single time after `eval_ops` has finished running and the fetched
values of `final_ops` are returned. If `final_ops` is left as `None`, then
`None` is returned.
One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record
summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
summaries run immediately after the model checkpoint has been restored.
Note that `evaluate_once` creates a local variable used to track the number of
evaluations run via `tf.contrib.training.get_or_create_eval_step`.
Consequently, if a custom local init op is provided via a `scaffold`, the
caller should ensure that the local init op also initializes the eval step.
Args:
checkpoint_path: The path to a checkpoint to use for evaluation.
master: The BNS address of the TensorFlow master.
    scaffold: A tf.compat.v1.train.Scaffold instance for initializing variables
and restoring variables. Note that `scaffold.init_fn` is used by the
function to restore the checkpoint. If you supply a custom init_fn, then
it must also take care of restoring the model from its checkpoint.
eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to
`Tensors`, which is run until the session is requested to stop, commonly
done by a `tf.contrib.training.StopAfterNEvalsHook`.
feed_dict: The feed dictionary to use when executing the `eval_ops`.
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.
hooks: List of `tf.estimator.SessionRunHook` callbacks which are run inside
the evaluation loop.
config: An instance of `tf.compat.v1.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
Returns:
The fetched values of `final_ops` or `None` if `final_ops` is `None`.
"""
eval_step = _get_or_create_eval_step()
# Prepare the run hooks.
hooks = list(hooks or [])
if eval_ops is not None:
if any(isinstance(h, _MultiStepStopAfterNEvalsHook) for h in hooks):
steps_per_run_variable = \
basic_session_run_hooks.get_or_create_steps_per_run_variable()
update_eval_step = state_ops.assign_add(
eval_step,
math_ops.cast(steps_per_run_variable, dtype=eval_step.dtype),
use_locking=True)
else:
update_eval_step = state_ops.assign_add(eval_step, 1, use_locking=True)
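    # Piggy-back the eval-step increment onto `eval_ops` so that every
    # `session.run` of the evaluation loop also advances the counter that the
    # stop hooks watch.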
if isinstance(eval_ops, dict):
eval_ops['update_eval_step'] = update_eval_step
elif isinstance(eval_ops, (tuple, list)):
eval_ops = list(eval_ops) + [update_eval_step]
else:
eval_ops = [eval_ops, update_eval_step]
eval_step_value = _get_latest_eval_step_value(eval_ops)
for h in hooks:
if isinstance(h, (_StopAfterNEvalsHook, _MultiStepStopAfterNEvalsHook)):
h._set_evals_completed_tensor(eval_step_value) # pylint: disable=protected-access
  logging.info('Starting evaluation at ' +
               time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()))
# Prepare the session creator.
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master=master,
config=config)
final_ops_hook = basic_session_run_hooks.FinalOpsHook(final_ops,
final_ops_feed_dict)
hooks.append(final_ops_hook)
with monitored_session.MonitoredSession(
session_creator=session_creator, hooks=hooks) as session:
if eval_ops is not None:
while not session.should_stop():
session.run(eval_ops, feed_dict)
logging.info('Finished evaluation at ' +
time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()))
return final_ops_hook.final_ops_values
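# Illustrative sketch (hypothetical names): evaluating a streaming metric for
# 100 batches from a saved checkpoint.
#
#   accuracy, update_op = tf.compat.v1.metrics.accuracy(labels, predictions)
#   results = _evaluate_once(
#       checkpoint_path="/tmp/model.ckpt-1234",
#       eval_ops=update_op,
#       final_ops={"accuracy": accuracy},
#       hooks=[_StopAfterNEvalsHook(num_evals=100)])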
|
tensorflow-master
|
tensorflow/python/training/evaluation.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class AdagradOptimizerTest(test.TestCase):
def doTestBasic(self,
use_locking=False,
use_resource=False,
use_callable_params=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 3.0
if not use_callable_params:
learning_rate = learning_rate()
ada_opt = adagrad.AdagradOptimizer(
learning_rate, initial_accumulator_value=0.1, use_locking=use_locking)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Validate updated params
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), v0_val)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), v1_val)
def testBasic(self):
self.doTestBasic(use_locking=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testBasicResource(self):
self.doTestBasic(use_locking=False, use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(
use_locking=False, use_resource=True, use_callable_params=True)
def testBasicLocked(self):
self.doTestBasic(use_locking=True)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable(
[[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
        minimize_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0], [3.0, 4.0]],
self.evaluate(var0))
        # Run 1 step of adagrad
        minimize_op.run()
# Validate updated params
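        # The first-step gradient on row 0 is large ([112, 140]), so the
        # Adagrad step is ~ lr * g / sqrt(g ** 2) = lr = 1 per coordinate;
        # row 1 is never touched by the embedding lookup.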
self.assertAllCloseAccordingToType([[0, 1], [3, 4]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([[1.0], [2.0]], self.evaluate(var0))
self.assertAllClose([[3.0], [4.0]], self.evaluate(var1))
        # Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([[-1.6026098728179932], [2.0]]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([[3.0], [3.715679168701172]]), self.evaluate(var1))
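        # Only the rows named by each IndexedSlices are updated: var0[1]
        # and var1[0] keep their initial values.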
@test_util.run_deprecated_v1
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
@test_util.run_deprecated_v1
def testSparseRepeatedIndicesResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_repeated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_repeated = math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_repeated, [0, 0]))
var_aggregated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_aggregated = 2 * math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_aggregated, [0]))
update_op_repeated = adagrad.AdagradOptimizer(
2.0).minimize(loss_repeated)
update_op_aggregated = adagrad.AdagradOptimizer(
2.0).minimize(loss_aggregated)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(
self.evaluate(var_repeated), self.evaluate(var_aggregated))
for _ in range(3):
update_op_repeated.run()
update_op_aggregated.run()
self.assertAllCloseAccordingToType(
self.evaluate(var_repeated), self.evaluate(var_aggregated))
@test_util.run_deprecated_v1
def testSparseStability(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
shape = [1, 6]
var0 = variables.Variable(
[[
0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
-0.0105945
]],
dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[[
-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
-8.4877e-05, -9.48906e-05
]],
shape=shape,
dtype=dtype),
constant_op.constant([0]),
constant_op.constant(shape))
ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
init = variables.global_variables_initializer()
for _ in range(100):
init.run()
ada_update.run()
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([[
0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
-0.01029443
]]), self.evaluate(var0))
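        # The gradients are O(1e-4), so their squares barely move the
        # accumulator off its initial 0.1; initial_accumulator_value is
        # what keeps sqrt(accum) safely away from zero here.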
@test_util.run_deprecated_v1
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = ada_opt.get_slot(var1, "accumulator")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1))
@test_util.run_v1_only("b/120545219")
def testDynamicShapeVariable_Ok(self):
with self.cached_session():
v = variable_scope.get_variable("v", initializer=constant_op.constant(1.),
validate_shape=False)
self.assertFalse(v.shape.is_fully_defined())
# Creating optimizer should cause no exception.
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
@test_util.run_v1_only("b/120545219")
def testDynamicShapeVariableWithCallableInit(self):
var0 = variable_scope.get_variable("var0",
initializer=constant_op.constant(1.),
validate_shape=False)
self.assertFalse(var0.shape.is_fully_defined())
grads0 = constant_op.constant(0.1, dtype=dtypes.float32)
learning_rate = lambda: 3.0
ada_opt = adagrad.AdagradOptimizer(
learning_rate, initial_accumulator_value=0.1, use_locking=True)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0], [var0]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val = self.evaluate([var0])
self.assertAllClose([1.0], v0_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0], [var0]))
# Validate updated params
v0_val = self.evaluate([var0])
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932]), v0_val)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/adagrad_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training.input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import input as inp
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.util import compat
class MatchFilenamesOnceTest(test_lib.TestCase):
@test_util.run_deprecated_v1
def test(self):
temp_dir = self.get_temp_dir()
filenames = [os.path.join(temp_dir, n) for n in os.listdir(temp_dir)]
additional = [
os.path.join(self.get_temp_dir(), "match_filenames.%d" % i)
for i in range(3)
]
for name in additional:
open(name, "w").write("Some contents")
filenames = list(set(filenames + additional))
with self.cached_session():
star = inp.match_filenames_once(os.path.join(self.get_temp_dir(), "*"))
question = inp.match_filenames_once(
os.path.join(self.get_temp_dir(), "match_filenames.?"))
one = inp.match_filenames_once(additional[1])
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
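      # match_filenames_once stores its glob results in a local variable,
      # so the local-variables initializer above is what populates it.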
self.assertItemsEqual(
map(compat.as_bytes, filenames), self.evaluate(star))
self.assertItemsEqual(
map(compat.as_bytes, additional), self.evaluate(question))
self.assertItemsEqual([compat.as_bytes(additional[1])],
self.evaluate(one))
class LimitEpochsTest(test_lib.TestCase):
@test_util.run_deprecated_v1
def testNoLimit(self):
with self.cached_session():
seven = constant_op.constant(7)
seven_forever = inp.limit_epochs(seven)
variables.local_variables_initializer().run()
for _ in range(100):
self.assertEqual(7, self.evaluate(seven_forever))
@test_util.run_deprecated_v1
def testLimit(self):
with self.cached_session():
love_me = constant_op.constant("Love Me")
love_me_two_times = inp.limit_epochs(love_me, num_epochs=2)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
self.assertEqual(b"Love Me", self.evaluate(love_me_two_times))
self.assertEqual(b"Love Me", self.evaluate(love_me_two_times))
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(love_me_two_times)
class InputProducerTest(test_lib.TestCase):
@test_util.run_deprecated_v1
def testNoShuffle(self):
with self.cached_session():
input_tensor = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
num_epochs = 2
queue = inp.input_producer(
input_tensor, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(input_tensor) * num_epochs)
dequeue = queue.dequeue()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
self.assertAllEqual(input_tensor * num_epochs,
self.evaluate(dequeue_many))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testNoShapeInference(self):
with self.cached_session():
# Disable shape inference for the input.
input_value = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
input_tensor = array_ops.placeholder_with_default(input_value, shape=None)
num_epochs = 2
queue = inp.input_producer(
input_tensor, element_shape=[4], num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(input_value) * num_epochs)
dequeue = queue.dequeue()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
self.assertAllEqual(input_value * num_epochs, self.evaluate(dequeue_many))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testShapeError(self):
input_tensor = array_ops.placeholder(dtypes.float32, None)
with self.assertRaisesRegexp(ValueError, "fully defined shape"):
_ = inp.input_producer(input_tensor)
class StringInputProducerTest(test_lib.TestCase):
@test_util.run_deprecated_v1
def testNoShuffle(self):
with self.cached_session():
strings = [b"to", b"be", b"or", b"not", b"to", b"be"]
num_epochs = 3
queue = inp.string_input_producer(
strings, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(strings) * num_epochs)
dequeue = queue.dequeue()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
output = self.evaluate(dequeue_many)
self.assertAllEqual(strings * num_epochs, output)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testShuffle(self):
with self.cached_session():
strings = [b"a", b"b", b"c"]
num_epochs = 600
queue = inp.string_input_producer(
strings, num_epochs=num_epochs, shuffle=True, seed=271828)
dequeue_many = queue.dequeue_many(len(strings))
dequeue = queue.dequeue()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Validate that we only shuffle the strings within an epoch and
# count how often each possible order appears.
expected = [b"abc", b"acb", b"bac", b"bca", b"cab", b"cba"]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = self.evaluate(dequeue_many)
key = b"".join(output)
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf_logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
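      # With 600 epochs over 6 possible orders, expected_frequency is 100
      # and the margin is 40, so every count must fall in (60, 140).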
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
def testNullStringPython(self):
# Graph-construction time check for empty string list:
with self.cached_session():
with self.assertRaises(ValueError):
_ = inp.string_input_producer([])
@test_util.run_deprecated_v1
def testNullString(self):
# Runtime check for empty string list. This is slightly oblique:
# The queue runner should die with an assertion error on the null
# input tensor, causing the dequeue to fail with an OutOfRangeError.
with self.cached_session():
coord = coordinator.Coordinator()
queue = inp.string_input_producer(
constant_op.constant(
[], dtype=dtypes.string))
dequeue = queue.dequeue()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners(coord=coord)
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
coord.request_stop()
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
strings = [b"to", b"be", b"or", b"not", b"to", b"be"]
queue = inp.string_input_producer(
strings, shared_name="SHARED_NAME_XYZ", name="Q")
self.assertProtoEquals("s: 'SHARED_NAME_XYZ'",
queue.queue_ref.op.node_def.attr["shared_name"])
@test_util.run_deprecated_v1
def testConstructionRace(self):
with self.cached_session() as sess:
strings = [b"to", b"be", b"or", b"not", b"to", b"be"]
queue = inp.string_input_producer(strings, shuffle=False)
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(2):
for string in strings:
# NOTE(mrry): This is not the recommended way to write
# dequeuing code (instead you should create a single dequeue
# op before starting the queue runners, and run it
# repeatedly), because it leads to concurrent reading and
# writing of the `tf.Graph` object. However, many users
# write code this way, so we include this test to ensure
# that we can support it.
          self.assertEqual(string, self.evaluate(queue.dequeue()))
coord.request_stop()
coord.join(threads)
class RangeInputProducerTest(test_lib.TestCase):
@test_util.run_deprecated_v1
def testNoShuffle(self):
with self.cached_session():
num_epochs = 3
range_size = 5
queue = inp.range_input_producer(
range_size, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(range_size * num_epochs)
dequeue = queue.dequeue()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
output = self.evaluate(dequeue_many)
self.assertAllEqual(list(xrange(range_size)) * num_epochs, output)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testShuffle(self):
with self.cached_session():
num_epochs = 200
range_size = 2
queue = inp.range_input_producer(
range_size, num_epochs=num_epochs, shuffle=True, seed=314159)
dequeue_many = queue.dequeue_many(range_size)
dequeue = queue.dequeue()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Validate that we only shuffle the integers within an epoch and
# count how often each possible order appears.
expected = [12, 21]
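      # Each epoch's order is encoded as 10 * (first + 1) + (second + 1):
      # 12 means the order (0, 1) and 21 means (1, 0).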
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = self.evaluate(dequeue_many)
key = 10 * (output[0] + 1) + (output[1] + 1)
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf_logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
range_size = 5
queue = inp.range_input_producer(
range_size, shared_name="SHARED_NAME_XYZ", name="Q")
self.assertProtoEquals("s: 'SHARED_NAME_XYZ'",
queue.queue_ref.op.node_def.attr["shared_name"])
class SliceInputProducerTest(test_lib.TestCase):
@test_util.run_deprecated_v1
def testNoShuffle(self):
with self.cached_session() as sess:
num_epochs = 3
source_strings = [b"Alpha", b"Beta", b"Delta", b"Gamma"]
source_ints = [2, 3, 5, 7]
slices = inp.slice_input_producer(
[source_strings, source_ints], num_epochs=num_epochs, shuffle=False)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
num_items = len(source_strings) * num_epochs
output = [self.evaluate(slices) for _ in range(num_items)]
out_strings, out_ints = zip(*output)
self.assertAllEqual(source_strings * num_epochs, out_strings)
self.assertAllEqual(source_ints * num_epochs, out_ints)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(slices)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testShuffle(self):
with self.cached_session() as sess:
num_epochs = 1200
source_strings = ["A", "B", "D", "G"]
source_ints = [7, 3, 5, 2]
slices = inp.slice_input_producer(
[source_strings, source_ints],
num_epochs=num_epochs,
shuffle=True,
seed=161803)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
      # Validate that we only shuffle the slices within an epoch and
      # count how often each possible order appears.
expected = [
b",".join(x)
for x in itertools.permutations([b"A7", b"B3", b"D5", b"G2"])
]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = [self.evaluate(slices) for _ in range(len(source_strings))]
key = b",".join([s + compat.as_bytes(str(i)) for s, i in output])
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf_logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(slices)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
source_strings = ["A", "B", "D", "G"]
source_ints = [7, 3, 5, 2]
slices = inp.slice_input_producer(
[source_strings, source_ints],
shared_name="SHARED_NAME_XYZ",
name="sip")
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
slices[0].op.inputs[1].op.inputs[0].op.node_def.attr["shared_name"])
class DictHelperTest(test_lib.TestCase):
def testListInputs(self):
l = [1, 2, 3, 11, 22, 33]
l2 = inp._as_tensor_list(l)
    self.assertEqual(l, l2)
    l3 = inp._as_original_type(l, l2)
    self.assertEqual(l, l3)
  def testDictInputs(self):
    d = {"a": 1, "b": 2, "c": 3, "aa": 11, "bb": 22, "cc": 33}
    l = inp._as_tensor_list(d)
    self.assertEqual([1, 11, 2, 22, 3, 33], l)
    d2 = inp._as_original_type(d, l)
    self.assertEqual(d, d2)
  def testHeterogeneousKeysDictInputs(self):
    d = {"z": 1, 1: 42, ("a", "b"): 100}
    l = inp._as_tensor_list(d)
    self.assertEqual([100, 42, 1], l)
    d2 = inp._as_original_type(d, l)
    self.assertEqual(d, d2)
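    # _as_tensor_list flattens a dict in a deterministic order (the keys
    # appear to be sorted by their string form), which is why the
    # mixed-type keys above come back as [100, 42, 1].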
class BatchTest(test_lib.TestCase):
def _testOneThreadHelper(self, use_dict):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(
array_ops.stack([zero64, zero64 + 1]), [2, 1]),
values=math_ops.cast(
array_ops.stack([counter, -counter]), dtypes.float32),
dense_shape=[2])
if use_dict:
batched = inp.batch(
{
"c": counter,
"s": sparse_counter,
"S": "string"
},
batch_size=batch_size)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.batch(
[counter, sparse_counter, "string"], batch_size=batch_size)
batched_fetch = batched
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = self.evaluate(batched_fetch)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * batch_size) // 2, # 0, 0, 1, 1, ...
[0, 1] * batch_size)).T)
# [x, -x, x+1, -(x+1), ...]
expected = np.arange(2 * i * batch_size, 2 * (i + 1) * batch_size) // 2
expected *= ([1, -1] * batch_size) # mult by [1, -1, 1, -1, ...]
self.assertAllEqual(results[1].values, expected)
self.assertAllEqual(results[1].dense_shape, [batch_size, 2])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched_fetch)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testOneThread(self):
self._testOneThreadHelper(use_dict=False)
@test_util.run_deprecated_v1
def testOneThreadDict(self):
self._testOneThreadHelper(use_dict=True)
@test_util.run_deprecated_v1
def testUint32DataTypes(self):
values = constant_op.constant([0, 1, 2, 3, 4, 5], dtype=dtypes.uint32)
batched = inp.batch([values], batch_size=2)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
self.evaluate(batched)
coord.request_stop()
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testUint64DataTypes(self):
values = constant_op.constant([0, 1, 2, 3, 4, 5], dtype=dtypes.uint64)
batched = inp.batch([values], batch_size=2)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
self.evaluate(batched)
coord.request_stop()
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testOneThreadDynamicPad(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
string = array_ops.tile(["string"],
math_ops.cast(array_ops.stack([counter]),
dtypes.int32))
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
batched = inp.batch(
[counter, string], batch_size=batch_size, dynamic_pad=True)
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = self.evaluate(batched)
expected_results = np.arange(i * batch_size, (i + 1) * batch_size)
max_len = expected_results[-1]
self.assertAllEqual(results[0], expected_results)
expected_strings = [[b"string"] * rep + [b""] * (max_len - rep)
for rep in expected_results]
self.assertAllEqual(results[1], expected_strings)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testOneThreadEnqueueMany(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
pre_batched = inp.batch([counter, sparse_counter, "string"], batch_size=2)
batched = inp.batch(pre_batched, enqueue_many=True, batch_size=batch_size)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = self.evaluate(batched)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].values,
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testManyThreads(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
num_threads=4)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = self.evaluate(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * batch_size)
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testOneThreadSmallerBatch(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size + extra_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(
array_ops.stack([zero64, zero64 + 1]), [2, 1]),
values=math_ops.cast(
array_ops.stack([counter, -counter]), dtypes.float32),
dense_shape=[2])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
allow_smaller_final_batch=True)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = self.evaluate(batched)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * batch_size) // 2, # 0, 0, 1, 1, ...
[0, 1] * batch_size)).T)
# [x, -x, x+1, -(x+1), ...]
expected = np.arange(2 * i * batch_size, 2 * (i + 1) * batch_size) // 2
expected *= ([1, -1] * batch_size) # mult by [1, -1, 1, -1, ...]
self.assertAllEqual(results[1].values, expected)
self.assertAllEqual(results[1].dense_shape, [batch_size, 2])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra_elements.
results = self.evaluate(batched)
self.assertAllEqual(results[0],
np.arange(num_batches * batch_size,
num_batches * batch_size + extra_elements))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * extra_elements) // 2, # 0, 0, 1, 1, ...
[0, 1] * extra_elements)).T)
self.assertAllEqual(results[1].dense_shape, [extra_elements, 2])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testManyThreadsSmallerBatch(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size + extra_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
num_threads=4,
allow_smaller_final_batch=True)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = self.evaluate(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra_elements.
results = self.evaluate(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), extra_elements)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(extra_elements), np.zeros(extra_elements))).T)
self.assertAllEqual(results[1].dense_shape, [extra_elements, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
self.assertItemsEqual(all_counts,
range(num_batches * batch_size + extra_elements))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.batch(
[counter, "string"],
batch_size=batch_size,
shared_name="SHARED_NAME_XYZ",
name="Q")
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
@test_util.run_deprecated_v1
def testCannotInferRankError(self):
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.int64)
with self.assertRaisesRegexp(ValueError, "Cannot infer Tensor's rank"):
inp.batch([x], batch_size=2)
@test_util.run_deprecated_v1
def testBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.batch([sparse], batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
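    # batch() prepends a batch dimension, so the rank-1 sparse input
    # becomes rank-2 and dense_shape grows from length 1 to length 2.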
@test_util.run_deprecated_v1
def testBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.batch([sparse], batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.batch([sparse], batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.batch([sparse], batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testSingleElementDict(self):
x = inp.batch({"c": [12, 12]}, batch_size=8)
self.assertAllEqual((8, 2), x["c"].get_shape().as_list())
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.cached_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
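        # Pre-batching packs examples together (4 of them, or 1 when
        # keep_input is a scalar) so that enqueue_many=True sees a
        # per-example leading dimension and keep_input can be a vector.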
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_batch(
to_batch,
keep_input,
batch_size,
num_threads=num_threads,
enqueue_many=enqueue_many)
      self.evaluate(variables.global_variables_initializer())
      variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = self.evaluate(batched)
self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_v1_only("b/120545219")
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
@test_util.run_v1_only("b/120545219")
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
@test_util.run_v1_only("b/120545219")
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
@test_util.run_v1_only("b/120545219")
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
@test_util.run_deprecated_v1
def testMaybeEnqueuePerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
@test_util.run_deprecated_v1
def testMultipleThreadMaybeEnqueuePerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
@test_util.run_deprecated_v1
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=constant_op.constant([True, False]),
batch_size=1,
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=constant_op.constant([[True], [False]]),
batch_size=1,
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=array_ops.placeholder(dtypes.bool),
batch_size=1,
enqueue_many=True)
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch([sparse], keep_input=True, batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch(
[sparse], keep_input=True, batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch(
[sparse], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch([sparse], keep_input=True, batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch(
[sparse], keep_input=True, batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch(
[sparse], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testMaybeBatchCorrectValues(self):
sparse_t = sparse_tensor.SparseTensor(
indices=[[0, 1], [0, 2], [1, 0], [1, 3]],
dense_shape=[2, 4],
values=[5, 4, 7, 2])
keep = constant_op.constant([True, False])
batched = inp.maybe_batch(
[sparse_t], keep_input=keep, batch_size=1, enqueue_many=True)
with self.cached_session():
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
batched_np = self.evaluate(batched)
coord.request_stop()
for thread in threads:
thread.join()
self.assertAllEqual([[0, 1], [0, 2]], batched_np.indices)
self.assertAllEqual([5, 4], batched_np.values)
self.assertAllEqual([1, 4], batched_np.dense_shape)
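    # keep_input=[True, False] drops the second example, so only row 0's
    # entries [5, 4] survive and the batched dense_shape is [1, 4].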
class BatchJoinTest(test_lib.TestCase):
def _testTwoThreadsHelper(self, use_dict):
with self.cached_session() as sess:
# Two threads, the first generates (0..69, "a").
num_a = 70
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 90 times and then stops.
num_b = 90
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
if use_dict:
batched = inp.batch_join(
[{
"c": counter,
"s": sparse_counter,
"S": "a"
}, {
"c": ninety_nine,
"s": sparse_ninety_nine,
"S": "b"
}],
batch_size=batch_size)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size)
batched_fetch = batched
# Shapes.
self.assertEqual(3, len(batched_fetch))
self.assertAllEqual((batch_size,), batched_fetch[0].get_shape().as_list())
self.assertAllEqual((None, 2),
batched_fetch[1].indices.get_shape().as_list())
self.assertAllEqual((None,),
batched_fetch[1].values.get_shape().as_list())
self.assertAllEqual((2,),
batched_fetch[1].dense_shape.get_shape().as_list())
self.assertAllEqual((batch_size,), batched_fetch[2].get_shape().as_list())
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = self.evaluate(batched_fetch)
self.assertEqual(3, len(results))
self.assertEqual(batch_size, len(results[0]))
self.assertEqual(batch_size, len(results[2]))
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreads%s saw both count: %s",
"Dict" if use_dict else "", saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched_fetch)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testTwoThreads(self):
self._testTwoThreadsHelper(use_dict=False)
@test_util.run_deprecated_v1
def testTwoThreadsDict(self):
self._testTwoThreadsHelper(use_dict=True)
@test_util.run_deprecated_v1
def testMismatchedDictKeys(self):
with self.assertRaisesRegexp(ValueError, "must have the same keys"):
inp.batch_join(
[{
"c": 12,
"s": 123,
"S": "a"
}, {
"cool": -12,
"s": 99,
"S": "b"
}],
batch_size=8)
@test_util.run_deprecated_v1
def testTwoThreadsDynamicPad(self):
with self.cached_session() as sess:
# Two threads, the first generates (0..69, ["a"] * 1..70).
num_a = 70
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, ["b"] * 99) 90 times and then stops.
num_b = 90
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
a = array_ops.tile(
["a"],
math_ops.cast(array_ops.stack([counter + 1]), dtypes.int32))
b = array_ops.tile(
["b"],
math_ops.cast(array_ops.stack([ninety_nine]), dtypes.int32))
batched = inp.batch_join(
[[counter, a], [ninety_nine, b]],
batch_size=batch_size,
dynamic_pad=True)
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size, None), batched[1].get_shape().as_list())
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
count_string_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = self.evaluate(batched)
self.assertEqual(2, len(results))
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreadsDynamicPad saw both count: %s", saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual( # tiled "a" with counter + 1
count_string_a, np.arange(num_a) + 1)
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testTwoThreadsSmallerBatch(self):
with self.cached_session() as sess:
extra_elements = 2
# Two threads, the first generates (0..69, "a").
num_a = 70 + extra_elements
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 90 times and then stops.
num_b = 90 + extra_elements
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
batched = inp.batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size,
allow_smaller_final_batch=True)
# Shapes.
self.assertEqual(3, len(batched))
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, 2), batched[1].indices.get_shape().as_list())
self.assertAllEqual((None,), batched[1].values.get_shape().as_list())
self.assertAllEqual((2,), batched[1].dense_shape.get_shape().as_list())
self.assertAllEqual((None,), batched[2].get_shape().as_list())
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = self.evaluate(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[2]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Reached the final batch with 2 * extra_elements.
results = self.evaluate(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), 2 * extra_elements)
self.assertEqual(len(results[2]), 2 * extra_elements)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].indices,
np.vstack((np.arange(2 * extra_elements),
np.zeros(2 * extra_elements))).T)
self.assertAllEqual(results[1].dense_shape, [2 * extra_elements, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreadsSmallerBatch saw both count: %s", saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testTwoThreadsDynamicPadSmallerBatch(self):
with self.cached_session() as sess:
extra_elements = 2
# Two threads, the first generates (0..69, ["a"] * 1..70).
num_a = 70 + extra_elements
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, ["b"] * 99) 90 times and then stops.
num_b = 90 + extra_elements
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
a = array_ops.tile(
["a"],
math_ops.cast(array_ops.stack([counter + 1]), dtypes.int32))
b = array_ops.tile(
["b"],
math_ops.cast(array_ops.stack([ninety_nine]), dtypes.int32))
batched = inp.batch_join(
[[counter, a], [ninety_nine, b]],
batch_size=batch_size,
dynamic_pad=True,
allow_smaller_final_batch=True)
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, None), batched[1].get_shape().as_list())
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
count_string_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = self.evaluate(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Reached the final batch with 2 * extra_elements.
results = self.evaluate(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), 2 * extra_elements)
self.assertEqual(len(results[1]), 2 * extra_elements)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreadsDynamicPadSmallerBatch saw both count: %s",
saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual( # tiled "a" with counter + 1
count_string_a, np.arange(num_a) + 1)
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.batch_join(
[[counter, "string"]],
batch_size=batch_size,
shared_name="SHARED_NAME_XYZ",
name="Q")
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size,), batched[1].get_shape().as_list())
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
@test_util.run_deprecated_v1
def testCannotInferRankError(self):
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.int64)
with self.assertRaisesRegexp(ValueError, "Cannot infer Tensor's rank"):
inp.batch_join([[x]], batch_size=2)
@test_util.run_deprecated_v1
def testSingleElementDict(self):
x = inp.batch_join([{"c": [12, 12]}], batch_size=8)
self.assertAllEqual((8, 2), x["c"].get_shape().as_list())
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.cached_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_batch_join(
[to_batch] * num_threads,
keep_input,
batch_size,
enqueue_many=enqueue_many)
variables.initialize_all_variables().run()
variables.initialize_local_variables().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = self.evaluate(batched)
        self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
        self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_v1_only("b/120545219")
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
@test_util.run_v1_only("b/120545219")
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
@test_util.run_v1_only("b/120545219")
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
@test_util.run_v1_only("b/120545219")
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
@test_util.run_deprecated_v1
def testSingleThreadKeepInputPerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
@test_util.run_deprecated_v1
def testMultipleThreadKeepInputPerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
@test_util.run_deprecated_v1
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=constant_op.constant([True, False]),
batch_size=1,
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=constant_op.constant([[True], [False]]),
batch_size=1,
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=array_ops.placeholder(dtypes.bool),
batch_size=1,
enqueue_many=True)
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join([[sparse]], keep_input=True, batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=True, batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join([[sparse]], keep_input=True, batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=True, batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testMaybeBatchCorrectValues(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0, 1], [0, 2], [1, 0], [1, 3]],
dense_shape=[2, 4],
values=[5, 4, 7, 2])
keep = constant_op.constant([True, False])
batched = inp.maybe_batch_join(
[[sparse]], keep_input=keep, batch_size=1, enqueue_many=True)
with self.cached_session():
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
batched_np = self.evaluate(batched)
coord.request_stop()
for thread in threads:
thread.join()
self.assertAllEqual([[0, 1], [0, 2]], batched_np.indices)
self.assertAllEqual([5, 4], batched_np.values)
self.assertAllEqual([1, 4], batched_np.dense_shape)
class ShuffleBatchTest(test_lib.TestCase):
def _testOneThreadHelper(self, use_dict):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
if use_dict:
batched = inp.shuffle_batch(
{
"c": counter,
"s": sparse_counter,
"S": "string"
},
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=141421)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=141421)
batched_fetch = batched
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = self.evaluate(batched_fetch)
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched_fetch)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testOneThread(self):
self._testOneThreadHelper(use_dict=False)
@test_util.run_deprecated_v1
def testOneThreadDict(self):
self._testOneThreadHelper(use_dict=True)
@test_util.run_deprecated_v1
def testOneThreadSmallerBatch(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
total_elements = num_batches * batch_size + extra_elements
counter = examples.count_up_to(total_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=141421,
allow_smaller_final_batch=True)
batched_fetch = batched
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for _ in range(num_batches):
results = self.evaluate(batched_fetch)
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra elements.
results = self.evaluate(batched)
self.assertAllEqual(results[1].dense_shape, [extra_elements, 1])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
all_counts.extend(results[0])
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(total_elements))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched_fetch)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testManyThreads(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=173205,
num_threads=4)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = self.evaluate(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testManyThreadsSmallerBatch(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
total_elements = num_batches * batch_size + extra_elements
counter = examples.count_up_to(total_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=173205,
num_threads=4,
allow_smaller_final_batch=True)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = self.evaluate(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra elements.
results = self.evaluate(batched)
self.assertAllEqual(results[0].shape, [extra_elements])
self.assertAllEqual(results[1].dense_shape, [extra_elements, 1])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
all_counts.extend(results[0])
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(total_elements))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.shuffle_batch(
[counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=10,
shared_name="SHARED_NAME_XYZ",
name="Q")
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.cached_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_shuffle_batch(
to_batch,
batch_size,
10,
1,
keep_input,
num_threads=num_threads,
enqueue_many=enqueue_many)
variables.initialize_all_variables().run()
variables.initialize_local_variables().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = self.evaluate(batched)
self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_v1_only("b/120545219")
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
@test_util.run_v1_only("b/120545219")
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
@test_util.run_v1_only("b/120545219")
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
@test_util.run_v1_only("b/120545219")
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
@test_util.run_deprecated_v1
def testSingleThreadKeepInputPerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
@test_util.run_deprecated_v1
def testMultipleThreadKeepInputPerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
@test_util.run_deprecated_v1
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
keep_input=constant_op.constant([True, False]),
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
keep_input=constant_op.constant([[True]]),
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
keep_input=array_ops.placeholder(dtypes.bool),
enqueue_many=True)
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch([sparse], 2, 10, 1, True)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, True, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, [True, False], enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch([sparse], 2, 10, 1, True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, True, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, [True, False], enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
class ShuffleBatchJoinTest(test_lib.TestCase):
def _testTwoThreadsHelper(self, use_dict):
with self.cached_session() as sess:
# Two threads, the first generates (0..24, "a").
num_a = 25
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 35 times and then stops.
num_b = 35
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
if use_dict:
batched = inp.shuffle_batch_join(
[{
"c": counter,
"s": sparse_counter,
"S": "a"
}, {
"c": ninety_nine,
"s": sparse_ninety_nine,
"S": "b"
}],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=223607)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.shuffle_batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=223607)
batched_fetch = batched
# Shapes.
self.assertEqual(3, len(batched_fetch))
self.assertAllEqual((batch_size,), batched_fetch[0].get_shape().as_list())
self.assertAllEqual((None, 2),
batched_fetch[1].indices.get_shape().as_list())
self.assertAllEqual((None,),
batched_fetch[1].values.get_shape().as_list())
self.assertAllEqual((2,),
batched_fetch[1].dense_shape.get_shape().as_list())
self.assertAllEqual((batch_size,), batched_fetch[2].get_shape().as_list())
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = self.evaluate(batched_fetch)
self.assertEqual(3, len(results))
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[2]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Saw all the items from "a", but scrambled.
self.assertItemsEqual(all_a, range(num_a))
deltas = [all_a[i + 1] - all_a[i] for i in range(len(all_a) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched_fetch)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testTwoThreads(self):
self._testTwoThreadsHelper(use_dict=False)
@test_util.run_deprecated_v1
def testTwoThreadsDict(self):
self._testTwoThreadsHelper(use_dict=True)
@test_util.run_deprecated_v1
def testTwoThreadsSmallerBatch(self):
with self.cached_session() as sess:
# Two threads, the first generates (0..26, "a").
extra_elements = 2
num_a = 25 + extra_elements
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 37 times and then stops.
num_b = 35 + extra_elements
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
batched = inp.shuffle_batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=223607,
allow_smaller_final_batch=True)
# Shapes.
self.assertEqual(3, len(batched))
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, 2), batched[1].indices.get_shape().as_list())
self.assertAllEqual((None,), batched[1].values.get_shape().as_list())
self.assertAllEqual((2,), batched[1].dense_shape.get_shape().as_list())
self.assertAllEqual((None,), batched[2].get_shape().as_list())
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = self.evaluate(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[2]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
      # Reached the end with 2 * extra_elements left.
results = self.evaluate(batched)
self.assertEqual(len(results[0]), 2 * extra_elements)
self.assertAllEqual(results[1].dense_shape, [2 * extra_elements, 1])
self.assertEqual(len(results[2]), 2 * extra_elements)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].indices,
np.vstack((np.arange(2 * extra_elements),
np.zeros(2 * extra_elements))).T)
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Saw all the items from "a", but scrambled, including extras.
self.assertItemsEqual(all_a, range(num_a))
deltas = [all_a[i + 1] - all_a[i] for i in range(len(all_a) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_deprecated_v1
def testMismatchedDictKeys(self):
with self.assertRaisesRegexp(ValueError, "must have the same keys"):
inp.shuffle_batch_join(
[{
"c": 12,
"s": 123,
"S": "a"
}, {
"cool": -12,
"s": 99,
"S": "b"
}],
batch_size=8,
capacity=32,
min_after_dequeue=16,
seed=223607)
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.shuffle_batch_join(
[[counter, "string"]],
batch_size=batch_size,
capacity=32,
min_after_dequeue=10,
shared_name="SHARED_NAME_XYZ",
name="Q")
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size,), batched[1].get_shape().as_list())
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.cached_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_shuffle_batch_join(
[to_batch] * num_threads,
batch_size,
10,
1,
keep_input,
enqueue_many=enqueue_many)
variables.initialize_all_variables().run()
variables.initialize_local_variables().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = self.evaluate(batched)
self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_v1_only("b/120545219")
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
@test_util.run_v1_only("b/120545219")
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
@test_util.run_v1_only("b/120545219")
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
@test_util.run_v1_only("b/120545219")
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
@test_util.run_deprecated_v1
def testSingleThreadKeepInputPerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
@test_util.run_deprecated_v1
def testMultipleThreadKeepInputPerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
@test_util.run_deprecated_v1
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_shuffle_batch_join(
[[array_ops.zeros(5)]], 1, 10, 1,
keep_input=constant_op.constant([True, False]),
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_shuffle_batch_join(
[[array_ops.zeros(5)]], 1, 10, 1,
keep_input=constant_op.constant([[True]]),
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_shuffle_batch_join(
[[array_ops.zeros(5)]], 1, 10, 1,
keep_input=array_ops.placeholder(dtypes.bool),
enqueue_many=True)
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, True)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, True, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, [True, False], enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, True, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
@test_util.run_deprecated_v1
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, [True, False], enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
if __name__ == "__main__":
test_lib.main()
|
tensorflow-master
|
tensorflow/python/training/input_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create threads to run multiple enqueue ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.queue_runner_impl import *
# pylint: enable=wildcard-import
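# A minimal usage sketch of the queue-runner API re-exported above
# (illustrative only, not part of this module; assumes `tf` is the TF1 API
# and `input_tensor` is some hypothetical op producing values to enqueue):
#
#   queue = tf.FIFOQueue(capacity=32, dtypes=[tf.float32])
#   enqueue_op = queue.enqueue(input_tensor)
#   qr = QueueRunner(queue, [enqueue_op] * 4)  # four enqueue threads
#   add_queue_runner(qr)
#   with tf.Session() as sess:
#     coord = tf.train.Coordinator()
#     threads = start_queue_runners(sess=sess, coord=coord)
#     # ...consume queue.dequeue() in the training loop...
#     coord.request_stop()
#     coord.join(threads)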
|
tensorflow-master
|
tensorflow/python/training/queue_runner.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for basic_loops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.training import basic_loops
from tensorflow.python.training import supervisor
def _test_dir(test_name):
test_dir = os.path.join(test.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
return test_dir
class BasicTrainLoopTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasicTrainLoop(self):
logdir = _test_dir("basic_train_loop")
sv = supervisor.Supervisor(logdir=logdir)
# Counts the number of calls.
num_calls = [0]
def train_fn(unused_sess, sv, y, a):
num_calls[0] += 1
self.assertEqual("y", y)
self.assertEqual("A", a)
if num_calls[0] == 3:
sv.request_stop()
with ops.Graph().as_default():
basic_loops.basic_train_loop(
sv, train_fn, args=(sv, "y"), kwargs={"a": "A"})
self.assertEqual(3, num_calls[0])
@test_util.run_deprecated_v1
def testBasicTrainLoopExceptionAborts(self):
logdir = _test_dir("basic_train_loop_exception_aborts")
sv = supervisor.Supervisor(logdir=logdir)
def train_fn(unused_sess):
train_fn.counter += 1
if train_fn.counter == 3:
raise RuntimeError("Failed")
    # Function attribute used to count the number of calls.
train_fn.counter = 0
with ops.Graph().as_default():
with self.assertRaisesRegexp(RuntimeError, "Failed"):
basic_loops.basic_train_loop(sv, train_fn)
@test_util.run_deprecated_v1
def testBasicTrainLoopRetryOnAborted(self):
    logdir = _test_dir("basic_train_loop_retry_on_aborted")
sv = supervisor.Supervisor(logdir=logdir)
class AbortAndRetry(object):
def __init__(self):
self.num_calls = 0
self.retries_left = 2
def train_fn(self, unused_sess):
self.num_calls += 1
if self.num_calls % 3 == 2:
self.retries_left -= 1
if self.retries_left > 0:
raise errors_impl.AbortedError(None, None, "Aborted here")
else:
raise RuntimeError("Failed Again")
with ops.Graph().as_default():
aar = AbortAndRetry()
with self.assertRaisesRegexp(RuntimeError, "Failed Again"):
basic_loops.basic_train_loop(sv, aar.train_fn)
      self.assertEqual(0, aar.retries_left)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/basic_loops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and creates session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.util.tf_export import tf_export
def _maybe_name(obj):
"""Returns object name if it has one, or a message otherwise.
  This is useful for names that appear in error messages.
Args:
obj: Object to get the name of.
Returns:
name, "None", or a "no name" message.
"""
if obj is None:
return "None"
elif hasattr(obj, "name"):
return obj.name
else:
return "<no name for %s>" % type(obj)
@tf_export(v1=["train.SessionManager"])
class SessionManager(object):
"""Training helper that restores from checkpoint and creates session.
This class is a small wrapper that takes care of session creation and
  checkpoint recovery. It also provides functions to facilitate
  coordination among multiple training threads or processes:
  * Checkpointing trained variables as the training progresses.
  * Initializing variables on startup, restoring them from the most recent
    checkpoint after a crash, or waiting for checkpoints to become available.
### Usage:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will checkpoint the model in '/tmp/mydir'.
sm = SessionManager()
sess = sm.prepare_session(master, init_op, saver, checkpoint_dir)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
  `prepare_session()` initializes or restores a model. It requires `init_op`
  and `saver` as arguments.
A second process could wait for the model to be ready by doing the following:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will wait for the model to become ready.
sm = SessionManager()
sess = sm.wait_for_session(master)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
`wait_for_session()` waits for a model to be initialized by other processes.
"""
def __init__(self,
local_init_op=None,
ready_op=None,
ready_for_local_init_op=None,
graph=None,
recovery_wait_secs=30,
local_init_run_options=None):
"""Creates a SessionManager.
    The `local_init_op` is an `Operation` that is always run after a new
    session is created. If `None`, this step is skipped.
    The `ready_op` is an `Operation` used to check if the model is ready. The
    model is considered ready if that operation returns an empty 1D string
    tensor. If the operation returns a non-empty 1D string tensor, the elements
    are concatenated and used to indicate to the user why the model is not
    ready.
    The `ready_for_local_init_op` is an `Operation` used to check if the model
    is ready to run local_init_op. The model is considered ready if that
    operation returns an empty 1D string tensor. If the operation returns a
    non-empty 1D string tensor, the elements are concatenated and used to
    indicate to the user why the model is not ready.
If `ready_op` is `None`, the model is not checked for readiness.
`recovery_wait_secs` is the number of seconds between checks that
the model is ready. It is used by processes to wait for a model to
be initialized or restored. Defaults to 30 seconds.
Args:
local_init_op: An `Operation` run immediately after session creation.
Usually used to initialize tables and local variables.
ready_op: An `Operation` to check if the model is initialized.
ready_for_local_init_op: An `Operation` to check if the model is ready
to run local_init_op.
graph: The `Graph` that the model will use.
recovery_wait_secs: Seconds between checks for the model to be ready.
local_init_run_options: RunOptions to be passed to session.run when
executing the local_init_op.
Raises:
ValueError: If ready_for_local_init_op is not None but local_init_op is
None
"""
# Sets default values of arguments.
if graph is None:
graph = ops.get_default_graph()
self._local_init_op = local_init_op
self._ready_op = ready_op
self._ready_for_local_init_op = ready_for_local_init_op
self._graph = graph
self._recovery_wait_secs = recovery_wait_secs
self._target = None
self._local_init_run_options = local_init_run_options
if ready_for_local_init_op is not None and local_init_op is None:
raise ValueError("If you pass a ready_for_local_init_op "
"you must also pass a local_init_op "
", ready_for_local_init_op [%s]" %
ready_for_local_init_op)
def _restore_checkpoint(self,
master,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None):
"""Creates a `Session`, and tries to restore a checkpoint.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, is_restored) where 'is_restored' is `True` if
the session could be restored, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
self._target = master
    # This is required so that we initialize the TPU device before restoring
    # from checkpoint, since we'll be placing variables on the device and
    # TPUInitialize wipes out the memory of the device.
strategy = distribution_strategy_context.get_strategy()
if strategy and hasattr(strategy.extended,
"_experimental_initialize_system"):
strategy.extended._experimental_initialize_system() # pylint: disable=protected-access
sess = session.Session(self._target, graph=self._graph, config=config)
if checkpoint_dir and checkpoint_filename_with_path:
raise ValueError("Can not provide both checkpoint_dir and "
"checkpoint_filename_with_path.")
# If either saver or checkpoint_* is not specified, cannot restore. Just
# return.
if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
return sess, False
if checkpoint_filename_with_path:
saver.restore(sess, checkpoint_filename_with_path)
return sess, True
    # Waits up to max_wait_secs for the checkpoint to become available.
wait_time = 0
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
while not ckpt or not ckpt.model_checkpoint_path:
if wait_for_checkpoint and wait_time < max_wait_secs:
logging.info("Waiting for checkpoint to be available.")
time.sleep(self._recovery_wait_secs)
wait_time += self._recovery_wait_secs
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
else:
return sess, False
# Loads the checkpoint.
saver.restore(sess, ckpt.model_checkpoint_path)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
return sess, True
def prepare_session(self,
master,
init_op=None,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None,
init_feed_dict=None,
init_fn=None):
"""Creates a `Session`. Makes sure the model is ready to be used.
Creates a `Session` on 'master'. If a `saver` object is passed in, and
`checkpoint_dir` points to a directory containing valid checkpoint
files, then it will try to recover the model from checkpoint. If
no checkpoint files are available, and `wait_for_checkpoint` is
`True`, then the process would check every `recovery_wait_secs`,
up to `max_wait_secs`, for recovery to succeed.
If the model cannot be recovered successfully then it is initialized by
running the `init_op` and calling `init_fn` if they are provided.
The `local_init_op` is also run after init_op and init_fn, regardless of
whether the model was recovered successfully, but only if
`ready_for_local_init_op` passes.
If the model is recovered from a checkpoint it is assumed that all
global variables have been initialized, in particular neither `init_op`
nor `init_fn` will be executed.
    It is an error if the model cannot be recovered and none of `init_op`,
    `init_fn`, or `local_init_op` is passed.
Args:
master: `String` representation of the TensorFlow master to use.
init_op: Optional `Operation` used to initialize the model.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
init_feed_dict: Optional dictionary that maps `Tensor` objects to feed
values. This feed dictionary is passed to the session `run()` call when
running the init op.
init_fn: Optional callable used to initialize the model. Called after the
optional `init_op` is called. The callable must accept one argument,
the session being initialized.
Returns:
A `Session` object that can be used to drive the model.
Raises:
RuntimeError: If the model cannot be initialized or recovered.
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
sess, is_loaded_from_checkpoint = self._restore_checkpoint(
master,
saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config)
if not is_loaded_from_checkpoint:
if init_op is None and not init_fn and self._local_init_op is None:
raise RuntimeError("Model is not initialized and no init_op or "
"init_fn or local_init_op was given")
if init_op is not None:
sess.run(init_op, feed_dict=init_feed_dict)
if init_fn:
init_fn(sess)
local_init_success, msg = self._try_run_local_init_op(sess)
if not local_init_success:
raise RuntimeError(
"Init operations did not make model ready for local_init. "
"Init op: %s, init fn: %s, error: %s" % (_maybe_name(init_op),
init_fn,
msg))
is_ready, msg = self._model_ready(sess)
if not is_ready:
raise RuntimeError(
"Init operations did not make model ready. "
"Init op: %s, init fn: %s, local_init_op: %s, error: %s" %
(_maybe_name(init_op), init_fn, self._local_init_op, msg))
return sess
def recover_session(self,
master,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None):
"""Creates a `Session`, recovering if possible.
Creates a new session on 'master'. If the session is not initialized
and can be recovered from a checkpoint, recover it.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, initialized) where 'initialized' is `True` if
the session could be recovered and initialized, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
sess, is_loaded_from_checkpoint = self._restore_checkpoint(
master,
saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config)
# Always try to run local_init_op
local_init_success, msg = self._try_run_local_init_op(sess)
if not is_loaded_from_checkpoint:
# Do not need to run checks for readiness
return sess, False
restoring_file = checkpoint_dir or checkpoint_filename_with_path
if not local_init_success:
logging.info(
"Restoring model from %s did not make model ready for local init:"
" %s", restoring_file, msg)
return sess, False
is_ready, msg = self._model_ready(sess)
if not is_ready:
logging.info("Restoring model from %s did not make model ready: %s",
restoring_file, msg)
return sess, False
logging.info("Restored model from %s", restoring_file)
return sess, is_loaded_from_checkpoint
def wait_for_session(self, master, config=None, max_wait_secs=float("Inf")):
"""Creates a new `Session` and waits for model to be ready.
Creates a new `Session` on 'master'. Waits for the model to be
    initialized or recovered from a checkpoint. It's expected that
    another thread or process will make the model ready; this method
    is intended to be used by threads/processes that participate in a
    distributed training configuration where a different thread/process
    is responsible for initializing or recovering the model being trained.
NB: The amount of time this method waits for the session is bounded
by max_wait_secs. By default, this function will wait indefinitely.
Args:
master: `String` representation of the TensorFlow master to use.
config: Optional ConfigProto proto used to configure the session.
max_wait_secs: Maximum time to wait for the session to become available.
Returns:
A `Session`. May be None if the operation exceeds the timeout
specified by config.operation_timeout_in_ms.
Raises:
tf.DeadlineExceededError: if the session is not available after
max_wait_secs.
"""
self._target = master
if max_wait_secs is None:
max_wait_secs = float("Inf")
timer = _CountDownTimer(max_wait_secs)
while True:
sess = session.Session(self._target, graph=self._graph, config=config)
not_ready_msg = None
not_ready_local_msg = None
local_init_success, not_ready_local_msg = self._try_run_local_init_op(
sess)
if local_init_success:
# Successful if local_init_op is None, or ready_for_local_init_op passes
is_ready, not_ready_msg = self._model_ready(sess)
if is_ready:
return sess
self._safe_close(sess)
# Do we have enough time left to try again?
      remaining_secs_after_wait = (
          timer.secs_remaining() - self._recovery_wait_secs)
      if remaining_secs_after_wait < 0:
raise errors.DeadlineExceededError(
None, None,
"Session was not ready after waiting %d secs." % (max_wait_secs,))
logging.info("Waiting for model to be ready. "
"Ready_for_local_init_op: %s, ready: %s",
not_ready_local_msg, not_ready_msg)
time.sleep(self._recovery_wait_secs)
def _safe_close(self, sess):
"""Closes a session without raising an exception.
Just like sess.close() but ignores exceptions.
Args:
sess: A `Session`.
"""
# pylint: disable=broad-except
try:
sess.close()
except Exception:
# Intentionally not logging to avoid user complaints that
# they get cryptic errors. We really do not care that Close
# fails.
pass
# pylint: enable=broad-except
def _model_ready(self, sess):
"""Checks if the model is ready or not.
Args:
sess: A `Session`.
Returns:
A tuple (is_ready, msg), where is_ready is True if ready and False
otherwise, and msg is `None` if the model is ready, a `String` with the
reason why it is not ready otherwise.
"""
return _ready(self._ready_op, sess, "Model not ready")
def _model_ready_for_local_init(self, sess):
"""Checks if the model is ready to run local_init_op.
Args:
sess: A `Session`.
Returns:
A tuple (is_ready, msg), where is_ready is True if ready to run
local_init_op and False otherwise, and msg is `None` if the model is
ready to run local_init_op, a `String` with the reason why it is not ready
otherwise.
"""
return _ready(self._ready_for_local_init_op, sess,
"Model not ready for local init")
def _try_run_local_init_op(self, sess):
"""Tries to run _local_init_op, if not None, and is ready for local init.
Args:
sess: A `Session`.
Returns:
A tuple (is_successful, msg), where is_successful is True if
_local_init_op is None, or we ran _local_init_op, and False otherwise;
and msg is a `String` with the reason why the model was not ready to run
local init.
"""
if self._local_init_op is not None:
is_ready_for_local_init, msg = self._model_ready_for_local_init(sess)
if is_ready_for_local_init:
logging.info("Running local_init_op.")
sess.run(self._local_init_op, options=self._local_init_run_options)
logging.info("Done running local_init_op.")
return True, None
else:
return False, msg
return True, None
def _ready(op, sess, msg):
"""Checks if the model is ready or not, as determined by op.
Args:
op: An op, either _ready_op or _ready_for_local_init_op, which defines the
readiness of the model.
sess: A `Session`.
    msg: A message to log as a warning if the model is not ready.
Returns:
A tuple (is_ready, msg), where is_ready is True if ready and False
otherwise, and msg is `None` if the model is ready, a `String` with the
reason why it is not ready otherwise.
"""
if op is None:
return True, None
else:
try:
ready_value = sess.run(op)
# The model is considered ready if ready_op returns an empty 1-D tensor.
# Also compare to `None` and dtype being int32 for backward
# compatibility.
if (ready_value is None or ready_value.dtype == np.int32 or
ready_value.size == 0):
return True, None
else:
# TODO(sherrym): If a custom ready_op returns other types of tensor,
# or strings other than variable names, this message could be
# confusing.
non_initialized_varnames = ", ".join(
[i.decode("utf-8") for i in ready_value])
return False, "Variables not initialized: " + non_initialized_varnames
except errors.FailedPreconditionError as e:
if "uninitialized" not in str(e):
logging.warning("%s : error [%s]", msg, str(e))
raise e
return False, str(e)
class _CountDownTimer(object):
def __init__(self, duration_secs):
self._start_time_secs = time.time()
self._duration_secs = duration_secs
def secs_remaining(self):
diff = self._duration_secs - (time.time() - self._start_time_secs)
return max(0, diff)
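# A minimal illustrative sketch (not part of the public API; the helper
# name below is hypothetical) of how the `_CountDownTimer` above drives
# the retry loop in `wait_for_session`: probe, then check whether enough
# budget remains for another recovery wait.
def _demo_count_down_timer(budget_secs=0.05, probe_secs=0.01):
  """Polls until the assumed time budget is exhausted; returns probe count."""
  timer = _CountDownTimer(budget_secs)
  probes = 0
  while timer.secs_remaining() - probe_secs > 0:
    time.sleep(probe_secs)  # Stand-in for one readiness probe.
    probes += 1
  # secs_remaining() is clamped at zero once the duration has elapsed.
  return probes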
|
tensorflow-master
|
tensorflow/python/training/session_manager.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
def adam_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
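# A small illustrative self-check (hypothetical helper, not one of the
# tests below): one numpy Adam step with the defaults above. With
# m = v = 0 and t = 1, the bias-corrected update reduces to roughly
# -alpha * sign(g_t), i.e. about -0.001 here.
def _demo_adam_update_numpy():
  param = np.array([1.0])
  grad = np.array([0.1])
  param_t, m_t, v_t = adam_update_numpy(param, grad, t=1, m=0.0, v=0.0)
  np.testing.assert_allclose(param_t, [1.0 - 0.001], rtol=1e-3)
  return param_t, m_t, v_t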
class AdamOptimizerTest(test.TestCase):
def doTestSparse(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.RefVariable(var0_np)
var1 = variables.RefVariable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = adam.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparse(self):
self.doTestSparse(use_resource=False)
@test_util.run_deprecated_v1
def testResourceSparse(self):
self.doTestSparse(use_resource=True)
@test_util.run_deprecated_v1
def testSparseDevicePlacement(self):
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
optimizer = adam.AdamOptimizer(3.0)
minimize_op = optimizer.minimize(gathered_sum)
variables.global_variables_initializer().run()
minimize_op.run()
@test_util.run_deprecated_v1
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adam.AdamOptimizer().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.AdamOptimizer().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
def doTestBasic(self, use_resource=False, use_callable_params=False):
if context.executing_eagerly() and not use_resource:
self.skipTest(
"Skipping test with use_resource=False and executing eagerly.")
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
else:
var0 = variables.RefVariable(var0_np)
var1 = variables.RefVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = adam.AdamOptimizer(learning_rate=learning_rate)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
opt_variables = opt.variables()
beta1_power, beta2_power = opt._get_beta_accumulators()
self.assertTrue(beta1_power is not None)
self.assertTrue(beta2_power is not None)
self.assertIn(beta1_power, opt_variables)
self.assertIn(beta2_power, opt_variables)
# Ensure that non-slot variables are the same type as the requested
# variables.
self.assertEqual(
use_resource,
resource_variable_ops.is_resource_variable(beta1_power))
self.assertEqual(
use_resource,
resource_variable_ops.is_resource_variable(beta2_power))
if not context.executing_eagerly():
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
if not context.executing_eagerly():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta2_power))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if use_resource:
self.assertEqual("var0_%d/Adam:0" % (i,),
opt.get_slot(var=var0, name="m").name)
def testBasic(self):
with self.cached_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_resource=True, use_callable_params=True)
@test_util.run_deprecated_v1
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.AdamOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.AdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTwoSessions(self):
optimizer = adam.AdamOptimizer()
with context.eager_mode():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
g = ops.Graph()
with g.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
gg = ops.Graph()
with gg.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
        # If the optimizer saves any state not keyed by graph, the following
        # line fails.
optimizer.apply_gradients([(grads0, var0)])
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = adam.AdamOptimizer(1.)
opt.minimize(lambda: v1 + v2)
      # There should be two non-slot variables (the beta accumulators) and
      # two unique slot variables each for v1 and v2, six variables in total.
self.assertEqual(6, len(set(opt.variables())))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/adam_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import time
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
def StopOnEvent(coord, wait_for_stop, set_when_stopped):
wait_for_stop.wait()
coord.request_stop()
set_when_stopped.set()
def RaiseOnEvent(coord, wait_for_stop, set_when_stopped, ex, report_exception):
try:
wait_for_stop.wait()
raise ex
except RuntimeError as e:
if report_exception:
coord.request_stop(e)
else:
coord.request_stop(sys.exc_info())
finally:
if set_when_stopped:
set_when_stopped.set()
def RaiseOnEventUsingContextHandler(coord, wait_for_stop, set_when_stopped, ex):
with coord.stop_on_exception():
wait_for_stop.wait()
raise ex
if set_when_stopped:
set_when_stopped.set()
def SleepABit(n_secs, coord=None):
if coord:
coord.register_thread(threading.current_thread())
time.sleep(n_secs)
def WaitForThreadsToRegister(coord, num_threads):
while True:
with coord._lock:
if len(coord._registered_threads) == num_threads:
break
time.sleep(0.001)
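# An illustrative sketch (hypothetical helper, not one of the tests
# below) of the canonical Coordinator pattern these tests exercise:
# workers poll should_stop(), the owner requests a stop and then joins.
def _demo_coordinator_pattern(num_workers=2):
  coord = coordinator.Coordinator()
  def _worker():
    while not coord.should_stop():
      time.sleep(0.01)
  workers = [threading.Thread(target=_worker) for _ in range(num_workers)]
  for t in workers:
    t.start()
  coord.request_stop()
  coord.join(workers)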
class CoordinatorTest(test.TestCase):
def testStopAPI(self):
coord = coordinator.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
coord.request_stop()
self.assertTrue(coord.should_stop())
self.assertTrue(coord.wait_for_stop(0.01))
def testStopAsync(self):
coord = coordinator.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.1))
wait_for_stop_ev = threading.Event()
has_stopped_ev = threading.Event()
t = threading.Thread(
target=StopOnEvent, args=(coord, wait_for_stop_ev, has_stopped_ev))
t.start()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
wait_for_stop_ev.set()
has_stopped_ev.wait()
self.assertTrue(coord.wait_for_stop(0.05))
self.assertTrue(coord.should_stop())
def testJoin(self):
coord = coordinator.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01,)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01,))
]
for t in threads:
t.start()
coord.join(threads)
for t in threads:
self.assertFalse(t.is_alive())
def testJoinAllRegistered(self):
coord = coordinator.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01, coord)),
threading.Thread(target=SleepABit, args=(0.02, coord)),
threading.Thread(target=SleepABit, args=(0.01, coord))
]
for t in threads:
t.start()
WaitForThreadsToRegister(coord, 3)
coord.join()
for t in threads:
self.assertFalse(t.is_alive())
def testJoinSomeRegistered(self):
coord = coordinator.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01, coord)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01, coord))
]
for t in threads:
t.start()
WaitForThreadsToRegister(coord, 2)
    # threads[1] is not registered, so we must pass it in explicitly.
    coord.join(threads[1:2])
for t in threads:
self.assertFalse(t.is_alive())
def testJoinGraceExpires(self):
def TestWithGracePeriod(stop_grace_period):
coord = coordinator.Coordinator()
wait_for_stop_ev = threading.Event()
has_stopped_ev = threading.Event()
threads = [
threading.Thread(
target=StopOnEvent,
args=(coord, wait_for_stop_ev, has_stopped_ev)),
threading.Thread(target=SleepABit, args=(10.0,))
]
for t in threads:
t.daemon = True
t.start()
wait_for_stop_ev.set()
has_stopped_ev.wait()
with self.assertRaisesRegexp(RuntimeError, "threads still running"):
coord.join(threads, stop_grace_period_secs=stop_grace_period)
TestWithGracePeriod(1e-10)
TestWithGracePeriod(0.002)
TestWithGracePeriod(1.0)
def testJoinWithoutGraceExpires(self):
coord = coordinator.Coordinator()
wait_for_stop_ev = threading.Event()
has_stopped_ev = threading.Event()
threads = [
threading.Thread(
target=StopOnEvent, args=(coord, wait_for_stop_ev, has_stopped_ev)),
threading.Thread(target=SleepABit, args=(10.0,))
]
for t in threads:
t.daemon = True
t.start()
wait_for_stop_ev.set()
has_stopped_ev.wait()
coord.join(threads, stop_grace_period_secs=1., ignore_live_threads=True)
def testJoinRaiseReportExcInfo(self):
coord = coordinator.Coordinator()
ev_1 = threading.Event()
ev_2 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, ev_2, RuntimeError("First"), False)),
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_2, None, RuntimeError("Too late"), False))
]
for t in threads:
t.start()
ev_1.set()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinRaiseReportException(self):
coord = coordinator.Coordinator()
ev_1 = threading.Event()
ev_2 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, ev_2, RuntimeError("First"), True)),
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_2, None, RuntimeError("Too late"), True))
]
for t in threads:
t.start()
ev_1.set()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinIgnoresOutOfRange(self):
coord = coordinator.Coordinator()
ev_1 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, None,
errors_impl.OutOfRangeError(None, None, "First"), True))
]
for t in threads:
t.start()
ev_1.set()
coord.join(threads)
def testJoinIgnoresMyExceptionType(self):
coord = coordinator.Coordinator(clean_stop_exception_types=(ValueError,))
ev_1 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, None, ValueError("Clean stop"), True))
]
for t in threads:
t.start()
ev_1.set()
coord.join(threads)
def testJoinRaiseReportExceptionUsingHandler(self):
coord = coordinator.Coordinator()
ev_1 = threading.Event()
ev_2 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEventUsingContextHandler,
args=(coord, ev_1, ev_2, RuntimeError("First"))),
threading.Thread(
target=RaiseOnEventUsingContextHandler,
args=(coord, ev_2, None, RuntimeError("Too late")))
]
for t in threads:
t.start()
ev_1.set()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testClearStopClearsExceptionToo(self):
coord = coordinator.Coordinator()
ev_1 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, None, RuntimeError("First"), True)),
]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
ev_1.set()
coord.join(threads)
coord.clear_stop()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, None, RuntimeError("Second"), True)),
]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "Second"):
ev_1.set()
coord.join(threads)
def testRequestStopRaisesIfJoined(self):
coord = coordinator.Coordinator()
# Join the coordinator right away.
coord.join([])
reported = False
with self.assertRaisesRegexp(RuntimeError, "Too late"):
try:
raise RuntimeError("Too late")
except RuntimeError as e:
reported = True
coord.request_stop(e)
self.assertTrue(reported)
# If we clear_stop the exceptions are handled normally.
coord.clear_stop()
try:
raise RuntimeError("After clear")
except RuntimeError as e:
coord.request_stop(e)
with self.assertRaisesRegexp(RuntimeError, "After clear"):
coord.join([])
def testRequestStopRaisesIfJoined_ExcInfo(self):
    # Same as testRequestStopRaisesIfJoined but using sys.exc_info().
coord = coordinator.Coordinator()
# Join the coordinator right away.
coord.join([])
reported = False
with self.assertRaisesRegexp(RuntimeError, "Too late"):
try:
raise RuntimeError("Too late")
except RuntimeError:
reported = True
coord.request_stop(sys.exc_info())
self.assertTrue(reported)
# If we clear_stop the exceptions are handled normally.
coord.clear_stop()
try:
raise RuntimeError("After clear")
except RuntimeError:
coord.request_stop(sys.exc_info())
with self.assertRaisesRegexp(RuntimeError, "After clear"):
coord.join([])
def _StopAt0(coord, n):
if n[0] == 0:
coord.request_stop()
else:
n[0] -= 1
class LooperTest(test.TestCase):
def testTargetArgs(self):
n = [3]
coord = coordinator.Coordinator()
thread = coordinator.LooperThread.loop(
coord, 0, target=_StopAt0, args=(coord, n))
coord.join([thread])
self.assertEqual(0, n[0])
def testTargetKwargs(self):
n = [3]
coord = coordinator.Coordinator()
thread = coordinator.LooperThread.loop(
coord, 0, target=_StopAt0, kwargs={
"coord": coord,
"n": n
})
coord.join([thread])
self.assertEqual(0, n[0])
def testTargetMixedArgs(self):
n = [3]
coord = coordinator.Coordinator()
thread = coordinator.LooperThread.loop(
coord, 0, target=_StopAt0, args=(coord,), kwargs={
"n": n
})
coord.join([thread])
self.assertEqual(0, n[0])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/coordinator_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ProximalGradientDescent for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
# pylint: disable=unused-import
from tensorflow.python.ops import math_ops
# pylint: enable=unused-import
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.ProximalGradientDescentOptimizer"])
class ProximalGradientDescentOptimizer(optimizer.Optimizer):
# pylint: disable=line-too-long
"""Optimizer that implements the proximal gradient descent algorithm.
See this [paper](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf).
"""
def __init__(self, learning_rate, l1_regularization_strength=0.0,
l2_regularization_strength=0.0, use_locking=False,
name="ProximalGradientDescent"):
"""Construct a new proximal gradient descent optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning
rate to use.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
      use_locking: If True, use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "ProximalGradientDescent".
"""
super(ProximalGradientDescentOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
self._l1_regularization_strength_tensor = None
self._l2_regularization_strength_tensor = None
def _apply_dense(self, grad, var):
return training_ops.apply_proximal_gradient_descent(
var,
self._learning_rate_tensor,
self._l1_regularization_strength_tensor,
self._l2_regularization_strength_tensor,
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
return training_ops.resource_apply_proximal_gradient_descent(
var.handle,
self._learning_rate_tensor,
self._l1_regularization_strength_tensor,
self._l2_regularization_strength_tensor,
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
return training_ops.sparse_apply_proximal_gradient_descent(
var,
self._learning_rate_tensor,
self._l1_regularization_strength_tensor,
self._l2_regularization_strength_tensor,
grad.values,
grad.indices,
use_locking=self._use_locking).op
def _resource_apply_sparse(self, grad, var, indices):
return training_ops.resource_sparse_apply_proximal_gradient_descent(
var.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
name="learning_rate")
self._l1_regularization_strength_tensor = ops.convert_to_tensor(
self._l1_regularization_strength, name="l1_regularization_strength")
self._l2_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_regularization_strength, name="l2_regularization_strength")
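# An illustrative numpy sketch (an assumption for exposition; the
# optimizer above delegates to training_ops kernels instead) of the
# proximal gradient descent update for the l1/l2 penalties: a plain
# gradient step followed by the proximal operator,
#   prox_v = var - lr * grad
#   var    = sign(prox_v) * max(|prox_v| - lr * l1, 0) / (1 + lr * l2)
def _proximal_step_numpy_sketch(var, grad, lr, l1, l2):
  import numpy as np  # Local import; the module above does not need numpy.
  prox_v = var - lr * grad
  return (np.sign(prox_v) * np.maximum(np.abs(prox_v) - lr * l1, 0.0) /
          (1.0 + lr * l2))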
|
tensorflow-master
|
tensorflow/python/training/proximal_gradient_descent.py
|
tensorflow-master
|
tensorflow/python/training/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes.
Note that the symbols that are exported to v1 tf.train namespace are also
exported to v2 in tf.estimator namespace. See
https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.client import timeline
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
from tensorflow.python.util.tf_export import tf_export
_HOOKS = "hooks"
_STEPS_PER_RUN_VAR = "steps_per_run"
class _HookTimer(object):
"""Base timer for determining when Hooks should trigger.
Should not be instantiated directly.
"""
def __init__(self):
pass
def reset(self):
"""Resets the timer."""
pass
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step."""
raise NotImplementedError
def update_last_triggered_step(self, step):
"""Update the last triggered time and step number.
Args:
step: The current step.
Returns:
A pair `(elapsed_time, elapsed_steps)`, where `elapsed_time` is the number
of seconds between the current trigger and the last one (a float), and
`elapsed_steps` is the number of steps between the current trigger and
the last one. Both values will be set to `None` on the first trigger.
"""
raise NotImplementedError
def last_triggered_step(self):
"""Returns the last triggered time step or None if never triggered."""
raise NotImplementedError
@tf_export(v1=["train.SecondOrStepTimer"])
class SecondOrStepTimer(_HookTimer):
"""Timer that triggers at most once every N seconds or once every N steps.
This symbol is also exported to v2 in tf.estimator namespace. See
https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py
"""
def __init__(self, every_secs=None, every_steps=None):
self.reset()
self._every_secs = every_secs
self._every_steps = every_steps
if self._every_secs is None and self._every_steps is None:
raise ValueError("Either every_secs or every_steps should be provided.")
if (self._every_secs is not None) and (self._every_steps is not None):
raise ValueError("Can not provide both every_secs and every_steps.")
super(SecondOrStepTimer, self).__init__()
def reset(self):
self._last_triggered_step = None
self._last_triggered_time = None
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step.
Args:
step: Training step to trigger on.
Returns:
True if the difference between the current time and the time of the last
trigger exceeds `every_secs`, or if the difference between the current
step and the last triggered step exceeds `every_steps`. False otherwise.
"""
if self._last_triggered_step is None:
return True
if self._last_triggered_step == step:
return False
if self._every_secs is not None:
if time.time() >= self._last_triggered_time + self._every_secs:
return True
if self._every_steps is not None:
if step >= self._last_triggered_step + self._every_steps:
return True
return False
def update_last_triggered_step(self, step):
current_time = time.time()
if self._last_triggered_time is None:
elapsed_secs = None
elapsed_steps = None
else:
elapsed_secs = current_time - self._last_triggered_time
elapsed_steps = step - self._last_triggered_step
self._last_triggered_time = current_time
self._last_triggered_step = step
return (elapsed_secs, elapsed_steps)
def last_triggered_step(self):
return self._last_triggered_step
class NeverTriggerTimer(_HookTimer):
"""Timer that never triggers."""
def should_trigger_for_step(self, step):
_ = step
return False
def update_last_triggered_step(self, step):
_ = step
return (None, None)
def last_triggered_step(self):
return None
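# An illustrative sketch (hypothetical helper, not part of the module)
# of step-based triggering with SecondOrStepTimer: the first query
# always fires, then at most once every `every_steps` steps.
def _demo_second_or_step_timer():
  timer = SecondOrStepTimer(every_steps=10)
  assert timer.should_trigger_for_step(1)      # First query always triggers.
  timer.update_last_triggered_step(1)
  assert not timer.should_trigger_for_step(5)  # Fewer than 10 steps later.
  assert timer.should_trigger_for_step(11)     # 10 steps after the last one.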
@tf_export(v1=["train.LoggingTensorHook"])
class LoggingTensorHook(session_run_hook.SessionRunHook):
"""Prints the given tensors every N local steps, every N seconds, or at end.
The tensors will be printed to the log, with `INFO` severity. If you are not
seeing the logs, you might want to add the following line after your imports:
```python
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
```
Note that if `at_end` is True, `tensors` should not include any tensor
whose evaluation produces a side effect such as consuming additional inputs.
"""
def __init__(self,
tensors,
every_n_iter=None,
every_n_secs=None,
at_end=False,
formatter=None):
"""Initializes a `LoggingTensorHook`.
Args:
tensors: `dict` that maps string-valued tags to tensors/tensor names, or
`iterable` of tensors/tensor names.
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
every_n_secs: `int` or `float`, print the values of `tensors` once every N
seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
provided.
at_end: `bool` specifying whether to print the values of `tensors` at the
end of the run.
formatter: function, takes dict of `tag`->`Tensor` and returns a string.
If `None` uses default printing all tensors.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
only_log_at_end = (
at_end and (every_n_iter is None) and (every_n_secs is None))
if (not only_log_at_end and
(every_n_iter is None) == (every_n_secs is None)):
raise ValueError(
"either at_end and/or exactly one of every_n_iter and every_n_secs "
"must be provided.")
if every_n_iter is not None and every_n_iter <= 0:
raise ValueError("invalid every_n_iter=%s." % every_n_iter)
if not isinstance(tensors, dict):
self._tag_order = tensors
tensors = {item: item for item in tensors}
else:
self._tag_order = sorted(tensors.keys())
self._tensors = tensors
self._formatter = formatter
self._timer = (
NeverTriggerTimer() if only_log_at_end else SecondOrStepTimer(
every_secs=every_n_secs, every_steps=every_n_iter))
self._log_at_end = at_end
def begin(self):
self._timer.reset()
self._iter_count = 0
# Convert names to tensors if given
self._current_tensors = {
tag: _as_graph_element(tensor)
for (tag, tensor) in self._tensors.items()
}
def before_run(self, run_context): # pylint: disable=unused-argument
self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
if self._should_trigger:
return SessionRunArgs(self._current_tensors)
else:
return None
def _log_tensors(self, tensor_values):
original = np.get_printoptions()
np.set_printoptions(suppress=True)
elapsed_secs, _ = self._timer.update_last_triggered_step(self._iter_count)
if self._formatter:
logging.info(self._formatter(tensor_values))
else:
stats = []
for tag in self._tag_order:
stats.append("%s = %s" % (tag, tensor_values[tag]))
if elapsed_secs is not None:
logging.info("%s (%.3f sec)", ", ".join(stats), elapsed_secs)
else:
logging.info("%s", ", ".join(stats))
np.set_printoptions(**original)
def after_run(self, run_context, run_values):
_ = run_context
if self._should_trigger:
self._log_tensors(run_values.results)
self._iter_count += 1
def end(self, session):
if self._log_at_end:
values = session.run(self._current_tensors)
self._log_tensors(values)
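# An illustrative sketch (hypothetical helper; the tensor name "loss:0"
# is a placeholder) of constructing a LoggingTensorHook that logs a
# loss value every 100 local steps, as described in the class docstring.
def _demo_logging_tensor_hook():
  return LoggingTensorHook(tensors={"loss": "loss:0"}, every_n_iter=100)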
def get_or_create_steps_per_run_variable():
"""Gets or creates the steps_per_run variable.
In Estimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each
device program execution and before the next execution.
  The purpose of using a variable, rather than a constant, is to allow
  Estimator to adapt the device training iterations according to the final
  steps specified by users. For example, if the user sets the steps_per_run as
4 and steps as 10 in Estimator.train(), the steps_per_run
variable will have the following value before each training run.
  - 1st execution: steps_per_run = 4
  - 2nd execution: steps_per_run = 4
  - 3rd execution: steps_per_run = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
    RuntimeError: If multiple steps_per_run variables are found.
"""
graph = ops.get_default_graph()
collection_name = "{}_{}".format(_HOOKS, _STEPS_PER_RUN_VAR)
steps_per_run_vars = graph.get_collection(collection_name)
if len(steps_per_run_vars) == 1:
return steps_per_run_vars[0]
elif len(steps_per_run_vars) > 1:
raise RuntimeError("Multiple steps_per_run_var in collection.")
with variable_scope.variable_scope(_HOOKS, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_STEPS_PER_RUN_VAR,
initializer=init_ops.ones_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
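# An illustrative sketch (hypothetical helper, not part of the module)
# of the per-execution steps_per_run schedule described above; it
# mirrors the min() logic in _MultiStepStopAtStepHook below and yields
# [4, 4, 2] for total_steps=10, steps_per_run=4.
def _demo_steps_per_run_schedule(total_steps, steps_per_run):
  schedule = []
  remaining = total_steps
  while remaining > 0:
    steps = min(steps_per_run, remaining)
    schedule.append(steps)
    remaining -= steps
  return schedule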
class _MultiStepStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps=None, last_step=None, steps_per_run=1):
"""Initializes a `MultiStepStopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
    If `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
In Estimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The steps_per_run variable
determines the number of iterations of the loop before returning to the CPU.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
steps_per_run: Number of steps executed per run call.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
if steps_per_run is None or steps_per_run < 1:
raise ValueError("steps_per_run should be greater than 0")
self._num_steps = num_steps
self._last_step = last_step
self._steps_per_run_initial_value = steps_per_run
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
self._steps_per_run_variable = get_or_create_steps_per_run_variable()
def _update_steps_per_run_variable(self, global_step, session):
steps = min(self._last_step - global_step,
self._steps_per_run_initial_value)
self._steps_per_run_variable.load(steps, session=session)
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
self._update_steps_per_run_variable(global_step, session)
def after_run(self, run_context, run_values):
    # Global step cannot be retrieved via SessionRunArgs and before_run due
    # to a race condition in hook execution.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
self._update_steps_per_run_variable(global_step, run_context.session)
@tf_export(v1=["train.StopAtStepHook"])
class StopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
    If `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def after_create_session(self, session, coord):
if self._last_step is None:
global_step = session.run(self._global_step_tensor)
self._last_step = global_step + self._num_steps
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results + 1
if global_step >= self._last_step:
      # Check the latest global step to ensure that the targeted last step is
      # reached. The global_step read tensor holds the value of the global
      # step before the operation runs, so we cannot tell whether this
      # session.run incremented it; re-read the global step here to check.
step = run_context.session.run(self._global_step_tensor)
if step >= self._last_step:
run_context.request_stop()
@tf_export(v1=["train.CheckpointSaverListener"])
class CheckpointSaverListener(object):
"""Interface for listeners that take action before or after checkpoint save.
`CheckpointSaverListener` triggers only in steps when `CheckpointSaverHook` is
triggered, and provides callbacks at the following points:
- before using the session
- before each call to `Saver.save()`
- after each call to `Saver.save()`
- at the end of session
To use a listener, implement a class and pass the listener to a
`CheckpointSaverHook`, as in this example:
```python
class ExampleCheckpointSaverListener(CheckpointSaverListener):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def before_save(self, session, global_step_value):
print('About to write a checkpoint')
def after_save(self, session, global_step_value):
print('Done writing checkpoint.')
if decided_to_stop_training():
return True
def end(self, session, global_step_value):
print('Done with the session.')
...
listener = ExampleCheckpointSaverListener()
saver_hook = tf.estimator.CheckpointSaverHook(
checkpoint_dir, listeners=[listener])
  with tf.compat.v1.train.MonitoredTrainingSession(
      chief_only_hooks=[saver_hook]):
...
```
A `CheckpointSaverListener` may simply take some action after every
checkpoint save. It is also possible for the listener to use its own schedule
to act less frequently, e.g. based on global_step_value. In this case,
implementors should implement the `end()` method to handle actions related to
the last checkpoint save. But the listener should not act twice if
`after_save()` already handled this last checkpoint save.
A `CheckpointSaverListener` can request training to be stopped, by returning
  True in `after_save`. Please note that, in a replicated distributed training
  setting, only `chief` should use this behavior. Otherwise each worker will
  do its own evaluation, which may be wasteful of resources.
"""
def begin(self):
pass
def before_save(self, session, global_step_value):
pass
def after_save(self, session, global_step_value):
pass
def end(self, session, global_step_value):
pass
@tf_export(v1=["train.CheckpointSaverHook"])
class CheckpointSaverHook(session_run_hook.SessionRunHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create CheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
    # We write the graph and saver_def here, on the first call after session
    # creation. We cannot do this in begin, since we let other hooks change
    # the graph and add variables in begin. The graph is finalized after all
    # begin calls.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(stale_global_step +
self._steps_per_run):
      # Get the real value after the train op has run.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
last_step = session.run(self._global_step_tensor)
if last_step != self._timer.last_triggered_step():
self._save(session, last_step)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step):
"""Saves the latest checkpoint, returns should_stop."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
should_stop = False
for l in self._listeners:
if l.after_save(session, step):
logging.info(
"A CheckpointSaverListener requested that training be stopped. "
"listener: {}".format(l))
should_stop = True
return should_stop
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
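# An illustrative sketch (hypothetical helper; "/tmp/ckpts" is a
# placeholder directory) of wiring a CheckpointSaverHook that saves
# every 1000 steps, relying on the SAVERS collection for the saver.
def _demo_checkpoint_saver_hook():
  return CheckpointSaverHook(checkpoint_dir="/tmp/ckpts", save_steps=1000)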
@tf_export(v1=["train.StepCounterHook"])
class StepCounterHook(session_run_hook.SessionRunHook):
"""Hook that counts steps per second."""
def __init__(self,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
if (every_n_steps is None) == (every_n_secs is None):
raise ValueError(
"exactly one of every_n_steps and every_n_secs should be provided.")
self._timer = SecondOrStepTimer(
every_steps=every_n_steps, every_secs=every_n_secs)
self._summary_writer = summary_writer
self._output_dir = output_dir
self._last_global_step = None
self._steps_per_run = 1
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use StepCounterHook.")
self._summary_tag = training_util.get_global_step().op.name + "/sec"
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
steps_per_sec = elapsed_steps / elapsed_time
if self._summary_writer is not None:
summary = Summary(value=[
Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)
])
self._summary_writer.add_summary(summary, global_step)
logging.info("%s: %g", self._summary_tag, steps_per_sec)
def after_run(self, run_context, run_values):
_ = run_context
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(stale_global_step +
self._steps_per_run):
      # Get the real value after the train op has run.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
global_step)
if elapsed_time is not None:
self._log_and_record(elapsed_steps, elapsed_time, global_step)
# Check whether the global step has been increased. Here, we do not use the
# timer.last_triggered_step as the timer might record a different global
# step value such that the comparison could be unreliable. For simplicity,
    # we just compare the stale_global_step with the previously recorded one.
if stale_global_step == self._last_global_step:
# Here, we give a warning in the first 5 times if we have observed that
# the global step has not been increased. For some Optimizers, the global
# step is not increased each time by design. For example,
# SyncReplicaOptimizer doesn't increase the global step in worker's main
# train step.
logging.log_first_n(
logging.WARN,
"It seems that global step (tf.train.get_global_step) has not "
"been increased. Current value (could be stable): %s vs previous "
"value: %s. You could increase the global step by passing "
"tf.train.get_global_step() to Optimizer.apply_gradients or "
"Optimizer.minimize.", 5, stale_global_step, self._last_global_step)
self._last_global_step = stale_global_step
@tf_export(v1=["train.NanLossDuringTrainingError"])
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
@tf_export(v1=["train.NanTensorHook"])
class NanTensorHook(session_run_hook.SessionRunHook):
"""Monitors the loss tensor and stops training if loss is NaN.
Can either fail with exception or just stop training.
"""
def __init__(self, loss_tensor, fail_on_nan_loss=True):
"""Initializes a `NanTensorHook`.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
if np.isnan(run_values.results):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we request stop without an exception.
run_context.request_stop()
@tf_export(v1=["train.SummarySaverHook"])
class SummarySaverHook(session_run_hook.SessionRunHook):
"""Saves summaries every N steps."""
def __init__(self,
save_steps=None,
save_secs=None,
output_dir=None,
summary_writer=None,
scaffold=None,
summary_op=None):
"""Initializes a `SummarySaverHook`.
Args:
save_steps: `int`, save summaries every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int`, save summaries every N seconds.
output_dir: `string`, the directory to save the summaries to. Only used if
no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string` containing the serialized `Summary`
protocol buffer or a list of `Tensor`. They are most likely an output by
TF summary methods like `tf.compat.v1.summary.scalar` or
`tf.compat.v1.summary.merge_all`. It can be passed in as one tensor; if
more than one, they must be passed in as a list.
Raises:
ValueError: Exactly one of scaffold or summary_op should be set.
"""
if ((scaffold is None and summary_op is None) or
(scaffold is not None and summary_op is not None)):
raise ValueError(
"Exactly one of scaffold or summary_op must be provided.")
self._summary_op = summary_op
self._summary_writer = summary_writer
self._output_dir = output_dir
self._scaffold = scaffold
self._timer = SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._next_step = None
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use SummarySaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = (
self._next_step is None or
self._timer.should_trigger_for_step(self._next_step))
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
stale_global_step = run_values.results["global_step"]
global_step = stale_global_step + 1
if self._next_step is None or self._request_summary:
global_step = run_context.session.run(self._global_step_tensor)
if self._next_step is None:
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START), global_step)
if self._request_summary:
self._timer.update_last_triggered_step(global_step)
if "summary" in run_values.results:
for summary in run_values.results["summary"]:
self._summary_writer.add_summary(summary, global_step)
self._next_step = global_step + 1
def end(self, session=None):
if self._summary_writer:
self._summary_writer.flush()
def _get_summary_op(self):
"""Fetches the summary op either from self._summary_op or self._scaffold.
    Returns:
      A list of summary `Tensor`s, or `None` if no summary op is available.
"""
summary_op = None
if self._summary_op is not None:
summary_op = self._summary_op
elif self._scaffold.summary_op is not None:
summary_op = self._scaffold.summary_op
if summary_op is None:
return None
if not isinstance(summary_op, list):
return [summary_op]
return summary_op
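# A minimal usage sketch (not part of the original file): wiring a
# SummarySaverHook into a MonitoredTrainingSession so merged summaries are
# written every 100 steps. `train_op` and `output_dir` are hypothetical
# placeholders.
def _summary_saver_hook_example(train_op, output_dir):
  import tensorflow.compat.v1 as tf  # assumed TF 1.x-compatible install
  hook = tf.train.SummarySaverHook(
      save_steps=100,  # exactly one of save_steps/save_secs may be set
      output_dir=output_dir,
      summary_op=tf.summary.merge_all())
  with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
    while not sess.should_stop():
      sess.run(train_op)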
@tf_export(v1=["train.GlobalStepWaiterHook"])
class GlobalStepWaiterHook(session_run_hook.SessionRunHook):
"""Delays execution until global step reaches `wait_until_step`.
  This hook delays execution until global step reaches `wait_until_step`. It
is used to gradually start workers in distributed settings. One example usage
would be setting `wait_until_step=int(K*log(task_id+1))` assuming that
task_id=0 is the chief.
"""
def __init__(self, wait_until_step):
"""Initializes a `GlobalStepWaiterHook`.
Args:
      wait_until_step: an `int`, the global step to wait for before starting.
"""
self._wait_until_step = wait_until_step
def begin(self):
self._worker_is_started = False
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use _GlobalStepWaiterHook.")
def before_run(self, run_context):
if self._worker_is_started:
return None
if self._wait_until_step <= 0:
self._worker_is_started = True
return None
logging.info("Waiting for global step %d before starting training.",
self._wait_until_step)
last_logged_step = 0
while True:
current_step = run_context.session.run(self._global_step_tensor)
if current_step >= self._wait_until_step:
self._worker_is_started = True
return None
if current_step - last_logged_step > 1000:
logging.info(
"Waiting for global step %d before starting training. "
"Current step is %d.", self._wait_until_step, current_step)
last_logged_step = current_step
time.sleep(0.5)
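# A hedged sketch of the staggered-start pattern suggested in the docstring
# above: worker `task_id` waits until global step int(K * log(task_id + 1)),
# so the chief (task_id=0) starts immediately. `K` is a caller-chosen
# constant; the names here are illustrative.
def _staggered_start_hook_example(task_id, K=1000):
  import math
  import tensorflow.compat.v1 as tf  # assumed TF 1.x-compatible install
  return tf.train.GlobalStepWaiterHook(
      wait_until_step=int(K * math.log(task_id + 1)))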
@tf_export(v1=["train.FinalOpsHook"])
class FinalOpsHook(session_run_hook.SessionRunHook):
"""A hook which evaluates `Tensors` at the end of a session."""
def __init__(self, final_ops, final_ops_feed_dict=None):
"""Initializes `FinalOpHook` with ops to run at the end of the session.
Args:
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
      final_ops_feed_dict: A feed dictionary to use when running
        `final_ops`.
"""
self._final_ops = final_ops
self._final_ops_feed_dict = final_ops_feed_dict
self._final_ops_values = None
@property
def final_ops_values(self):
return self._final_ops_values
def end(self, session):
if self._final_ops is not None:
try:
self._final_ops_values = session.run(
self._final_ops, feed_dict=self._final_ops_feed_dict)
except (errors.OutOfRangeError, StopIteration) as e:
        logging.warning(
            "An OutOfRangeError or StopIteration exception was raised by the "
            "code in FinalOpsHook. This typically means the ops run by the "
            "FinalOpsHook have a dependency back to some input source, which "
            "should not happen. For example, for metrics in "
            "tf.estimator.Estimator, all metric functions return two ops: "
            "`value_op` and `update_op`. Estimator.evaluate calls the "
            "`update_op` for each batch of the input source and, once it is "
            "exhausted, calls the `value_op` to get the metric values. The "
            "`value_op` here should depend only on reading variables, rather "
            "than on reading another batch from the input. Otherwise the "
            "`value_op`, executed by `FinalOpsHook`, triggers another data "
            "read, which ends in OutOfRangeError/StopIteration. Please fix "
            "that.")
raise e
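# A minimal sketch (illustrative, not part of the original file): FinalOpsHook
# evaluates `final_ops` once when the session ends and exposes the results via
# the `final_ops_values` property. `accuracy_tensor` is a hypothetical metric.
def _final_ops_hook_example(accuracy_tensor):
  import tensorflow.compat.v1 as tf  # assumed TF 1.x-compatible install
  hook = tf.train.FinalOpsHook({"accuracy": accuracy_tensor})
  with tf.train.MonitoredSession(hooks=[hook]) as sess:
    sess.run(accuracy_tensor)  # stand-in for the real evaluation loop
  return hook.final_ops_values["accuracy"]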
@tf_export(v1=["train.FeedFnHook"])
class FeedFnHook(session_run_hook.SessionRunHook):
"""Runs `feed_fn` and sets the `feed_dict` accordingly."""
def __init__(self, feed_fn):
"""Initializes a `FeedFnHook`.
Args:
      feed_fn: function that takes no arguments and returns a feed `dict`
        mapping `Tensor`s to the values to feed.
"""
self.feed_fn = feed_fn
def before_run(self, run_context): # pylint: disable=unused-argument
return session_run_hook.SessionRunArgs(
fetches=None, feed_dict=self.feed_fn())
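# A hedged example (not part of the original file): a FeedFnHook whose feed_fn
# supplies a fresh batch for a placeholder on every `run` call. `placeholder`
# and `next_batch` are hypothetical caller-supplied names.
def _feed_fn_hook_example(placeholder, next_batch):
  import tensorflow.compat.v1 as tf  # assumed TF 1.x-compatible install
  return tf.train.FeedFnHook(feed_fn=lambda: {placeholder: next_batch()})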
@tf_export(v1=["train.ProfilerHook"])
class ProfilerHook(session_run_hook.SessionRunHook):
"""Captures CPU/GPU profiling information every N steps or seconds.
This produces files called "timeline-<step>.json", which are in Chrome
Trace format.
For more information see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
"""
def __init__(self,
save_steps=None,
save_secs=None,
output_dir="",
show_dataflow=True,
show_memory=False):
"""Initializes a hook that takes periodic profiling snapshots.
    `options.run_metadata` argument of `tf.Session.run` is used to collect
metadata about execution. This hook sets the metadata and dumps it in Chrome
Trace format.
Args:
save_steps: `int`, save profile traces every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int` or `float`, save profile traces every N seconds.
output_dir: `string`, the directory to save the profile traces to.
Defaults to the current directory.
show_dataflow: `bool`, if True, add flow events to the trace connecting
producers and consumers of tensors.
show_memory: `bool`, if True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
"""
self._output_file = os.path.join(output_dir, "timeline-{}.json")
self._file_writer = SummaryWriterCache.get(output_dir)
self._show_dataflow = show_dataflow
self._show_memory = show_memory
self._timer = SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
def begin(self):
self._next_step = None
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use ProfilerHook.")
def before_run(self, run_context):
self._request_summary = (
self._next_step is not None and
self._timer.should_trigger_for_step(self._next_step))
requests = {"global_step": self._global_step_tensor}
opts = (
config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
if self._request_summary else None)
return SessionRunArgs(requests, options=opts)
def after_run(self, run_context, run_values):
stale_global_step = run_values.results["global_step"]
if self._next_step is None:
# Update the timer so that it does not activate until N steps or seconds
# have passed.
self._timer.update_last_triggered_step(stale_global_step)
global_step = stale_global_step + 1
if self._request_summary:
global_step = run_context.session.run(self._global_step_tensor)
self._timer.update_last_triggered_step(global_step)
self._save(global_step, self._output_file.format(global_step),
run_values.run_metadata.step_stats)
self._file_writer.add_run_metadata(run_values.run_metadata,
"step_%d" % global_step)
self._next_step = global_step + 1
def _save(self, step, save_path, step_stats):
logging.info("Saving timeline for %d into '%s'.", step, save_path)
with gfile.Open(save_path, "w") as f:
trace = timeline.Timeline(step_stats)
f.write(
trace.generate_chrome_trace_format(
show_dataflow=self._show_dataflow, show_memory=self._show_memory))
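# A minimal usage sketch (not part of the original file): capture a Chrome
# trace every 1000 steps; the resulting timeline-<step>.json files can be
# loaded in chrome://tracing. `train_op` and `output_dir` are hypothetical
# placeholders.
def _profiler_hook_example(train_op, output_dir):
  import tensorflow.compat.v1 as tf  # assumed TF 1.x-compatible install
  hook = tf.train.ProfilerHook(save_steps=1000, output_dir=output_dir)
  with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
    while not sess.should_stop():
      sess.run(train_op)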
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
  # Check that there is no :1 (i.e. that the op has a single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
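# Illustrative note (added): _as_graph_element("loss") resolves the string to
# the tensor "loss:0"; if the op also has an output "loss:1" the bare name is
# ambiguous and a ValueError is raised. Non-string inputs are returned
# unchanged after checking that they belong to the default graph.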
|
tensorflow-master
|
tensorflow/python/training/basic_session_run_hooks.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for GradientDescent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class GradientDescentOptimizerTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
optimizer = gradient_descent.GradientDescentOptimizer(3.0)
sgd_op = optimizer.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
self.evaluate(var1))
self.assertEqual(0, len(optimizer.variables()))
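  # Added note: plain SGD applies var <- var - learning_rate * grad, which is
  # why the expected values above are e.g. 1.0 - 3.0 * 0.1 = 0.7.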
@test_util.run_deprecated_v1
def testBasicResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]))
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
resources.initialize_resources([var0, var1]).run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
self.evaluate(var1))
@test_util.run_deprecated_v1
def testBasicCallableParams(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
lr = lambda: 3.0
sgd_op = gradient_descent.GradientDescentOptimizer(lr).apply_gradients(
zip([grads0, grads1], [var0, var1]))
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
resources.initialize_resources([var0, var1]).run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
self.evaluate(var1))
@test_util.run_deprecated_v1
def testMinimizeResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(var0, x) + var1
loss = pred * pred
sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
resources.initialize_resources([var0, var1]).run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
np_grad = 2 * np_pred
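        # Added note: with loss = pred**2, the chain rule gives
        # d(loss)/d(var0) = 2 * pred * x^T and d(loss)/d(var1) = 2 * pred,
        # and the update scales these gradients by learning_rate = 1.0.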
self.assertAllCloseAccordingToType(
[[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - np_grad], self.evaluate(var1))
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
pred += var1
loss = pred * pred
sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
np_grad = 2 * np_pred
self.assertAllCloseAccordingToType(
[[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - np_grad], self.evaluate(var1))
@test_util.run_deprecated_v1
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
lrate = constant_op.constant(3.0)
sgd_op = gradient_descent.GradientDescentOptimizer(
lrate).apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
self.evaluate(var1))
@test_util.run_deprecated_v1
def testGradWrtRef(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
opt = gradient_descent.GradientDescentOptimizer(3.0)
values = [1.0, 3.0]
vars_ = [variables.Variable([v], dtype=dtype) for v in values]
grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_)
variables.global_variables_initializer().run()
for grad, _ in grads_and_vars:
self.assertAllCloseAccordingToType([1.0], self.evaluate(grad))
@test_util.run_deprecated_v1
def testWithGlobalStep(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
global_step = variables.Variable(0, trainable=False)
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params and global_step
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
self.evaluate(var1))
self.assertAllCloseAccordingToType(1, self.evaluate(global_step))
@test_util.run_deprecated_v1
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0], [2.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([[3.0], [4.0]], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[1.0 - 3.0 * 0.1], [2.0]],
self.evaluate(var0))
self.assertAllCloseAccordingToType([[3.0], [4.0 - 3.0 * 0.01]],
self.evaluate(var1))
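  # Added note: with IndexedSlices gradients only the referenced rows are
  # updated, which is why var0's second row and var1's first row remain
  # unchanged above.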
def testCapturingInDefunWhileExecutingEagerly(self):
with context.eager_mode():
optimizer = gradient_descent.GradientDescentOptimizer(1.0)
def step():
self.v = resource_variable_ops.ResourceVariable(1.0)
with backprop.GradientTape() as tape:
loss = self.v ** 2
grad = tape.gradient(loss, self.v)
optimizer.apply_gradients([(grad, self.v)])
return self.v.read_value()
compiled_step = function.defun(step)
self.assertEqual(float(step()), -1.0)
self.assertEqual(float(compiled_step()), -1.0)
# This shouldn't fail; in particular, the learning rate tensor should
# be an EagerTensor once again, not a graph Tensor.
self.assertEqual(float(step()), -1.0)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/gradient_descent_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ProximalAdagrad for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.ProximalAdagradOptimizer"])
class ProximalAdagradOptimizer(optimizer.Optimizer):
# pylint: disable=line-too-long
"""Optimizer that implements the Proximal Adagrad algorithm.
See this [paper](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf).
"""
def __init__(self, learning_rate, initial_accumulator_value=0.1,
l1_regularization_strength=0.0, l2_regularization_strength=0.0,
use_locking=False, name="ProximalAdagrad"):
"""Construct a new ProximalAdagrad optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
initial_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adagrad".
Raises:
ValueError: If the `initial_accumulator_value` is invalid.
"""
if initial_accumulator_value <= 0.0:
raise ValueError("initial_accumulator_value must be positive: %s" %
initial_accumulator_value)
super(ProximalAdagradOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._initial_accumulator_value = initial_accumulator_value
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
    # Created in _prepare().
self._l1_regularization_strength_tensor = None
self._l2_regularization_strength_tensor = None
self._learning_rate_tensor = None
def _create_slots(self, var_list):
for v in var_list:
with ops.colocate_with(v):
val = constant_op.constant(self._initial_accumulator_value,
shape=v.get_shape(),
dtype=v.dtype.base_dtype)
self._get_or_make_slot(v, val, "accumulator", self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
name="learning_rate")
self._l1_regularization_strength_tensor = ops.convert_to_tensor(
self._l1_regularization_strength,
name="l1_regularization_strength")
self._l2_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_regularization_strength,
name="l2_regularization_strength")
def _apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.apply_proximal_adagrad(
var, acc, self._learning_rate_tensor,
self._l1_regularization_strength_tensor,
self._l2_regularization_strength_tensor,
grad, use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.resource_apply_proximal_adagrad(
var.handle, acc.handle, self._learning_rate_tensor,
self._l1_regularization_strength_tensor,
self._l2_regularization_strength_tensor,
grad, use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.sparse_apply_proximal_adagrad(
var, acc, self._learning_rate_tensor,
self._l1_regularization_strength_tensor,
self._l2_regularization_strength_tensor,
grad.values, grad.indices,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
acc = self.get_slot(var, "accumulator")
return training_ops.resource_sparse_apply_proximal_adagrad(
var.handle, acc.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
grad, indices,
use_locking=self._use_locking)
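# A hedged NumPy sketch (not part of the original file) of the dense update
# these methods delegate to the ApplyProximalAdagrad kernel; the exact scaling
# of the l1/l2 terms is determined by the kernel, so this is illustrative only.
def _proximal_adagrad_reference_step(var, accum, grad, lr, l1, l2):
  import numpy as np
  accum = accum + grad * grad
  adjusted_lr = lr / np.sqrt(accum)  # per-coordinate Adagrad learning rate
  prox = var - adjusted_lr * grad    # unregularized gradient step
  # Proximal step: soft-threshold for l1, shrink for l2.
  var = np.sign(prox) * np.maximum(
      np.abs(prox) - adjusted_lr * l1, 0.0) / (1.0 + adjusted_lr * l2)
  return var, accum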
|
tensorflow-master
|
tensorflow/python/training/proximal_adagrad.py
|